diff --git a/.circleci/config.yml b/.circleci/config.yml index cbbaf7102ea..739b3f2fc50 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,81 +1,203 @@ version: "2.1" +orbs: + aws-s3: circleci/aws-s3@2.0.0 + terraform: circleci/terraform@2.1.0 + +parameters: + cross-container-tag: + type: string + default: go1.18.6-9c97f2f2903566a00bd4b00184aeca0c813adda0 + +executors: + cross-builder: + docker: + # NOTE: To upgrade the Go version, first push the upgrade to the cross-builder Dockerfile + # in the edge repo, then update the version here to match. + - image: quay.io/influxdb/cross-builder:<< pipeline.parameters.cross-container-tag >> + resource_class: large + linux-amd64: + machine: + image: ubuntu-2004:202107-02 + resource_class: large + linux-arm64: + machine: + image: ubuntu-2004:202101-01 + resource_class: arm.large + darwin: + macos: + xcode: 12.5.1 + resource_class: medium + shell: /bin/bash -eo pipefail + windows: + machine: + image: windows-server-2019-vs2019:stable + resource_class: windows.medium + shell: bash.exe -eo pipefail + +# Unlike when a commit is pushed to a branch, CircleCI does not automatically +# execute a workflow when a tag is pushed to a repository. These filters +# allow the corresponding workflow to execute on any branch or tag. +any_filter: &any_filter + filters: + tags: + only: /.*/ + branches: + only: /.*/ + +release_filter: &release_filter + filters: + tags: + # This regex matches what is found in 'scripts/get-version' with the + # '[[:digit:]]' transformed into '\d'. This also excludes release + # candidate detection, because this filter only matches against + # full releases. + only: /^v(\d+)(?:\.(\d+))?(?:\.(\d+))?$/ + branches: + ignore: /.*/ + +nofork_filter: &nofork_filter + filters: + branches: + ignore: /pull\/[0-9]+/ + workflows: version: 2 build: jobs: - - godeps - - jsdeps - - gotest: + - test-race: + <<: *any_filter + - test-build: + <<: *any_filter + name: test-build-<< matrix.os >>-<< matrix.arch >> + matrix: + parameters: + os: [ linux, darwin, windows ] + arch: [ amd64, arm64 ] + exclude: + - { os: darwin, arch: arm64 } + - { os: windows, arch: arm64 } + # linux/amd64 can be tested directly from our cross-builder image + # to save time & enable running with the race detector. 
+ - { os: linux, arch: amd64 } + - test-prebuilt: + <<: *any_filter + name: test-linux-arm64 + executor: linux-arm64 requires: - - godeps - - jstest: + - test-build-linux-arm64 + - test-prebuilt: + <<: *any_filter + name: test-darwin + executor: darwin requires: - - jsdeps - - influxql_validation: + - test-build-darwin-amd64 + - test-prebuilt: + <<: *any_filter + name: test-windows + executor: windows requires: - - godeps - - influxql_integration: + - test-build-windows-amd64 + - fluxtest: + <<: *any_filter + - tlstest: + <<: *any_filter + - lint: + <<: *any_filter + - build: + <<: *any_filter + name: build-<< matrix.os >>-<< matrix.arch >> + build-type: snapshot + matrix: + parameters: + os: [ linux, darwin, windows ] + arch: [ amd64, arm64 ] + exclude: + - { os: darwin, arch: arm64 } + - { os: windows, arch: arm64 } + - build-package: + <<: *any_filter + name: build-package-<< matrix.os >>-<< matrix.arch >> requires: - - godeps - - golint: + - build-<< matrix.os >>-<< matrix.arch >> + matrix: + parameters: + os: [ linux, darwin, windows ] + arch: [ amd64, arm64 ] + exclude: + - { os: darwin, arch: arm64 } + - { os: windows, arch: arm64 } + - test-downgrade: + <<: *any_filter requires: - - godeps - - jslint: + - build-linux-amd64 + - e2e-monitor-ci: + <<: *nofork_filter requires: - - jsdeps - - build: + - build-linux-amd64 + - test-linux-packages: + <<: *nofork_filter requires: - - godeps - - jsdeps - - e2e: + - build-package-linux-amd64 + - changelog: + <<: *any_filter + - s3-publish-packages: + <<: *release_filter requires: - - build - - grace_daily: + - test-linux-packages + - build-package-darwin-amd64 + - build-package-linux-amd64 + - build-package-linux-arm64 + - build-package-windows-amd64 + - s3-publish-changelog: + <<: *release_filter + publish-type: release requires: - - build - filters: - branches: - only: /^(?!pull\/).*$/ - - litmus_daily: - requires: - - build - filters: - branches: - only: /^(?!pull\/).*$/ - - cross_build: + - changelog + - perf-test: + record_results: true requires: - - build + - build-package-darwin-amd64 + - build-package-linux-amd64 + - build-package-linux-arm64 + - build-package-windows-amd64 filters: branches: only: - master - - litmus_integration: + - grace-test: + <<: *any_filter + requires: + - build-linux-amd64 + - litmus-smoke-test: + <<: *any_filter requires: - - litmus_daily + - build-linux-amd64 + - litmus-full-test: + requires: + - build-linux-amd64 + filters: + branches: + only: master + - share-testing-image: filters: branches: only: - master + requires: + - e2e-monitor-ci - hourly-e2e: + aws-destroy-daily: triggers: - schedule: - cron: "0 * * * *" + # run every day at 10pm -- note: use spaces, not tabs + cron: "0 22 * * *" filters: branches: only: - - master + - "master" jobs: - - godeps - - jsdeps - - build: - requires: - - godeps - - jsdeps - - e2e: - requires: - - build + - aws-destroy-by-date nightly: triggers: @@ -86,652 +208,665 @@ workflows: only: - master jobs: - - godeps - - jsdeps - - gotest: - requires: - - godeps - - golint: - requires: - - godeps - - influxql_validation: - requires: - - godeps - - influxql_integration: - requires: - - godeps - - jstest: - requires: - - jsdeps - - jslint: + - changelog + - s3-publish-changelog: + publish-type: nightly requires: - - jsdeps - - deploy_nightly: + - changelog + - test-race + - test-build: + name: test-build-<< matrix.os >>-<< matrix.arch >> + matrix: + parameters: + os: [ linux, darwin, windows ] + arch: [ amd64, arm64 ] + exclude: + - { os: darwin, arch: arm64 } + - { os: windows, 
arch: arm64 } + # linux/amd64 can be tested directly from our cross-builder image + # to save time & enable running with the race detector. + - { os: linux, arch: amd64 } + - test-prebuilt: + name: test-linux-arm64 + executor: linux-arm64 requires: - - gotest - - golint - - jstest - - jslint - - influxql_validation - - influxql_integration - filters: - branches: - only: - - master - - litmus_nightly: + - test-build-linux-arm64 + - test-prebuilt: + name: test-darwin + executor: darwin requires: - - deploy_nightly - - grace_nightly: + - test-build-darwin-amd64 + - test-prebuilt: + name: test-windows + executor: windows requires: - - deploy_nightly - - release: - jobs: - - godeps: - filters: - branches: - ignore: /.*/ - tags: - only: /^v[0-9]+\.[0-9]+\.[0-9]+(-(rc|alpha|beta)\.[0-9]+)?$/ - - jsdeps: - filters: - branches: - ignore: /.*/ - tags: - only: /^v[0-9]+\.[0-9]+\.[0-9]+(-(rc|alpha|beta)\.[0-9]+)?$/ - - gotest: - requires: - - godeps - filters: - branches: - ignore: /.*/ - tags: - only: /^v[0-9]+\.[0-9]+\.[0-9]+(-(rc|alpha|beta)\.[0-9]+)?$/ - - golint: + - test-build-windows-amd64 + - lint + - fluxtest + - tlstest + - build: + name: build-nightly-<< matrix.os >>-<< matrix.arch >> + build-type: nightly + matrix: + parameters: + os: [ linux, darwin, windows ] + arch: [ amd64, arm64 ] + exclude: + - { os: darwin, arch: arm64 } + - { os: windows, arch: arm64 } + - build-docker-nightly: + name: build-docker-nightly-arm64 + resource_class: arm.medium + arch: arm64 + arch_docker: arm64v8 requires: - - godeps - filters: - branches: - ignore: /.*/ - tags: - only: /^v[0-9]+\.[0-9]+\.[0-9]+(-(rc|alpha|beta)\.[0-9]+)?$/ - - jstest: + - build-nightly-linux-arm64 + - build-docker-nightly: + name: build-docker-nightly-amd64 + resource_class: medium + arch: amd64 + arch_docker: amd64 requires: - - jsdeps - filters: - branches: - ignore: /.*/ - tags: - only: /^v[0-9]+\.[0-9]+\.[0-9]+(-(rc|alpha|beta)\.[0-9]+)?$/ - - jslint: + - build-nightly-linux-amd64 + - docker-nightly-manifest: requires: - - jsdeps - filters: - branches: - ignore: /.*/ - tags: - only: /^v[0-9]+\.[0-9]+\.[0-9]+(-(rc|alpha|beta)\.[0-9]+)?$/ - - influxql_validation: + - build-docker-nightly-amd64 + - build-docker-nightly-arm64 + - build-package: + name: build-package-<< matrix.os >>-<< matrix.arch >> requires: - - godeps - filters: - branches: - ignore: /.*/ - tags: - only: /^v[0-9]+\.[0-9]+\.[0-9]+(-(rc|alpha|beta)\.[0-9]+)?$/ - - influxql_integration: + - build-nightly-<< matrix.os >>-<< matrix.arch >> + - changelog + matrix: + parameters: + os: [ linux, darwin, windows ] + arch: [ amd64, arm64 ] + exclude: + - { os: darwin, arch: arm64 } + - { os: windows, arch: arm64 } + - litmus-full-test: requires: - - godeps - filters: - branches: - ignore: /.*/ - tags: - only: /^v[0-9]+\.[0-9]+\.[0-9]+(-(rc|alpha|beta)\.[0-9]+)?$/ - - release: + - build-nightly-linux-amd64 + - grace-test: requires: - - gotest - - golint - - jstest - - jslint - - influxql_validation - - influxql_integration - filters: - branches: - ignore: /.*/ - tags: - only: /^v[0-9]+\.[0-9]+\.[0-9]+(-(rc|alpha|beta)\.[0-9]+)?$/ + - build-nightly-linux-amd64 -orbs: - # Needed to install chrome for e2e testing. 
- browser-tools: circleci/browser-tools@1.1 +commands: + quay_login: + steps: + - run: + name: Log in to Quay + command: docker login -u "$QUAY_USER" -p $QUAY_PASS quay.io jobs: - - #################### - ### UI-only jobs ### - #################### - - jsdeps: - docker: - - image: quay.io/influxdb/influxdb-circleci:latest - working_directory: /home/circleci/go/src/github.com/influxdata/influxdb + lint: + executor: cross-builder steps: - checkout - - restore_cache: - name: Restore Yarn Cache - keys: - - yarn-deps-lock-{{ checksum "ui/yarn.lock" }} - - yarn-deps-lock- - run: - name: Install Dependencies + name: Check go version + command: ./scripts/ci/check-system-go-matches-go-mod.sh + when: always + - run: + name: Check flag generation + command: ./scripts/ci/lint/flags.bash + when: always + - run: + name: Check SQL migrations + command: make checksqlmigrations + when: always + - run: + name: Check formatting + command: make checkfmt + when: always + - run: + name: Check codegen + command: make checkgenerate + when: always + - run: + name: vet + command: make vet + when: always + - run: + name: staticcheck command: | - set +e - cd ui - yarn install --immutable - - save_cache: - name: Save Yarn Cache - key: yarn-deps-lock-{{ checksum "ui/yarn.lock" }} - paths: - - /home/circleci/go/src/github.com/influxdata/influxdb/ui/node_modules - - ~/.cache/yarn - - ~/.cache/Cypress + go install honnef.co/go/tools/cmd/staticcheck + staticcheck ./... + when: always - jstest: - docker: - - image: quay.io/influxdb/influxdb-circleci:latest - working_directory: /home/circleci/go/src/github.com/influxdata/influxdb + test-race: + executor: cross-builder + parallelism: 8 steps: - checkout - - restore_cache: - name: Restore Yarn Cache - keys: - - yarn-deps-lock-{{ checksum "ui/yarn.lock" }} - - run: - name: install dependencies - command: | - cd ui - # This should usually be a no-op (fully contained in the Yarn cache above), but we - # include it to be safe since `yarn test` won't auto-install missing modules. - yarn install --immutable - - run: make ui_client - run: - name: run tests - command: | - cd ui - yarn test:circleci + name: Run race tests + command: ./scripts/ci/run-race-tests.sh $(pwd)/test-results - store_test_results: - path: ui/coverage + path: ./test-results - store_artifacts: - path: ui/coverage - destination: raw-test-output + path: ./test-results + destination: raw-test-results - jslint: - docker: - - image: quay.io/influxdb/influxdb-circleci:latest - working_directory: /home/circleci/go/src/github.com/influxdata/influxdb - parallelism: 8 + test-build: + executor: cross-builder + resource_class: large + parameters: + os: + type: string + arch: + type: string steps: - checkout - - restore_cache: - name: Restore Yarn Cache - keys: - - yarn-deps-lock-{{ checksum "ui/yarn.lock" }} - run: - name: install dependencies + name: Build test binaries command: | - cd ui - # This should usually be a no-op (fully contained in the Yarn cache above), but we - # include it to be safe since the lint commands won't auto-install missing modules. 
- yarn install --immutable - - run: make ui_client - - run: - name: parallel eslint - command: | - cd ui - TESTFILES=$(circleci tests glob "src/**/*.ts*" "cypress/**/*.ts*" | circleci tests split --split-by=filesize) - yarn prettier:circleci ${TESTFILES[@]} - yarn eslint:circleci ${TESTFILES[@]} - - ######################### - ### Backend-only jobs ### - ######################### + export GOOS=<< parameters.os >> + export GOARCH=<< parameters.arch >> + ./scripts/ci/build-tests.sh ./test-bin + - persist_to_workspace: + root: . + paths: + - test-bin - godeps: - docker: - - image: quay.io/influxdb/influxdb-circleci:latest - resource_class: large - environment: - GOCACHE: /tmp/go-cache - working_directory: /home/circleci/go/src/github.com/influxdata/influxdb + test-prebuilt: + parameters: + executor: + type: executor + executor: << parameters.executor >> + parallelism: 8 steps: - checkout - - restore_cache: - name: Restore GOPATH/pkg/mod - keys: - - influxdb-gomod-sum-{{ checksum "go.sum" }} - - influxdb-gomod-sum- + - attach_workspace: + at: . - run: - name: Install Dependencies - command: go mod download -x - - save_cache: - name: Save GOPATH/pkg/mod - key: influxdb-gomod-sum-{{ checksum "go.sum" }} - paths: - - /home/circleci/go/pkg/mod + name: Run tests + command: ./scripts/ci/run-prebuilt-tests.sh $(pwd)/test-bin $(pwd)/test-results + - store_test_results: + path: ./test-results + - store_artifacts: + path: ./test-results + destination: raw-test-results - golint: - docker: - - image: quay.io/influxdb/influxdb-circleci:latest - environment: - GOCACHE: /tmp/go-cache - working_directory: /home/circleci/go/src/github.com/influxdata/influxdb + fluxtest: + executor: cross-builder steps: - checkout - - restore_cache: - keys: - - influxdb-gomod-sum-{{ checksum "go.sum" }} - - run: | - # this is not in a seperate bash script because it isn't meant to be run on local. - # it just checks to make sure that the same major/minor version of go is used in the mod file as on ci - # to prevent accidentally checking in a wrong go mod version. - gomodversiondiff=$( go mod edit -go=$( go version | sed -n 's/^.*go\([0-9]*.[0-9]*\).*$/\1/p') -print |diff - go.mod ) - if [ "$gomodversiondiff" ] - then - echo unexpected go version $gomodversiondiff - exit 1 - fi - - run: make vet - - run: make checkfmt - - run: make checktidy - - run: GO111MODULE=on go mod vendor # staticcheck looks in vendor for dependencies. - - run: GO111MODULE=on go install honnef.co/go/tools/cmd/staticcheck # Install staticcheck from the version we specify in go.mod. - - run: GO111MODULE=on ./env staticcheck ./... 
- - gotest: - docker: - - image: quay.io/influxdb/influxdb-circleci:latest - resource_class: large - environment: - GOCACHE: /tmp/go-cache - TEST_RESULTS: /tmp/test-results - working_directory: /home/circleci/go/src/github.com/influxdata/influxdb - parallelism: 8 + - run: make test-flux + + tlstest: + executor: cross-builder steps: - checkout - - restore_cache: - name: Restore GOCACHE - keys: - - influxdb-gotest-{{ .Branch }}-{{ .Revision }} - - influxdb-gotest-{{ .Branch }}- - - influxdb-gotest- - - restore_cache: - name: Restore GOPATH/pkg/mod - keys: - - influxdb-gomod-sum-{{ checksum "go.sum" }} - - run: mkdir -p $TEST_RESULTS - - run: - name: run parallel race tests + - run: make test-tls + + build: + executor: cross-builder + parameters: + os: + type: string + arch: + type: string + build-type: + type: string + steps: + - checkout + - run: + name: Install Package Dependencies command: | - GO_TEST_CMD="gotestsum --format standard-quiet --junitfile /tmp/test-results/gotestsum.xml -- -p=4" - TESTFILES=($(go list ./... | circleci tests split --split-by=timings)) - make GO_TEST_CMD="$GO_TEST_CMD" GO_TEST_PATHS="${TESTFILES[*]}" test-go-race - - save_cache: - name: Save GOCACHE - key: influxdb-gotest-{{ .Branch }}-{{ .Revision }} - paths: - - /tmp/go-cache + export DEBIAN_FRONTEND=noninteractive + apt-get update + apt-get install --yes git + - run: + name: Get InfluxDB Version + command: | + PREFIX=2.x .circleci/scripts/get-version + - run: + name: Generate UI assets + command: make generate-web-assets + - run: + name: Build binaries + command: | + build_type="<< parameters.build-type >>" + + # release builds occur from the "build" pipeline + if [[ ${build_type} == snapshot ]] + then + # `get-version` determines whether this is a release build. If + # this is a release build, ensure that the proper version is + # templated into the go binary. + if [[ ${RELEASE:-} ]] + then + build_type=release + fi + fi + + export GOOS=<< parameters.os >> + export GOARCH=<< parameters.arch >> + ./scripts/ci/build.sh "bin/influxd_$(go env GOOS)_$(go env GOARCH)" "${build_type}" ./cmd/influxd - store_artifacts: - path: /tmp/test-results - destination: raw-test-output - - store_test_results: - path: /tmp/test-results + path: bin + - persist_to_workspace: + root: . + paths: + - bin - influxql_validation: - docker: - - image: quay.io/influxdb/influxdb-circleci:latest - resource_class: large - environment: - GOCACHE: /tmp/go-cache - TEST_RESULTS: /tmp/test-results - working_directory: /home/circleci/go/src/github.com/influxdata/influxdb + build-package: + executor: linux-amd64 + parameters: + os: + type: string + arch: + type: string steps: - checkout - - restore_cache: - name: Restore GOCACHE - keys: - - influxdb-iqlvalidation-{{ .Branch }}-{{ .Revision }} - - influxdb-iqlvalidation-{{ .Branch }}- - - influxdb-iqlvalidation- - - restore_cache: - name: Restore GOPATH/pkg/mod - keys: - - influxdb-gomod-sum-{{ checksum "go.sum" }} - - run: mkdir -p $TEST_RESULTS - - run: make GO_TEST_CMD="gotestsum --format standard-quiet --junitfile /tmp/test-results/gotestsum.xml --" test-influxql-validation - - save_cache: - name: Save GOCACHE - key: influxdb-iqlvalidation-{{ .Branch }}-{{ .Revision }} + - attach_workspace: + at: . 
+ - run: + name: Install Package Dependencies + command: | + export DEBIAN_FRONTEND=noninteractive + sudo apt-get update + sudo apt-get install --yes \ + build-essential \ + git \ + rpm \ + ruby-dev + + gem install fpm + - run: + name: Get InfluxDB Version + command: | + PREFIX=2.x .circleci/scripts/get-version + - run: + name: Build Package + command: | + export PLAT=<< parameters.os >> + export ARCH=<< parameters.arch >> + .circleci/scripts/build-package + - persist_to_workspace: + root: / paths: - - /tmp/go-cache + - artifacts - store_artifacts: - path: /tmp/test-results - destination: raw-test-output - - store_test_results: - path: /tmp/test-results + path: /artifacts + destination: artifacts - influxql_integration: + s3-publish-packages: docker: - - image: quay.io/influxdb/influxdb-circleci:latest - resource_class: large - environment: - GOCACHE: /tmp/go-cache - TEST_RESULTS: /tmp/test-results - working_directory: /home/circleci/go/src/github.com/influxdata/influxdb + - image: ubuntu:latest steps: + - attach_workspace: + at: /tmp/workspace - checkout - - restore_cache: - name: Restore GOCACHE - keys: - - influxdb-iqlintegration-{{ .Branch }}-{{ .Revision }} - - influxdb-iqlintegration-{{ .Branch }}- - - influxdb-iqlintegration- - - restore_cache: - name: Restore GOPATH/pkg/mod - keys: - - influxdb-gomod-sum-{{ checksum "go.sum" }} - - run: mkdir -p $TEST_RESULTS - - run: make GO_TEST_CMD="gotestsum --format standard-quiet --junitfile /tmp/test-results/gotestsum.xml --" test-influxql-integration - - save_cache: - name: Save GOCACHE - key: influxdb-iqlintegration-{{ .Branch }}-{{ .Revision }} - paths: - - /tmp/go-cache - - store_artifacts: - path: /tmp/test-results - destination: raw-test-output - - store_test_results: - path: /tmp/test-results + - run: + name: Publish Packages to S3 + command: | + export DEBIAN_FRONTEND=noninteractive + apt-get update + apt-get install --yes awscli git - ##################################### - ### UI+backend build/release jobs ### - ##################################### + PREFIX=2.x .circleci/scripts/get-version + source "${BASH_ENV}" - build: + # required for sha256sum generate the correct paths + pushd /tmp/workspace/artifacts + + # Since the artifacts are generated in parallel, the checksums + # cannot be calculated until all artifacts are complete. This + # excludes `CHANGELOG.md`. Historically, `CHANGELOG.md` was + # not included in the checksums. + sha256sum * | tee influxdb.${VERSION}.sha256 + + aws s3 sync . 's3://dl.influxdata.com/influxdb/releases' + + s3-publish-changelog: + parameters: + publish-type: + type: string docker: - - image: quay.io/influxdb/influxdb-circleci:latest - environment: - GOCACHE: /tmp/go-cache - working_directory: /home/circleci/go/src/github.com/influxdata/influxdb + - image: ubuntu:latest steps: + - attach_workspace: + at: /tmp/workspace - checkout - - run: make checkcommit - - restore_cache: - name: Restore GOPATH/pkg/mod - keys: - - influxdb-gomod-sum-{{ checksum "go.sum" }} - - restore_cache: - name: Restore GOCACHE - keys: - - influxdb-build-{{ .Branch }}-{{ .Revision }} - - influxdb-build-{{ .Branch }}- - - influxdb-build- - - restore_cache: - name: Restore Yarn Cache - keys: - - yarn-deps-lock-{{ checksum "ui/yarn.lock" }} - - run: make build - - save_cache: - name: Save GOCACHE - key: influxdb-build-{{ .Branch }}-{{ .Revision }} - paths: - - /tmp/go-cache - - persist_to_workspace: - root: . 
- paths: - - project - - bin/linux/influxd - - bin/linux/influx - - etc/litmus_success_notify.sh - - etc/litmus_fail_notify.sh + - run: + name: Publish Changelog to S3 + command: | + export DEBIAN_FRONTEND=noninteractive + apt-get update + apt-get install --yes awscli git - cross_build: - docker: - - image: quay.io/influxdb/influxdb-circleci:latest - environment: - GOCACHE: /tmp/go-cache + PREFIX=2.x .circleci/scripts/get-version + source "${BASH_ENV}" + + pushd /tmp/workspace/changelog_artifacts + + case "<< parameters.publish-type >>" + in + release) + aws s3 cp CHANGELOG.md "s3://dl.influxdata.com/influxdb/releases/CHANGELOG.${VERSION}.md" + ;; + nightly) + aws s3 cp CHANGELOG.md "s3://dl.influxdata.com/platform/nightlies/<< pipeline.git.branch >>/CHANGELOG.md" + ;; + esac + + build-docker-nightly: + parameters: + resource_class: + type: string + arch: + type: string + arch_docker: + type: string + machine: + image: ubuntu-2004:current + resource_class: << parameters.resource_class >> steps: + - attach_workspace: + at: /tmp/workspace - checkout - - restore_cache: - name: Restore GOPATH/pkg/mod - keys: - - influxdb-gomod-sum-{{ checksum "go.sum" }} - - restore_cache: - name: Restore GOCACHE - keys: - - influxdb-cross-build-{{ .Branch }}-{{ .Revision }} - - influxdb-cross-build-{{ .Branch }}- - - influxdb-build-{{ .Branch }}-{{ .Revision }} - - influxdb-build-{{ .Branch }}- - - influxdb-cross-build- - - influxdb-build- - - restore_cache: - name: Restore Yarn Cache - keys: - - yarn-deps-lock-{{ checksum "ui/yarn.lock" }} - - setup_remote_docker + - quay_login - run: - name: Docker Login - command: docker login -u "$QUAY_USER" -p $QUAY_PASS quay.io - - run: make dist - - save_cache: - name: Save GOCACHE - key: influxdb-cross-build-{{ .Branch }}-{{ .Revision }} - paths: - - /tmp/go-cache + name: Install Dependencies + command: | + export DEBIAN_FRONTEND=noninteractive + sudo apt-get update + sudo apt-get install -y docker + - run: + name: Build Docker Container + command: | + # The Dockerfile requires `influxd` to build the container. This + # is written into `/tmp/workspace/bin/` by `build-binaries`. For + # the Dockerfile to build successfully, copy `influxd` to + # the current directory. + cp /tmp/workspace/bin/influxd_linux_<< parameters.arch >>/influxd . - deploy_nightly: - docker: - - image: quay.io/influxdb/influxdb-circleci:latest - environment: - GOCACHE: /tmp/go-cache - working_directory: /home/circleci/go/src/github.com/influxdata/influxdb + docker build -f docker/influxd/Dockerfile -t quay.io/influxdb/influxdb-<< parameters.arch_docker >>:nightly . 
+ + docker push quay.io/influxdb/influxdb-<< parameters.arch_docker >>:nightly + + docker-nightly-manifest: + machine: + image: ubuntu-2004:current steps: + - run: + name: Install Dependencies + command: | + export DEBIAN_FRONTEND=noninteractive + sudo apt-get update + sudo apt-get install -y docker + - quay_login + - run: + name: Update Docker Manifests + command: | + docker manifest create \ + quay.io/influxdb/influxdb:nightly \ + quay.io/influxdb/influxdb-amd64:nightly \ + quay.io/influxdb/influxdb-arm64v8:nightly + docker manifest push \ + quay.io/influxdb/influxdb:nightly + + test-linux-packages: + executor: terraform/default + steps: + - attach_workspace: + at: /tmp/workspace - checkout - - restore_cache: - name: Restore GOPATH/pkg/mod - keys: - - influxdb-gomod-sum-{{ checksum "go.sum" }} - - restore_cache: - name: Restore GOCACHE - keys: - - influxdb-nightly-{{ .Branch }}-{{ .Revision }} - - influxdb-nightly-{{ .Branch }}- - - influxdb-build-{{ .Branch }}-{{ .Revision }} - - influxdb-build-{{ .Branch }}- - - influxdb-nightly- - - influxdb-build- - - restore_cache: - name: Restore Yarn Cache - keys: - - yarn-deps-lock-{{ checksum "ui/yarn.lock" }} - - setup_remote_docker + - add_ssh_keys: + fingerprints: + - "91:0a:5b:a7:f9:46:77:f3:5d:4a:cf:d2:44:c8:2c:5a" + - terraform/validate: + path: scripts/ci/ - run: - name: Docker Login - command: docker login -u "$QUAY_USER" -p $QUAY_PASS quay.io + name: Terraform apply + command: | + set -x + export DEBNAME="$(find /tmp/workspace/artifacts/influxdb2-*-amd64.deb)" + terraform -chdir=scripts/ci init -input=false + AWS_ACCESS_KEY_ID=$TEST_AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY=$TEST_AWS_SECRET_ACCESS_KEY terraform \ + -chdir=scripts/ci \ + apply \ + -auto-approve \ + -var package_path=${DEBNAME} \ + -var instance_name=circleci-terraform-${CIRCLE_SHA1} \ + -var additional_files_dir=${PWD}/scripts/ci/tests/ - run: - name: Build nightly - command: make nightly - - save_cache: - name: Save GOCACHE - key: influxdb-nightly-{{ .Branch }}-{{ .Revision }} - paths: - - /tmp/go-cache - - persist_to_workspace: - root: . 
- paths: - - etc/litmus_success_notify.sh - - etc/litmus_fail_notify.sh + name: Install deb + command: | + set -x + export ec2_ip=$(terraform -chdir=scripts/ci output -raw test_node_ssh) + ssh -o "StrictHostKeyChecking=no" ubuntu@$ec2_ip \<< EOF + sudo apt-get update && sudo apt-get install -y /home/ubuntu/influxdb.deb + EOF + - run: + name: Run tests + command: | + set -x + export ec2_ip=$(terraform -chdir=scripts/ci output -raw test_node_ssh) + files=$(ssh -o "StrictHostKeyChecking=no" ubuntu@$ec2_ip 'find /home/ubuntu/files/ -maxdepth 1 -mindepth 1 | sort') + for file in $files; do + ssh -o "StrictHostKeyChecking=no" ubuntu@$ec2_ip "sudo $file" + done + - run: + name: Terraform destroy + when: always + command: | + AWS_ACCESS_KEY_ID=$TEST_AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY=$TEST_AWS_SECRET_ACCESS_KEY terraform \ + -chdir=scripts/ci \ + destroy \ + -auto-approve - release: + perf-test: docker: - - image: quay.io/influxdb/influxdb-circleci:latest - environment: - GOCACHE: /tmp/go-cache - working_directory: /home/circleci/go/src/github.com/influxdata/influxdb + - image: cimg/base:2022.04 + resource_class: small + parameters: + record_results: + type: boolean steps: + - attach_workspace: + at: /tmp/workspace - checkout - - restore_cache: - name: Restore GOPATH/pkg/mod - keys: - - influxdb-gomod-sum-{{ checksum "go.sum" }} - - restore_cache: - name: Restore GOCACHE - keys: - - influxdb-release-{{ .Branch }}-{{ .Revision }} - - influxdb-release-{{ .Branch }}- - - influxdb-build-{{ .Branch }}-{{ .Revision }} - - influxdb-build-{{ .Branch }}- - - influxdb-release- - - influxdb-build- - - restore_cache: - name: Restore Yarn Cache - keys: - - yarn-deps-lock-{{ checksum "ui/yarn.lock" }} - - setup_remote_docker + # To ssh into aws without failing host key checks + - add_ssh_keys: + fingerprints: + - "91:0a:5b:a7:f9:46:77:f3:5d:4a:cf:d2:44:c8:2c:5a" - run: - name: Docker Login - command: docker login -u "$QUAY_USER" -p $QUAY_PASS quay.io + name: Set up AWS CLI + command: | + ./scripts/ci/install-aws-cli.sh + echo "export AWS_ACCESS_KEY_ID=${TEST_AWS_ACCESS_KEY_ID}" >> vars.sh + echo "export AWS_SECRET_ACCESS_KEY=${TEST_AWS_SECRET_ACCESS_KEY}" >> vars.sh + echo "export TEST_RECORD_RESULTS=<< parameters.record_results >>" >> vars.sh + echo 'export DATA_I_TYPE="r5.2xlarge"' >> vars.sh - run: - name: import GPG key + name: Set up AWS instance command: | - echo -e "$GPG_PRIVATE_KEY" > private.key - gpg --batch --import private.key - - run: - name: Build release - command: make release - - save_cache: - name: Save GOCACHE - key: influxdb-release-{{ .Branch }}-{{ .Revision }} - paths: - - /tmp/go-cache + scripts/ci/perf_test.sh + - run: + name: Run perf test + no_output_timeout: 20m + command: | + source vars.sh + set -x + ssh "ubuntu@$EC2_IP" "sudo ./run_perftest.sh" + - run: + name: Tear down AWS instance + when: always + command: | + source vars.sh + set -x + if [[ -n "$EC2_INSTANCE_ID" ]] ; then + aws --region us-west-2 ec2 terminate-instances --instance-ids "$EC2_INSTANCE_ID" + fi - ################################# - ### e2e/integration test jobs ### - ################################# + aws-destroy-by-date: + executor: linux-amd64 + steps: + - attach_workspace: + at: /tmp/workspace + - checkout + - add_ssh_keys: + fingerprints: + - "91:0a:5b:a7:f9:46:77:f3:5d:4a:cf:d2:44:c8:2c:5a" + - run: + name: Destroy AWS instances with datestring more than a day old + no_output_timeout: 20m + command: | + set -x + yesterday_date=$(date --date "yesterday" +%Y%m%d) + 
instance_info=$(AWS_ACCESS_KEY_ID=${TEST_AWS_ACCESS_KEY_ID} \ + AWS_SECRET_ACCESS_KEY=${TEST_AWS_SECRET_ACCESS_KEY} \ + aws --region us-west-2 ec2 describe-instances \ + --filters "Name=tag:Name,Values=oss-perftest-*" \ + --query "Reservations[].Instances[].[InstanceId, Tags[?Key=='Name']|[0].Value]" \ + --output text) + while [ -n "$instance_info" ]; do + instance_id=$(echo $instance_info | tr -s ' ' | cut -d ' ' -f1) + name=$(echo $instance_info | tr -s ' ' | cut -d ' ' -f2) + instance_info=$(echo $instance_info | tr -s ' ' | cut -d ' ' -f3-) + date=$(echo $name | cut -d '-' -f3) + if [ $date -le $yesterday_date ]; then + AWS_ACCESS_KEY_ID=${TEST_AWS_ACCESS_KEY_ID} AWS_SECRET_ACCESS_KEY=${TEST_AWS_SECRET_ACCESS_KEY} aws --region us-west-2 ec2 terminate-instances --instance-ids $instance_id + fi + done - e2e: + e2e-monitor-ci: docker: - - image: quay.io/influxdb/influxdb-circleci:latest - environment: - GOCACHE: /tmp/go-cache - working_directory: /home/circleci/go/src/github.com/influxdata/influxdb + - image: cimg/base:stable steps: - - browser-tools/install-browser-tools - checkout - attach_workspace: - at: /home/circleci/go/src/github.com/influxdata/influxdb - - restore_cache: - name: Restore Yarn Cache - keys: - - yarn-deps-lock-{{ checksum "ui/yarn.lock" }} + at: . + - setup_remote_docker + - quay_login - run: - command: ./bin/linux/influxd --store=memory --e2e-testing=true --feature-flags=communityTemplates=true - background: true - - run: make e2e - - store_test_results: - path: ui/junit-results + name: Build and push candidate docker image + command: | + cp bin/influxd_linux_amd64/influxd . + docker build -f docker/influxd/Dockerfile -t quay.io/influxdb/oss-acceptance:${CIRCLE_SHA1} . + docker push quay.io/influxdb/oss-acceptance:${CIRCLE_SHA1} + - run: + name: Run the tests + command: API_KEY=${MONITOR_CI_API_KEY} MONITOR_CI_BRANCH="master" OSS_SHA=${CIRCLE_SHA1} RUN_WORKFLOW="build_oss" ./scripts/ci/run-monitor-ci-tests.bash - store_artifacts: - path: ui/cypress/videos - destination: videos + path: monitor-ci/test-artifacts/results/build-oss-image + destination: test_artifacts/results/build-oss-image - store_artifacts: - path: ui/cypress/screenshots - destination: screenshots + path: monitor-ci/test-artifacts/results/oss-e2e + destination: test_artifacts/results/oss-e2e + - store_artifacts: + path: monitor-ci/test-artifacts/results/shared + destination: test_artifacts/results/shared - litmus_daily: - machine: true + litmus-smoke-test: + executor: linux-amd64 steps: - attach_workspace: at: ~/project - - run: docker login -u=$QUAY_USER -p=$QUAY_PASS quay.io - - run: docker run --entrypoint "./run_litmus_tests_oss.sh" -e ONE_TEST=src/cloud/rest_api/smoke/test_smoke.py -e BINARYPATH=/Litmus/result/bin/linux/influxd -e BOLTPATH=/Litmus/result/influxd_test/influxd.bolt -e ENGINEPATH=/Litmus/result/influxd_test --net host -v /var/run/docker.sock:/var/run/docker.sock -v ~/project:/Litmus/result quay.io/influxdb/litmus:latest - - run: - name: Litmus Smoke Tests Success - when: on_success - command: bash ~/project/etc/litmus_success_notify.sh Smoke - path: ~/project - - run: - name: Litmus Smoke Tests Fail - when: on_fail - command: bash ~/project/etc/litmus_fail_notify.sh Smoke + - run: docker run --entrypoint "./run_litmus_tests_oss.sh" -e ONE_TEST=src/cloud/rest_api/smoke/test_smoke.py -e BINARYPATH=/Litmus/result/bin/influxd_linux_amd64/influxd -e BOLTPATH=/Litmus/result/influxd_test/influxd.bolt -e ENGINEPATH=/Litmus/result/influxd_test --net host -v 
/var/run/docker.sock:/var/run/docker.sock -v ~/project:/Litmus/result quay.io/influxdb/litmus:latest - store_artifacts: path: ~/project destination: raw-daily-output - store_test_results: path: ~/project - litmus_integration: - machine: true + litmus-full-test: + executor: linux-amd64 steps: - attach_workspace: at: ~/project - - run: docker login -u=$QUAY_USER -p=$QUAY_PASS quay.io - - run: docker run --entrypoint "./run_litmus_tests_oss.sh" -e TEST_LIST=tests_lists/gateway_api_tests.list -e INFLUXPATH=/Litmus/result/bin/linux/influx -e BINARYPATH=/Litmus/result/bin/linux/influxd -e BOLTPATH=/tmp/influxd_test/influxd.bolt -e ENGINEPATH=/tmp/influxd_test --net host -v /var/run/docker.sock:/var/run/docker.sock -v ~/project:/Litmus/result quay.io/influxdb/litmus:latest - - run: - name: Litmus Integration Tests Success - when: on_success - command: bash ~/project/etc/litmus_success_notify.sh Integration - - run: - name: Litmus Integration Tests Failure - when: on_fail - command: bash ~/project/etc/litmus_fail_notify.sh Integration + - run: docker run --entrypoint "./run_litmus_tests_oss.sh" -e TEST_LIST=tests_lists/gateway_api_tests.list -e BINARYPATH=/Litmus/result/bin/influxd_linux_amd64/influxd -e BOLTPATH=/tmp/influxd_test/influxd.bolt -e ENGINEPATH=/tmp/influxd_test --net host -v /var/run/docker.sock:/var/run/docker.sock -v ~/project:/Litmus/result quay.io/influxdb/litmus:latest - store_artifacts: path: ~/project destination: raw-daily-output - store_test_results: path: ~/project - litmus_nightly: - machine: true + grace-test: + executor: linux-amd64 steps: - attach_workspace: at: ~/project - - run: docker login -u=$QUAY_USER -p=$QUAY_PASS quay.io - - run: docker run --entrypoint "./run_litmus_tests_oss.sh" -e TEST_LIST=tests_lists/gateway_api_tests.list -e DOCKERIMAGE=true --net host -v /var/run/docker.sock:/var/run/docker.sock -v ~/project:/Litmus/result quay.io/influxdb/litmus:latest - run: - name: Litmus Nightly Tests Success - when: on_success - command: bash ~/project/etc/litmus_success_notify.sh Nightly + command: ./bin/influxd_linux_amd64/influxd --store=memory --log-level=debug + background: true + - run: mkdir -p ~/project/results + - run: + name: Wait for influxd to bind HTTP port + command: | + attempts=0 + max_attempts=30 + while ! curl localhost:8086/health; do + attempts=$((attempts+1)) + if [[ $attempts = $max_attempts ]]; then + >&2 echo influxd "didn't" start in time + exit 1 + fi + sleep 1 + done - run: - name: Litmus Nightly Tests Fail - when: on_fail - command: bash ~/project/etc/litmus_fail_notify.sh Nightly + name: Run grace test driver + command: | + docker run -v ~/project/results:/grace/test-results/grace-results \ + --env GRACE_BASE_URL="http://172.17.0.1:8086" \ + --env GRACE_ORG_NAME="daily-org" \ + --env GRACE_BUCKET_NAME="daily-bucket" \ + --env GRACE_USER="daily@influxdata.com" \ + --env GRACE_PASS="dailyPassword" \ + --env GRACE_VALIDATE_OPENAPI=1 \ + quay.io/influxdb/grace:latest-cd - store_artifacts: - path: ~/project - destination: raw-nightly-output + path: ~/project/results - store_test_results: - path: ~/project + path: ~/project/results - grace_nightly: - machine: true + test-downgrade: + executor: cross-builder steps: + - checkout - attach_workspace: - at: ~/project - - run: docker login -u=$QUAY_USER -p=$QUAY_PASS quay.io - - run: docker run --net host -v /var/run/docker.sock:/var/run/docker.sock -e TEST_RESULTS=~/project quay.io/influxdb/grace:latest - - store_artifacts: - path: ~/project - - store_test_results: - path: ~/project + at: . 
+ - run: + name: Run downgrade tests + command: | + ./scripts/ci/test-downgrade.sh $(pwd)/bin/influxd_linux_amd64/influxd - grace_daily: - machine: true + share-testing-image: + docker: + - image: cimg/base:stable steps: - - attach_workspace: - at: ~/project - - run: docker login -u=$QUAY_USER -p=$QUAY_PASS quay.io + - setup_remote_docker + - quay_login - run: - command: ./bin/linux/influxd --store=memory --log-level=debug - background: true - - run: docker run --net host -v /var/run/docker.sock:/var/run/docker.sock -v ~/project:/grace/test-results/grace-results quay.io/influxdb/grace:daily + name: Push the image to Quay + command: | + docker pull quay.io/influxdb/oss-acceptance:${CIRCLE_SHA1} + docker tag quay.io/influxdb/oss-acceptance:${CIRCLE_SHA1} quay.io/influxdb/oss-acceptance:latest + docker push quay.io/influxdb/oss-acceptance:latest + + changelog: + docker: + - image: quay.io/influxdb/changelogger:d7093c409adedd8837ef51fa84be0d0f8319177a + steps: + - checkout + - run: + name: Generate changelog + command: | + PREFIX=2.x .circleci/scripts/get-version + source "${BASH_ENV}" + + if [[ "${RELEASE:-}" ]] + then + export DESCRIPTION="In addition to the list of changes below, please also see the [official release notes](https://docs.influxdata.com/influxdb/${VERSION}/reference/release-notes/influxdb/) for other important information about this release." + fi + + PRODUCT="OSS" changelogger - store_artifacts: - path: ~/project + path: changelog_artifacts/ + - persist_to_workspace: + root: . + paths: + - changelog_artifacts diff --git a/.circleci/package/control/postinst b/.circleci/package/control/postinst new file mode 100644 index 00000000000..8862ec34b44 --- /dev/null +++ b/.circleci/package/control/postinst @@ -0,0 +1,142 @@ +#!/bin/bash + +BIN_DIR=/usr/bin +DATA_DIR=/var/lib/influxdb +LOG_DIR=/var/log/influxdb +SCRIPT_DIR=/usr/lib/influxdb/scripts +LOGROTATE_DIR=/etc/logrotate.d +INFLUXD_CONFIG_PATH=/etc/influxdb/config.toml + +function install_init { + cp -f $SCRIPT_DIR/init.sh /etc/init.d/influxdb + chmod +x /etc/init.d/influxdb +} + +function install_systemd { + cp -f $SCRIPT_DIR/influxdb.service /lib/systemd/system/influxdb.service + systemctl enable influxdb +} + +function install_update_rcd { + update-rc.d influxdb defaults +} + +function install_chkconfig { + chkconfig --add influxdb +} + +function should_upgrade { + if [[ ! -s /etc/influxdb/influxdb.conf ]]; then + # No V1 config present, no upgrade needed. + return 1 + fi + + bolt_dir="/root/.influxdbv2 /var/lib/influxdb/.influxdbv2 /var/lib/influxdb" + for bolt in $bolt_dir; do + if [[ -s ${bolt}/influxd.bolt ]]; then + # Found a bolt file, assume previous v2 upgrade. + return 1 + fi + done + + return 0 +} + +function upgrade_notice { +cat << EOF + +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +! Important 1.x to 2.x Upgrade Notice ! +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + +Thank you for installing InfluxDB v2. Due to significant changes between +the v1 and v2 versions, upgrading to v2 requires additional steps. If +upgrading to v2 was not intended, simply re-install the v1 package now. + +An upgrade helper script is available that should be reviewed and executed +prior to starting the influxdb systemd service. 
In order to start the v2
+upgrade, execute the following:
+
+sudo /usr/share/influxdb/influxdb2-upgrade.sh
+
+Visit our website for complete details on the v1 to v2 upgrade process:
+https://docs.influxdata.com/influxdb/latest/upgrade/v1-to-v2/
+
+For new or upgrade installations, please review the getting started guide:
+https://docs.influxdata.com/influxdb/latest/get-started/
+
+EOF
+}
+
+function init_config {
+    mkdir -p $(dirname ${INFLUXD_CONFIG_PATH})
+
+    local config_path=${INFLUXD_CONFIG_PATH}
+    if [[ -s ${config_path} ]]; then
+        config_path=${INFLUXD_CONFIG_PATH}.defaults
+        echo "Config file ${INFLUXD_CONFIG_PATH} already exists, writing defaults to ${config_path}"
+    fi
+
+    cat << EOF > ${config_path}
+bolt-path = "/var/lib/influxdb/influxd.bolt"
+engine-path = "/var/lib/influxdb/engine"
+EOF
+}
+
+# Add defaults file, if it doesn't exist
+if [[ ! -s /etc/default/influxdb2 ]]; then
+cat << EOF > /etc/default/influxdb2
+INFLUXD_CONFIG_PATH=${INFLUXD_CONFIG_PATH}
+EOF
+fi
+
+# Remove legacy symlink, if it exists
+if [[ -L /etc/init.d/influxdb ]]; then
+    rm -f /etc/init.d/influxdb
+fi
+
+# Distribution-specific logic
+if [[ -f /etc/redhat-release ]]; then
+    # RHEL-variant logic
+    if command -v systemctl &>/dev/null; then
+        install_systemd
+    else
+        # Assuming sysv
+        install_init
+        install_chkconfig
+    fi
+elif [[ -f /etc/debian_version ]]; then
+    # Ownership for RH-based platforms is set in build.py via the `rpm-attr` option.
+    # We perform ownership change only for Debian-based systems.
+    # Moving these lines out of this if statement would make `rpm -V` fail after installation.
+    chown -R -L influxdb:influxdb $LOG_DIR
+    chown -R -L influxdb:influxdb $DATA_DIR
+    chmod 750 $LOG_DIR
+    chmod 750 $DATA_DIR
+
+    # Debian/Ubuntu logic
+    if command -v systemctl &>/dev/null; then
+        install_systemd
+    else
+        # Assuming sysv
+        install_init
+        install_update_rcd
+    fi
+elif [[ -f /etc/os-release ]]; then
+    source /etc/os-release
+    if [[ "$NAME" = "Amazon Linux" ]]; then
+        # Amazon Linux 2+ logic
+        install_systemd
+    elif [[ "$NAME" = "Amazon Linux AMI" ]]; then
+        # Amazon Linux logic
+        install_init
+        install_chkconfig
+    fi
+fi
+
+# Check upgrade status
+if should_upgrade; then
+    upgrade_notice
+else
+    init_config
+fi
diff --git a/scripts/post-uninstall.sh b/.circleci/package/control/postrm
similarity index 100%
rename from scripts/post-uninstall.sh
rename to .circleci/package/control/postrm
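
The `should_upgrade`/`init_config` branch above is the heart of the postinst: a v1 config with no v2 bolt file triggers the upgrade notice, anything else gets fresh defaults. A standalone sketch of that same decision, runnable outside the package (paths are copied from the postinst; the echo messages are ours, not the script's):

```bash
#!/bin/bash
# Standalone re-statement of the postinst upgrade check (a sketch, not the
# packaged script itself). Paths mirror the postinst above.
if [[ ! -s /etc/influxdb/influxdb.conf ]]; then
  echo "fresh install: defaults would be written to /etc/influxdb/config.toml"
  exit 0
fi
for bolt in /root/.influxdbv2 /var/lib/influxdb/.influxdbv2 /var/lib/influxdb; do
  if [[ -s ${bolt}/influxd.bolt ]]; then
    echo "found ${bolt}/influxd.bolt: already on v2, no upgrade notice"
    exit 0
  fi
done
echo "v1 config present and no bolt file: the 1.x-to-2.x upgrade notice would print"
```
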
diff --git a/.circleci/package/control/preinst b/.circleci/package/control/preinst
new file mode 100644
index 00000000000..271b5ad59f2
--- /dev/null
+++ b/.circleci/package/control/preinst
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+DATA_DIR=/var/lib/influxdb
+USER=influxdb
+GROUP=influxdb
+LOG_DIR=/var/log/influxdb
+
+if ! id influxdb &>/dev/null; then
+    useradd --system -U -M influxdb -s /bin/false -d $DATA_DIR
+fi
+
+# check if DATA_DIR exists
+if [ ! -d "$DATA_DIR" ]; then
+    mkdir -p $DATA_DIR
+    chown $USER:$GROUP $DATA_DIR
+fi
+
+# check if LOG_DIR exists
+if [ ! -d "$LOG_DIR" ]; then
+    mkdir -p $LOG_DIR
+    chown $USER:$GROUP $LOG_DIR
+fi
diff --git a/scripts/logrotate b/.circleci/package/fs/etc/logrotate.d/influxdb
similarity index 100%
rename from scripts/logrotate
rename to .circleci/package/fs/etc/logrotate.d/influxdb
diff --git a/.circleci/package/fs/usr/lib/influxdb/scripts/influxd-systemd-start.sh b/.circleci/package/fs/usr/lib/influxdb/scripts/influxd-systemd-start.sh
new file mode 100755
index 00000000000..001d6641db3
--- /dev/null
+++ b/.circleci/package/fs/usr/lib/influxdb/scripts/influxd-systemd-start.sh
@@ -0,0 +1,30 @@
+#!/bin/bash -e
+
+/usr/bin/influxd &
+PID=$!
+echo $PID > /var/lib/influxdb/influxd.pid
+
+PROTOCOL="http"
+BIND_ADDRESS=$(influxd print-config --key-name http-bind-address)
+TLS_CERT=$(influxd print-config --key-name tls-cert | tr -d '"')
+TLS_KEY=$(influxd print-config --key-name tls-key | tr -d '"')
+if [ -n "${TLS_CERT}" ] && [ -n "${TLS_KEY}" ]; then
+  echo "TLS cert and key found -- using https"
+  PROTOCOL="https"
+fi
+HOST=${BIND_ADDRESS%:*}
+HOST=${HOST:-"localhost"}
+PORT=${BIND_ADDRESS##*:}
+
+set +e
+attempts=0
+url="$PROTOCOL://$HOST:$PORT/ready"
+result=$(curl -k -s -o /dev/null $url -w %{http_code})
+while [ "${result:0:2}" != "20" ] && [ "${result:0:2}" != "40" ]; do
+  attempts=$(($attempts+1))
+  echo "InfluxDB API at $url unavailable after $attempts attempts..."
+  sleep 1
+  result=$(curl -k -s -o /dev/null $url -w %{http_code})
+done
+echo "InfluxDB started"
+set -e
diff --git a/scripts/influxdb.service b/.circleci/package/fs/usr/lib/influxdb/scripts/influxdb.service
similarity index 77%
rename from scripts/influxdb.service
rename to .circleci/package/fs/usr/lib/influxdb/scripts/influxdb.service
index 063d7034a51..d87052c1980 100644
--- a/scripts/influxdb.service
+++ b/.circleci/package/fs/usr/lib/influxdb/scripts/influxdb.service
@@ -10,9 +10,12 @@ User=influxdb
 Group=influxdb
 LimitNOFILE=65536
 EnvironmentFile=-/etc/default/influxdb2
-ExecStart=/usr/bin/influxd
+ExecStart=/usr/lib/influxdb/scripts/influxd-systemd-start.sh
 KillMode=control-group
 Restart=on-failure
+Type=forking
+PIDFile=/var/lib/influxdb/influxd.pid
+UMask=0027
 
 [Install]
 WantedBy=multi-user.target
diff --git a/scripts/init.sh b/.circleci/package/fs/usr/lib/influxdb/scripts/init.sh
similarity index 100%
rename from scripts/init.sh
rename to .circleci/package/fs/usr/lib/influxdb/scripts/init.sh
diff --git a/scripts/influxdb2-upgrade.sh b/.circleci/package/fs/usr/share/influxdb/influxdb2-upgrade.sh
similarity index 100%
rename from scripts/influxdb2-upgrade.sh
rename to .circleci/package/fs/usr/share/influxdb/influxdb2-upgrade.sh
diff --git a/query/influxql/testdata/series_agg_5.in.json b/.circleci/package/fs/var/lib/influxdb/.keep
similarity index 100%
rename from query/influxql/testdata/series_agg_5.in.json
rename to .circleci/package/fs/var/lib/influxdb/.keep
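
With `Type=forking` plus the blocking start script above, `systemctl start influxdb` now returns only once the HTTP API answers on `/ready`. A hand-run equivalent of that probe, assuming the default `http-bind-address` of `:8086` over plain HTTP (the packaged script accepts any 20x or 40x status; this simplified sketch waits for a 200):

```bash
# Simplified, hand-run version of the readiness probe performed by
# influxd-systemd-start.sh. Assumes the default bind address :8086.
until [ "$(curl -k -s -o /dev/null -w '%{http_code}' http://localhost:8086/ready)" = "200" ]; do
  sleep 1
done
echo "influxd is ready"
```
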
diff --git a/.circleci/scripts/build-package b/.circleci/scripts/build-package
new file mode 100755
index 00000000000..bedabee4adc
--- /dev/null
+++ b/.circleci/scripts/build-package
@@ -0,0 +1,170 @@
+#!/bin/bash
+set -o errexit \
+    -o nounset \
+    -o pipefail
+
+REGEX_RELEASE_VERSION='[[:digit:]]+\.[[:digit:]]+\.[[:digit:]]+'
+
+if [[ ${RELEASE:-} ]]
+then
+  # This ensures that release packages are built with valid versions.
+  # Unfortunately, `fpm` is fairly permissive with what version tags
+  # it accepts. This becomes a problem when `apt` or `dpkg` is used
+  # to install the package (both have strict version requirements).
+  if ! [[ ${VERSION} =~ ^${REGEX_RELEASE_VERSION}$ ]]
+  then
+    printf 'Release version is invalid!\n' >&2 && exit 1
+  fi
+fi
+
+function run_fpm()
+{
+  if [[ ${1} == rpm ]]
+  then
+    case ${ARCH} in
+      arm64)
+        ARCH=aarch64
+        ;;
+      amd64)
+        ARCH=x86_64
+        ;;
+    esac
+  fi
+
+  pushd "${workspace}"
+
+  fpm \
+    --log error \
+    `# package description` \
+    --name influxdb2 \
+    --vendor InfluxData \
+    --description 'Distributed time-series database.' \
+    --url https://influxdata.com \
+    --maintainer support@influxdb.com \
+    --license MIT \
+    `# package configuration` \
+    --input-type dir \
+    --output-type "${1}" \
+    --architecture "${ARCH}" \
+    --version "${VERSION}" \
+    --iteration 1 \
+    `# package relationships` \
+    --deb-recommends influxdb2-cli \
+    --conflicts influxdb \
+    --depends curl \
+    `# package scripts` \
+    --before-install control/preinst \
+    --after-install control/postinst \
+    --after-remove control/postrm \
+    `# package files` \
+    --chdir fs/ \
+    --package /artifacts \
+    --directories /var/lib/influxdb \
+    --rpm-defattrdir 750 \
+    --rpm-defattrfile 750
+
+  popd
+
+  # `goreleaser` stripped off the package revision and replaced '_' with
+  # '-'. Since the dockerfiles expect the previous naming convention,
+  # this rewrites the package names to match. Version information is
+  # also stored as metadata within the package.
+  case ${1} in
+    deb)
+      mv "/artifacts/influxdb2_${VERSION}-1_${ARCH}.deb" \
+        "/artifacts/influxdb2-${VERSION}-${ARCH}.deb"
+      ;;
+    rpm)
+      mv "/artifacts/influxdb2-${VERSION//-/_}-1.${ARCH}.rpm" \
+        "/artifacts/influxdb2-${VERSION//-/_}.${ARCH}.rpm"
+      ;;
+  esac
+}
+
+sudo bash <<'EOF'
+mkdir /artifacts && chown -R circleci: /artifacts
+EOF
+
+build_archive()
+{
+  workspace="$(mktemp -d)"
+
+  mkdir "${workspace}/influxdb2_${PLAT}_${ARCH}"
+
+  # `failglob` is required because `bin/influxd_${PLAT}_${ARCH}/*` may
+  # not expand. This will prevent the package from being built without
+  # the included binary files. This will also display as an error
+  # in the CircleCI interface.
+  shopt -s failglob
+  cp -p LICENSE README.md "bin/influxd_${PLAT}_${ARCH}/"* \
+    "${workspace}/influxdb2_${PLAT}_${ARCH}/"
+
+  pushd "${workspace}"
+
+  if [[ ${PLAT} != windows ]]
+  then
+    # Using `find .. -type f` to supply a list of files to `tar` serves two
+    # purposes. The first being that `tar` won't construct a '.' directory
+    # in the root of the tarfile. The second being that this excludes
+    # empty directories from the tarfile.
+    find "influxdb2_${PLAT}_${ARCH}/" -type f \
+      | tar -czf "/artifacts/influxdb2-${VERSION}-${PLAT}-${ARCH}.tar.gz" -T -
+  else
+    # windows uses zip
+    find "influxdb2_${PLAT}_${ARCH}/" -type f \
+      | zip -r "/artifacts/influxdb2-${VERSION}-${PLAT}-${ARCH}.zip" -@
+  fi
+
+  popd
+}
+
+build_package_linux()
+{
+  if [[ ${PLAT} != linux ]]
+  then
+    return 0
+  fi
+
+  workspace="$(mktemp -d)"
+
+  mkdir -p "${workspace}/fs/usr/bin"
+
+  # (see reasoning above)
+  shopt -s failglob
+  cp -rp .circleci/package/. "${workspace}/"
+  cp -p "bin/influxd_${PLAT}_${ARCH}/"* "${workspace}/fs/usr/bin"
+
+  run_fpm deb
+  run_fpm rpm
+}
+
+sign_artifacts()
+{
+  # If this is not a release version, don't sign the artifacts. This
+  # prevents unauthorized PRs and branches from being signed with our
+  # signing key.
+  if [[ ! ${RELEASE:-} ]]
+  then
+    return 0
+  fi
+
+  # CircleCI mangles environment variables with newlines. This key contains
+  # escaped newlines. For `gpg` to import the key, it requires `echo -e` to
+  # expand the escape sequences.
+  gpg --batch --import <<<"$(echo -e "${GPG_PRIVATE_KEY}")"
+
+  # TODO(bnpfeife): replace with code signing server
+  for target in /artifacts/*
+  do
+    gpg \
+      --batch \
+      --pinentry-mode=loopback \
+      --passphrase "${PASSPHRASE}" \
+      --detach-sign \
+      --armor "${target}"
+  done
+}
+
+build_archive
+build_package_linux
+sign_artifacts
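
Because `run_fpm` renames fpm's output, downstream jobs and the Dockerfiles see names like `influxdb2-<version>-<arch>.deb` rather than fpm's default `influxdb2_<version>-1_<arch>.deb`. A hypothetical spot-check of the renamed artifacts' embedded metadata (the file names below are illustrative, not produced by this PR):

```bash
# Hypothetical sanity check of renamed packages; adjust the names to match
# an actual /artifacts listing.
dpkg-deb --info /artifacts/influxdb2-2.1.0-amd64.deb | grep -E 'Package|Version|Architecture'
rpm --query --info --package /artifacts/influxdb2-2.1.0.x86_64.rpm | grep -E 'Name|Version|Release'
```
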
diff --git a/.circleci/scripts/get-version b/.circleci/scripts/get-version
new file mode 100755
index 00000000000..8f834a9f3ea
--- /dev/null
+++ b/.circleci/scripts/get-version
@@ -0,0 +1,48 @@
+#!/bin/bash
+set -o nounset \
+    -o errexit \
+    -o pipefail
+
+REGEX_TAG='v([[:digit:]]+)\.([[:digit:]]+)\.([[:digit:]]+)'
+
+function semver_tags()
+{
+  {
+    # Sometimes several release tags point to the same commit (see v1.9.0 and
+    # v1.9.1). This iterates through each tag and ensures that it conforms to
+    # semantic versioning requirements. Afterwards, the tags are sorted from
+    # latest to earliest. This is so packages use the latest version tag.
+    for tag in $(git tag --points-at HEAD)
+    do
+      if [[ ${tag} =~ ^${REGEX_TAG}$ ]]
+      then
+        printf '%s\n' "${tag}"
+      fi
+    done
+  } | sort --version-sort --reverse
+}
+
+TAG=$(head -n 1 <<<"$(semver_tags)")
+
+# If no tag could be found for the corresponding commit, assume that this
+# is being built from an unversioned branch. In which case, this will
+# construct a version that is compatible with Debian versioning:
+# ${PREFIX}-<< short commit hash >>
+if [[ ${TAG} =~ ^${REGEX_TAG}$ ]]
+then
+  cat <<EOF >>"${BASH_ENV}"
+export VERSION="${TAG:1}"
+export MAJOR=${BASH_REMATCH[1]}
+export MINOR=${BASH_REMATCH[2]}
+export PATCH=${BASH_REMATCH[3]}
+export RELEASE=1
+EOF
+else
+  cat <<EOF >>"${BASH_ENV}"
+export VERSION="${PREFIX}-$(git rev-parse --short HEAD)"
+export MAJOR=
+export MINOR=
+export PATCH=
+export RELEASE=
+EOF
+fi
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
index f8b0c2f5ea9..37027a0af7c 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -49,9 +49,8 @@ Generate profiles with the following commands for bugs related to performance, l
 
 ```sh
 # Commands should be run when the bug is actively happening.
-# Note: This command will run for at least 30 seconds.
-curl -o profiles.tar.gz "http://localhost:8086/debug/pprof/all?cpu=true"
-curl -o vars.txt "http://localhost:8086/debug/vars"
+# Note: This command will run for ~30 seconds.
+curl -o profiles.tar.gz "http://localhost:8086/debug/pprof/all?cpu=30s"
 iostat -xd 1 30 > iostat.txt
-# Attach the `profiles.tar.gz`, `vars.txt`, and `iostat.txt` output files.
+# Attach the `profiles.tar.gz` and `iostat.txt` output files.
 ```
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 017bb30f884..a90cd3e703c 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -1,14 +1,30 @@
-Closes #
+- Closes #
 
+### Required checklist
+- [ ] Sample config files updated (both `/etc` folder and `NewDemoConfig` methods) (influxdb and plutonium)
+- [ ] openapi swagger.yml updated (if modified API) - link openapi PR
+- [ ] Signed [CLA](https://influxdata.com/community/cla/) (if not already signed)
 
-Describe your proposed changes here.
+### Description
+1-3 sentences describing the PR (or link to well written issue)
 
-
+### Context
+Why was this added? What value does it add? What are risks/best practices?
-- [ ] [CHANGELOG.md](https://github.com/influxdata/influxdb/blob/master/CHANGELOG.md) updated with a link to the PR (not the Issue) -- [ ] [Well-formatted commit messages](https://www.conventionalcommits.org/en/v1.0.0-beta.3/) -- [ ] Rebased/mergeable -- [ ] Tests pass -- [ ] http/swagger.yml updated (if modified Go structs or API) -- [ ] Feature flagged (if modified API) -- [ ] Documentation updated or issue created (provide link to issue/pr) -- [ ] Signed [CLA](https://influxdata.com/community/cla/) (if not already signed) +### Affected areas (delete section if not relevant): +List of user-visible changes. As a user, what would I need to see in docs? +Examples: +CLI commands, subcommands, and flags +API changes +Configuration (sample config blocks) + +### Severity (delete section if not relevant) + i.e., ("recommend to upgrade immediately", "upgrade at your leisure", etc.) + +### Note for reviewers: +Check the semantic commit type: + - Feat: a feature with user-visible changes + - Fix: a bug fix that we might tell a user “upgrade to get this fix for your issue” + - Chore: version bumps, internal doc (e.g. README) changes, code comment updates, code formatting fixes… must not be user facing (except dependency version changes) + - Build: build script changes, CI config changes, build tool updates + - Refactor: non-user-visible refactoring + - Check the PR title: we should be able to put this as a one-liner in the release notes diff --git a/.github/semantic.yml b/.github/semantic.yml deleted file mode 100644 index 4234d444efa..00000000000 --- a/.github/semantic.yml +++ /dev/null @@ -1,4 +0,0 @@ -# docs: https://github.com/probot/semantic-pull-requests#configuration -# Always validate the PR title AND all the commits -titleAndCommits: true -allowMergeCommits: true diff --git a/.github/workflows/semantic.yml b/.github/workflows/semantic.yml new file mode 100644 index 00000000000..5fbae6dc995 --- /dev/null +++ b/.github/workflows/semantic.yml @@ -0,0 +1,10 @@ +--- +name: "Semantic PR and Commit Messages" + +on: + pull_request: + types: [opened, reopened, synchronize, edited] + +jobs: + semantic: + uses: influxdata/validate-semantic-github-messages/.github/workflows/semantic.yml@main diff --git a/.gitignore b/.gitignore index f066bdaed6e..7dd8d22efeb 100644 --- a/.gitignore +++ b/.gitignore @@ -13,10 +13,16 @@ vendor # binary databases influxd.bolt *.db +*.sqlite -# GPG private keys +# Files generated in CI +rustup-init.sh private.key +# TLS keys generated for testing +test.crt +test.key + # Project distribution /dist @@ -26,39 +32,22 @@ private.key /fluxd /transpilerd /bin +/internal/cmd/kvmigrate/kvmigrate # Project tools that you might install with go build. /editorconfig-checker /staticcheck -ui/node_modules -ui/coverage -ui/npm-error.log -ui/build -ui/.cache -ui/package-lock.json -ui/junit.xml - -# e2e test artifacts -ui/cypress/screenshots -ui/cypress/videos - -ui/src/api/.gitignore -ui/src/api/.openapi-generator-ignore -ui/src/api/.openapi-generator/VERSION -ui/src/api/git_push.sh - -# UI generated typescript types -ui/src/client/generatedRoutes.ts - -http/swagger_gen.go +# Generated static assets +/static/data +/static/static_gen.go +/changelog_artifacts # The below files are generated with make generate # These are used with the assests go build tag. 
chronograf/canned/bin_gen.go chronograf/dist/dist_gen.go chronograf/server/swagger_gen.go -http/swagger_gen.go # Ignore TSM/TSI testdata binary files tsdb/tsi1/testdata @@ -136,8 +125,6 @@ man/*.1.gz # test outputs /test-results.xml junit-results -cypress/screenshots -cypress/videos # profile data /prof @@ -154,6 +141,5 @@ cypress/videos .influxdbv2/ .profile .rustup/ -.yarnrc go/ goreleaser-install diff --git a/.goreleaser-nightly.yml b/.goreleaser-nightly.yml deleted file mode 100644 index 77dc0b3ae34..00000000000 --- a/.goreleaser-nightly.yml +++ /dev/null @@ -1,143 +0,0 @@ -project_name: influxdb2 -builds: - - id: influx - goos: - - linux - - darwin - goarch: - - amd64 - - arm64 - ignore: - - goos: darwin - goarch: arm64 - main: ./cmd/influx/ - flags: - - -tags={{if eq .Os "linux"}}osusergo,netgo,static_build{{if not (eq .Arch "amd64")}},noasm{{end}}{{end}} - env: - - GO111MODULE=on - - CGO_ENABLED=1 - - CC=xcc - - PKG_CONFIG=$GOPATH/bin/pkg-config - - MACOSX_DEPLOYMENT_TARGET=10.11 - ldflags: - - -s -w -X main.version={{.Version}} -X main.commit={{.ShortCommit}} -X main.date={{.Date}} {{if eq .Os "linux"}}-extldflags "-fno-PIC -static -Wl,-z,stack-size=8388608"{{end}} - binary: influx - - - id: influxd - goos: - - linux - - darwin - goarch: - - amd64 - - arm64 - ignore: - - goos: darwin - goarch: arm64 - main: ./cmd/influxd/ - flags: - - -tags=assets{{if eq .Os "linux"}},osusergo,netgo,static_build{{if not (eq .Arch "amd64")}},noasm{{end}}{{end}} - env: - - GO111MODULE=on - - CGO_ENABLED=1 - - CC=xcc - - PKG_CONFIG=$GOPATH/bin/pkg-config - - MACOSX_DEPLOYMENT_TARGET=10.11 - ldflags: - - -s -w -X main.version={{.Version}} -X main.commit={{.ShortCommit}} -X main.date={{.Date}} {{if eq .Os "linux"}}-extldflags "-fno-PIC -static -Wl,-z,stack-size=8388608"{{end}} - binary: influxd - hooks: - pre: make generate - -nfpms: - - id: "influxdb2" - builds: ["influx", "influxd"] - formats: - - deb - - rpm - bindir: /usr/bin - contents: - - src: scripts/init.sh - dst: /usr/lib/influxdb/scripts/init.sh - - src: scripts/influxdb.service - dst: /usr/lib/influxdb/scripts/influxdb.service - - src: scripts/logrotate - dst: /etc/logrotate.d/influxdb - - src: scripts/influxdb2-upgrade.sh - dst: /usr/share/influxdb/influxdb2-upgrade.sh - scripts: - preinstall: "scripts/pre-install.sh" - postinstall: "scripts/post-install.sh" - postremove: "scripts/post-uninstall.sh" - conflicts: - - influxdb - overrides: - rpm: - replacements: - amd64: x86_64 - file_name_template: "influxdb2-nightly.{{ .Arch }}" - deb: - file_name_template: "influxdb2_nightly_{{ .Arch }}" - vendor: InfluxData - homepage: https://influxdata.com - maintainer: support@influxdb.com - description: Distributed time-series database. 
- license: MIT - -archives: - - format: tar.gz - wrap_in_directory: true - format_overrides: - - goos: windows - format: zip - name_template: "influxdb2_nightly_{{ .Os }}_{{ .Arch }}" - files: - - LICENSE - - README.md - -blobs: - - provider: "s3" - bucket: "dl.influxdata.com" - region: "us-east-1" - folder: "platform/nightlies/" - -checksum: - name_template: "influxdb2_nightly.sha256" - algorithm: sha256 - -dockers: - - goos: linux - goarch: amd64 - binaries: - - influxd - - influx - image_templates: - - "quay.io/influxdb/influxdb-amd64:nightly" - dockerfile: docker/influxd/Dockerfile - extra_files: - - docker/influxd/entrypoint.sh - build_flag_templates: - - "--platform=linux/amd64" - use_buildx: true - - goos: linux - goarch: arm64 - binaries: - - influxd - - influx - image_templates: - - "quay.io/influxdb/influxdb-arm64v8:nightly" - dockerfile: docker/influxd/Dockerfile - extra_files: - - docker/influxd/entrypoint.sh - build_flag_templates: - - "--platform=linux/arm64/v8" - use_buildx: true - -docker_manifests: - - name_template: "quay.io/influxdb/influxdb:nightly" - image_templates: - - "quay.io/influxdb/influxdb-amd64:nightly" - - "quay.io/influxdb/influxdb-arm64v8:nightly" - -# Do not make github release -release: - disable: true diff --git a/.goreleaser.yml b/.goreleaser.yml deleted file mode 100644 index 4b10d402e02..00000000000 --- a/.goreleaser.yml +++ /dev/null @@ -1,164 +0,0 @@ -project_name: influxdb2 -builds: - - id: influx - goos: - - linux - - darwin - goarch: - - amd64 - - arm64 - ignore: - - goos: darwin - goarch: arm64 - main: ./cmd/influx/ - flags: - - -tags={{if eq .Os "linux"}}osusergo,netgo,static_build{{if not (eq .Arch "amd64")}},noasm{{end}}{{end}} - env: - - GO111MODULE=on - - CGO_ENABLED=1 - - CC=xcc - - PKG_CONFIG=$GOPATH/bin/pkg-config - - MACOSX_DEPLOYMENT_TARGET=10.11 - ldflags: - - -s -w -X main.version={{.Version}} -X main.commit={{.ShortCommit}} -X main.date={{.Date}} {{if eq .Os "linux"}}-extldflags "-fno-PIC -static -Wl,-z,stack-size=8388608"{{end}} - binary: influx - - - id: influxd - goos: - - linux - - darwin - goarch: - - amd64 - - arm64 - ignore: - - goos: darwin - goarch: arm64 - main: ./cmd/influxd/ - flags: - - -tags=assets{{if eq .Os "linux"}},osusergo,netgo,static_build{{if not (eq .Arch "amd64")}},noasm{{end}}{{end}} - env: - - GO111MODULE=on - - CGO_ENABLED=1 - - CC=xcc - - PKG_CONFIG=$GOPATH/bin/pkg-config - - MACOSX_DEPLOYMENT_TARGET=10.11 - ldflags: - - -s -w -X main.version={{.Version}} -X main.commit={{.ShortCommit}} -X main.date={{.Date}} {{if eq .Os "linux"}}-extldflags "-fno-PIC -static -Wl,-z,stack-size=8388608"{{end}} - binary: influxd - hooks: - pre: make generate - -nfpms: - - id: "influxdb2" - builds: ["influx", "influxd"] - formats: - - deb - - rpm - bindir: /usr/bin - contents: - - src: scripts/init.sh - dst: /usr/lib/influxdb/scripts/init.sh - - src: scripts/influxdb.service - dst: /usr/lib/influxdb/scripts/influxdb.service - - src: scripts/logrotate - dst: /etc/logrotate.d/influxdb - - src: scripts/influxdb2-upgrade.sh - dst: /usr/share/influxdb/influxdb2-upgrade.sh - scripts: - preinstall: "scripts/pre-install.sh" - postinstall: "scripts/post-install.sh" - postremove: "scripts/post-uninstall.sh" - conflicts: - - influxdb - overrides: - rpm: - replacements: - amd64: x86_64 - file_name_template: "influxdb2-{{ .Version }}.{{ .Arch }}" - deb: - file_name_template: "influxdb2_{{ .Version }}_{{ .Arch }}" - vendor: InfluxData - homepage: https://influxdata.com - maintainer: support@influxdb.com - description: Distributed 
time-series database. - license: MIT - -archives: - - id: influxdb2_client - builds: ["influx"] - format: tar.gz - wrap_in_directory: true - format_overrides: - - goos: windows - format: zip - name_template: "influxdb2_client_{{ .Version }}_{{ .Os }}_{{ .Arch }}" - files: - - LICENSE - - README.md - - id: influxdb2_single_binary - format: tar.gz - wrap_in_directory: true - format_overrides: - - goos: windows - format: zip - name_template: "influxdb2-{{ .Version }}_{{ .Os }}_{{ .Arch }}" - files: - - LICENSE - - README.md - -blobs: - - provider: "s3" - bucket: "dl.influxdata.com" - region: "us-east-1" - folder: "influxdb/releases/" - -checksum: - name_template: "influxdb2_{{ .Version }}.sha256" - algorithm: sha256 - -dockers: - - goos: linux - goarch: amd64 - binaries: - - influxd - - influx - image_templates: - - "quay.io/influxdb/influxdb-amd64:{{ .Tag }}" - dockerfile: docker/influxd/Dockerfile - extra_files: - - docker/influxd/entrypoint.sh - build_flag_templates: - - "--platform=linux/amd64" - use_buildx: true - - goos: linux - goarch: arm64 - binaries: - - influxd - - influx - image_templates: - - "quay.io/influxdb/influxdb-arm64v8:{{ .Tag }}" - dockerfile: docker/influxd/Dockerfile - extra_files: - - docker/influxd/entrypoint.sh - build_flag_templates: - - "--platform=linux/arm64/v8" - use_buildx: true - -docker_manifests: - - name_template: "quay.io/influxdb/influxdb:{{ .Tag }}" - image_templates: - - "quay.io/influxdb/influxdb-amd64:{{ .Tag }}" - - "quay.io/influxdb/influxdb-arm64v8:{{ .Tag }}" - -signs: - - signature: "${artifact}.asc" - cmd: gpg - args: [ "--passphrase", "{{.Env.PASSPHRASE}}", "--pinentry-mode=loopback", "--batch", "--armor", "--detach-sign", "${artifact}"] - artifacts: all - -# Do not make github release -release: - name_template: "v{{.Version}}" - prerelease: auto # when we go to GA remove this line - draft: false # This needs to be false, or the container image will not be published - disable: true # we keep the artifacts in S3 and link from the website diff --git a/CHANGELOG.md b/CHANGELOG.md index 420223df266..371137e6bbe 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,1213 +1,5 @@ -## unreleased +The Changelog has moved! -### Docker +You can find it at the following URL: -#### ARM64 -This release extends the Docker builds hosted in `quay.io` to support the `linux/arm64` platform. - -#### 2.x nightly images -Prior to this release, competing nightly builds caused the `nightly` Docker tag to contain outdated -binaries. This conflict has been fixed, and the image tagged with `nightly` will now contain `2.x` -binaries built from the `HEAD` of the `master` branch. - -### Breaking Changes - -#### inmem index option removed -This release fully removes the `inmem` indexing option, along with the associated config options: -* `max-series-per-database` -* `max-values-per-tag` - -Replacement `tsi1` indexes will be automatically generated on startup for shards that need it. - -### Bug Fixes - -1. [20339](https://github.com/influxdata/influxdb/pull/20339): Include upgrade helper script in goreleaser manifest. -1. [20348](https://github.com/influxdata/influxdb/pull/20348): Don't show the upgrade notice on fresh `influxdb2` installs. -1. [20348](https://github.com/influxdata/influxdb/pull/20348): Ensure `config.toml` is initialized on fresh `influxdb2` installs. -1. [20349](https://github.com/influxdata/influxdb/pull/20349): Ensure `influxdb` service sees default env variables when running under `init.d`. -1. 
[20317](https://github.com/influxdata/influxdb/pull/20317): Don't ignore failures to set password during initial user onboarding. -1. [20362](https://github.com/influxdata/influxdb/pull/20362): Don't overwrite stack name/description on `influx stack update`. -1. [20355](https://github.com/influxdata/influxdb/pull/20355): Fix timeout setup for `influxd` graceful shutdown. -1. [20387](https://github.com/influxdata/influxdb/pull/20387): Improve error message shown when `influx` CLI can't find an org by name. -1. [20380](https://github.com/influxdata/influxdb/pull/20380): Remove duplication from task error messages. -1. [20313](https://github.com/influxdata/influxdb/pull/20313): Automatically build `tsi1` indexes for shards that need it instead of falling back to `inmem`. -1. [20313](https://github.com/influxdata/influxdb/pull/20313): Fix logging initialization for storage engine. -1. [20442](https://github.com/influxdata/influxdb/pull/20442): Don't return 500 codes for partial write failures. -1. [20440](https://github.com/influxdata/influxdb/pull/20440): Add confirmation step w/ file sizes before copying data files in `influxd upgrade`. -1. [20409](https://github.com/influxdata/influxdb/pull/20409): Improve messages in DBRP API validation errors. - -## v2.0.3 [2020-12-14] - -### ARM Support - -This release includes our initial ARM64 preview build. - -### Breaking Changes - -#### influxd upgrade -Previously, `influxd upgrade` would attempt to write upgraded `config.toml` files into the same directory as the source -`influxdb.conf` file. If this failed, a warning would be logged and `config.toml` would be written into the `HOME` directory. - -This release breaks this behavior in two ways: -1. By default, `config.toml` is now written into the same directory as the Bolt DB and engine files (`~/.influxdbv2/`) -2. If writing upgraded config fails, the `upgrade` process exits with an error instead of falling back to the `HOME` directory - -Users can use the new `--v2-config-path` option to override the output path for upgraded config if they can't or don't -want to use the default. - -#### v2 packaging -Based on community feedback, the v2 deb and rpm packaging has been improved to avoid confusion between versions. The package -name is now influxdb2 and conflicts with any previous influxdb package (including initial 2.0.0, 2.0.1, and 2.0.2 packages). -Additionally, v2 specific path defaults are now defined and helper scripts are provided for `influxd upgrade` and cleanup cases. - -### Features - -1. [20123](https://github.com/influxdata/influxdb/pull/20123): Allow password to be specified as a CLI option in `influx v1 auth create`. -1. [20123](https://github.com/influxdata/influxdb/pull/20123): Allow password to be specified as a CLI option in `influx v1 auth set-password`. -1. [20110](https://github.com/influxdata/influxdb/pull/20110): Allow for users to specify where V2 config should be written in `influxd upgrade`. -1. [20204](https://github.com/influxdata/influxdb/pull/20204): Improve ID-related error messages for `influx v1 dbrp` commands. -1. [20236](https://github.com/influxdata/influxdb/pull/20236): Delete with predicate. -1. [20322](https://github.com/influxdata/influxdb/pull/20322): Upgrade Flux to v0.99.0. -1. [20327](https://github.com/influxdata/influxdb/pull/20327): Upgrade flux-lsp-browser to v0.5.26. - -### Bug Fixes - -1. 
[20110](https://github.com/influxdata/influxdb/pull/20110): Use V2 directory for default V2 config path in `influxd upgrade`. -1. [20137](https://github.com/influxdata/influxdb/pull/20137): Fix panic when writing a point with 100 tags. Thanks @foobar! -1. [20151](https://github.com/influxdata/influxdb/pull/20151): Don't log bodies of V1 write requests. -1. [20097](https://github.com/influxdata/influxdb/pull/20097): Ensure Index.Walk fetches matching foreign keys only. -1. [20149](https://github.com/influxdata/influxdb/pull/20149): Enforce max value of 2147483647 on query concurrency to avoid startup panic. -1. [20149](https://github.com/influxdata/influxdb/pull/20149): Enforce max value of 2147483647 on query queue size to avoid startup panic. -1. [20168](https://github.com/influxdata/influxdb/pull/20168): Auto-migrate existing DBRP mappings from old schema to avoid panic. -1. [20201](https://github.com/influxdata/influxdb/pull/20201): Optimize shard lookup in groups containing only one shard. Thanks @StoneYunZhao! -1. [20155](https://github.com/influxdata/influxdb/pull/20155): Respect the `--name` option in `influx setup` whether configs already exist or not. -1. [20155](https://github.com/influxdata/influxdb/pull/20155): Allow for 0 (infinite) values for `--retention` in `influx setup`. -1. [20305](https://github.com/influxdata/influxdb/pull/20305): Set v2 default paths and provide upgrade helper scripts in release packages - -## v2.0.2 [2020-11-19] - -### Features - -1. [19979](https://github.com/influxdata/influxdb/pull/19979): Added functionality to filter task runs by time. -1. [20036](https://github.com/influxdata/influxdb/pull/20036): Warn if V1 users are upgraded, but V1 auth wasn't enabled. -1. [20039](https://github.com/influxdata/influxdb/pull/20039): Export 1.x CQs as part of `influxd upgrade`. -1. [20053](https://github.com/influxdata/influxdb/pull/20053): Upgrade Flux to v0.95.0. -1. [20058](https://github.com/influxdata/influxdb/pull/20058): UI: Upgrade flux-lsp-browser to v0.5.23. -1. [20067](https://github.com/influxdata/influxdb/pull/20067): Add DBRP cli commands as `influxd v1 dbrp`. - -### Bug Fixes - -1. [19987](https://github.com/influxdata/influxdb/pull/19987): Fix various typos. Thanks @kumakichi! -1. [19991](https://github.com/influxdata/influxdb/pull/19991): Use --skip-verify flag for backup/restore CLI command. -1. [19995](https://github.com/influxdata/influxdb/pull/19995): Don't auto-print help on influxd errors -1. [20008](https://github.com/influxdata/influxdb/pull/20008): Add locking during TSI iterator creation. -1. [20012](https://github.com/influxdata/influxdb/pull/20012): Validate input paths to `influxd upgrade` up-front. -1. [20015](https://github.com/influxdata/influxdb/pull/20015): Add same site strict flag to session cookie. -1. [20017](https://github.com/influxdata/influxdb/pull/20017): Don't include duplicates for SHOW DATABASES -1. [20064](https://github.com/influxdata/influxdb/pull/20064): Ensure Flux reads across all shards. -1. [20047](https://github.com/influxdata/influxdb/pull/20047): Allow scraper to ignore insecure certificates on a target. Thanks @cmackenzie1! -1. [20076](https://github.com/influxdata/influxdb/pull/20076): Remove internal `influxd upgrade` subcommands from help text. -1. 
[20074](https://github.com/influxdata/influxdb/pull/20074): Use default DBRP mapping on V1 write when no RP is specified. -1. [20091](https://github.com/influxdata/influxdb/pull/20091): Make the DBRP http API match the swagger spec. - -## v2.0.1 [2020-11-10] - -### Bug Fixes - -1. [19918](https://github.com/influxdata/influxdb/pull/19918): Swagger: add operationId to /delete -1. [19967](https://github.com/influxdata/influxdb/pull/19967): Upgrade: add log-level option -1. [19969](https://github.com/influxdata/influxdb/pull/19969): Check for existing 2.x CLI configs file -1. [19971](https://github.com/influxdata/influxdb/pull/19971): Swagger: remove Invites from swagger -1. [19972](https://github.com/influxdata/influxdb/pull/19972): Remove obsolete unused option (influx-command-path) -1. [19980](https://github.com/influxdata/influxdb/pull/19980): check write permission in legacy write path - -## v2.0.0 [2020-11-09] - -### Features - -1. [19935](https://github.com/influxdata/influxdb/pull/19935): Improve the UI for the influx v1 auth commands -1. [19940](https://github.com/influxdata/influxdb/pull/19940): Update Flux to v0.94.0 -1. [19943](https://github.com/influxdata/influxdb/pull/19943): Upgrade flux-lsp-browser to v0.5.22 -1. [19946](https://github.com/influxdata/influxdb/pull/19946): Adding RAS telegraf input - -### Bug Fixes - -1. [19924](https://github.com/influxdata/influxdb/pull/19924): Remove unused 'security-script' option from upgrade command -1. [19925](https://github.com/influxdata/influxdb/pull/19937): Create CLI configs in `influxd upgrade` -1. [19928](https://github.com/influxdata/influxdb/pull/19928): Fix parsing of retention policy CLI args in `influx setup` and `influxd upgrade` -1. [19930](https://github.com/influxdata/influxdb/pull/19930): Replace 0 with MaxInt when upgrading query-concurrency -1. [19937](https://github.com/influxdata/influxdb/pull/19937): Create CLI configs -1. [19939](https://github.com/influxdata/influxdb/pull/19939): Make influxd help more specific -1. [19945](https://github.com/influxdata/influxdb/pull/19945): Allow write-only V1 tokens to find DBRPs -1. [19947](https://github.com/influxdata/influxdb/pull/19947): Updating v1 auth description -1. [19952](https://github.com/influxdata/influxdb/pull/19952): Use `db`/`rp` naming convention when migrating DBs to buckets -1. [19956](https://github.com/influxdata/influxdb/pull/19956): Improve help for --no-password switch -1. [19959](https://github.com/influxdata/influxdb/pull/19959): Use 10 instead of MaxInt when rewriting query-concurrency -1. [19960](https://github.com/influxdata/influxdb/pull/19960): Remove bucket and mapping auto-creation from v1 /write API -1. [19885](https://github.com/influxdata/influxdb/pull/19875): Misuse of reflect.SliceHeader - -## v2.0.0-rc.4 [2020-11-05] - -### Features - -1. [19854](https://github.com/influxdata/influxdb/pull/19854): Use v1 authorization for users upgrade -1. [19855](https://github.com/influxdata/influxdb/pull/19855): Enable window pushdowns -1. [19864](https://github.com/influxdata/influxdb/pull/19864): Implement backup/restore CLI subcommands -1. [19865](https://github.com/influxdata/influxdb/pull/19865): Implementation of v1 authorization -1. 
[19879](https://github.com/influxdata/influxdb/pull/19879): Make sure the query plan nodes have unique ids
-1. [19881](https://github.com/influxdata/influxdb/pull/19881): Update Flux to v0.93.0
-
-### Bug Fixes
-
-1. [19685](https://github.com/influxdata/influxdb/pull/19685): Cloning tasks makes actions shared in task list view
-1. [19712](https://github.com/influxdata/influxdb/pull/19712): Reduce filesize of influx binary
-1. [19819](https://github.com/influxdata/influxdb/pull/19819): Isolate telegraf config service and remove URM interactions
-1. [19853](https://github.com/influxdata/influxdb/pull/19853): Use updated HTTP client for authorization service
-1. [19856](https://github.com/influxdata/influxdb/pull/19856): Make tagKeys and tagValues work for edge cases involving fields
-1. [19870](https://github.com/influxdata/influxdb/pull/19870): Correctly parse float as 64-bits
-1. [19873](https://github.com/influxdata/influxdb/pull/19873): Add simple metrics related to installed templates
-1. [19885](https://github.com/influxdata/influxdb/pull/19885): Remove extra multiplication of retention policies in onboarding
-1. [19887](https://github.com/influxdata/influxdb/pull/19887): Use fluxinit package to init flux library instead of builtin
-1. [19886](https://github.com/influxdata/influxdb/pull/19886): Add Logger to constructor function to ensure log field is initialized
-1. [19894](https://github.com/influxdata/influxdb/pull/19894): Return empty iterator instead of null in tagValues
-1. [19899](https://github.com/influxdata/influxdb/pull/19899): Docs: flux 0.92 functions
-1. [19908](https://github.com/influxdata/influxdb/pull/19908): Fix /ready response content type
-
-## v2.0.0-rc.3 [2020-10-29]
-
-### Features
-
-1. [19807](https://github.com/influxdata/influxdb/pull/19807): Enable window agg mean pushdown
-1. [19813](https://github.com/influxdata/influxdb/pull/19813): Aggregate array cursors
-1. [19815](https://github.com/influxdata/influxdb/pull/19815): Create a v1 authorization service
-1. [19826](https://github.com/influxdata/influxdb/pull/19826): Update Flux to v0.91.0
-1. [19829](https://github.com/influxdata/influxdb/pull/19829): Extend CLI with v1 authorization commands
-1. [19839](https://github.com/influxdata/influxdb/pull/19839): Add tick generation properties and legendColorizeRows
-1. [19840](https://github.com/influxdata/influxdb/pull/19840): Add bcrypt password support to v1 authorizations
-1. [19850](https://github.com/influxdata/influxdb/pull/19850): Update generated ticks into an array of properties for each axis
-
-### Bug Fixes
-
-1. [19784](https://github.com/influxdata/influxdb/pull/19784): UI: bump papaparse from 4.6.3 to 5.2.0
-1. [19802](https://github.com/influxdata/influxdb/pull/19802): Docs: update PostDBRP docs to reflect the mutually exclusive requirement of org vs orgID
-1. [19804](https://github.com/influxdata/influxdb/pull/19804): Notifications: move rule service into own package
-1. [19816](https://github.com/influxdata/influxdb/pull/19816): Type-convert fs.Bavail for portability
-1. [19818](https://github.com/influxdata/influxdb/pull/19818): Notifications: isolate endpoint service
-1. [19823](https://github.com/influxdata/influxdb/pull/19823): Clear Logout
-1. [19825](https://github.com/influxdata/influxdb/pull/19825): Docs: Update FUZZ.md
-1. [19828](https://github.com/influxdata/influxdb/pull/19828): Add 1.x compatible endpoints to swagger
-1. [19833](https://github.com/influxdata/influxdb/pull/19833): allow newIndexSeriesCursor() to accept an influxql.Expr
-1. [19834](https://github.com/influxdata/influxdb/pull/19834): Docs: Fix typos in http/swagger.yml
-1. [19836](https://github.com/influxdata/influxdb/pull/19836): UI: import flux-lsp v0.5.21
-1. [19846](https://github.com/influxdata/influxdb/pull/19846): prune some unreferenced packages
-
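Since rc.3 above introduces the v1 authorization service and CLI commands, a hedged usage sketch may help; the `--password` option is the one mentioned in the v2.0.3 notes earlier, the bucket IDs are placeholders, and the other flag spellings should be verified with `influx v1 auth create -h`:

```sh
# Create a 1.x-compatible authorization scoped to a bucket; IDs are placeholders.
influx v1 auth create \
  --username legacy-writer \
  --password "s3cret" \
  --read-bucket 1234567890abcdef \
  --write-bucket 1234567890abcdef
```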
-## v2.0.0-rc.2 [2020-10-21]
-
-### Features
-
-1. [19725](https://github.com/influxdata/influxdb/pull/19725): Add window agg result set
-1. [19740](https://github.com/influxdata/influxdb/pull/19740): Provide means to remove stack without confirmation
-1. [19750](https://github.com/influxdata/influxdb/pull/19750): Return error on failed resource addition
-1. [19774](https://github.com/influxdata/influxdb/pull/19774): Update Flux to v0.90.0
-
-### Bug Fixes
-
-1. [19465](https://github.com/influxdata/influxdb/pull/19465): Use valid flux in pkger test templates
-1. [19773](https://github.com/influxdata/influxdb/pull/19773): Upgrade: fall back to user's home when saving upgraded config
-1. [19775](https://github.com/influxdata/influxdb/pull/19775): Telegraf plugin updates (remove RAS for now)
-1. [19776](https://github.com/influxdata/influxdb/pull/19776): TimeMachine: change derivative to 1s
-1. [19789](https://github.com/influxdata/influxdb/pull/19789): Launcher: Switch to AuthorizationService from authorization package
-1. [19780](https://github.com/influxdata/influxdb/pull/19780): Upgrade: proper default 2.x config filename
-1. [19781](https://github.com/influxdata/influxdb/pull/19781): Upgrade: fix typos and grammar errors
-
-## v2.0.0-rc.1 [2020-10-14]
-
-### Features
-
-1. [19641](https://github.com/influxdata/influxdb/pull/19641): Added `influxd upgrade` command for upgrading from 1.x to 2.0
-1. [19746](https://github.com/influxdata/influxdb/pull/19746): Added Intel RDT and RAS Daemon telegraf plugins
-1. [19731](https://github.com/influxdata/influxdb/pull/19731): Upgraded Flux to v0.89.0
-
-### Bug Fixes
-
-1. [19708](https://github.com/influxdata/influxdb/pull/19708): Scrapers not working in RC0
-1. [19732](https://github.com/influxdata/influxdb/pull/19732): Update default value of list tasks influx CLI command to 100
-1. [19710](https://github.com/influxdata/influxdb/pull/19710): InfluxDB Templates: allow the same duration unit identifiers that the tasks api allows
-1. [19700](https://github.com/influxdata/influxdb/pull/19700): InfluxDB Templates: preserve cell colors on export/import
-1. [19695](https://github.com/influxdata/influxdb/pull/19695): Influx CLI: fix an issue where a single telegraf config was not being returned
-1. [19593](https://github.com/influxdata/influxdb/pull/19593): Don't allow short passwords in `influx setup`
-
-## v2.0.0-rc.0 [2020-09-29]
-
-### Breaking Changes
-
-In the interests of simplifying the migration for existing users of InfluxDB 1.x, this release includes significant breaking changes.
-
-**Upgrading from previous beta builds of `influxd` is not supported**
-
-In order to continue using `influxd` betas, users will be required to move all existing data out of their `~/.influxdbv2` (or equivalent) path, including `influxd.bolt`. This means all existing dashboards, tasks, integrations, alerts, users and tokens will need to be recreated. The `influx export all` command may be used to export and re-import most of this data.
-
-At this time, there is no tooling to convert existing time series data from previous beta releases. If data from a prior beta release is found, `influxd` will refuse to start.
-
-We have also changed the default port of InfluxDB from 9999 back to 8086. If you still would like to run on port 9999, you can start influxd with the `--http-bind-address` option. You will also need to update any InfluxDB CLI config profiles with the new port number.
-
-1. [19446](https://github.com/influxdata/influxdb/pull/19446): Port TSM1 storage engine
-1. [19494](https://github.com/influxdata/influxdb/pull/19494): Changing the default port from 9999 to 8086
-1. [19636](https://github.com/influxdata/influxdb/pull/19636): Disable unimplemented delete with predicate API
-
-### Features
-
-1. [18779](https://github.com/influxdata/influxdb/pull/18779): Add new processing options and enhancements to influx write.
-1. [19246](https://github.com/influxdata/influxdb/pull/19246): Redesign load data page to increase discovery and ease of use
-1. [19334](https://github.com/influxdata/influxdb/pull/19334): Add --active-config flag to influx to set config for single command
-1. [19219](https://github.com/influxdata/influxdb/pull/19219): List buckets via the API now supports after (ID) parameter as an alternative to offset.
-1. [19390](https://github.com/influxdata/influxdb/pull/19390): Record last success and failure run times in the Task
-1. [19402](https://github.com/influxdata/influxdb/pull/19402): Inject Task's LatestSuccess Timestamp In Flux Extern
-1. [19433](https://github.com/influxdata/influxdb/pull/19433): Add option to dump raw query results in CLI
-1. [19506](https://github.com/influxdata/influxdb/pull/19506): Add TSM 1.x storage options as flags
-1. [19508](https://github.com/influxdata/influxdb/pull/19508): Add subset of InfluxQL coordinator options as flags
-1. [19457](https://github.com/influxdata/influxdb/pull/19457): Add ability to export resources by name via the CLI
-1. [19640](https://github.com/influxdata/influxdb/pull/19640): Turn on Community Templates
-1. [19663](https://github.com/influxdata/influxdb/pull/19663): Added InfluxDB v2 Listener, NSD, OPC-UA, and Windows Event Log to the sources page
-1. [19662](https://github.com/influxdata/influxdb/pull/19662): Add `max-line-length` switch to `influx write` command to address `token too long` errors for large inputs
-1. [19660](https://github.com/influxdata/influxdb/pull/19660): Add --rate-limit option to `influx write`.
-1. [19740](https://github.com/influxdata/influxdb/pull/19740): Add `--force` option to `influx stack rm` to skip confirmation
-
-### Bug Fixes
-
-1. [19331](https://github.com/influxdata/influxdb/pull/19331): Add description to auth influx command outputs.
-1. [19392](https://github.com/influxdata/influxdb/pull/19392): Include the edge of the boundary we are observing.
-1. [19453](https://github.com/influxdata/influxdb/pull/19453): Warn about duplicate tag names during influx write csv.
-1. [19466](https://github.com/influxdata/influxdb/pull/19466): Do not override existing line part in group annotation.
-1. [19637](https://github.com/influxdata/influxdb/pull/19637): Added PATCH to the list of allowed methods
-
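The rc.0 notes above describe the beta migration and the port change in prose; a sketch of the corresponding shell steps, with illustrative paths:

```sh
# Export resources (dashboards, tasks, etc.) from the old beta instance
# before moving its data aside; rc.0 refuses to start on beta-era data.
influx export all > resources.yml

# Move the old data directory out of the way:
mv ~/.influxdbv2 ~/.influxdbv2-beta-backup

# The default port is 8086 again; keep the old 9999 port explicitly if needed:
influxd --http-bind-address ":9999"
```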
-## v2.0.0-beta.16 [2020-08-07]
-
-### Breaking
-
-1. [19066](https://github.com/influxdata/influxdb/pull/19066): Drop deprecated /packages route tree
-1. [19116](https://github.com/influxdata/influxdb/pull/19116): Support more types for template envRef default value and require explicit default values
-1. [19104](https://github.com/influxdata/influxdb/pull/19104): Remove orgs/labels nested routes from the API.
-1. [19653](https://github.com/influxdata/influxdb/pull/19653): Remove PointBatcher from tsdb package API
-
-### Features
-
-1. [19075](https://github.com/influxdata/influxdb/pull/19075): Add resource links to a stack's resources from public HTTP API list/read calls
-1. [19103](https://github.com/influxdata/influxdb/pull/19103): Enhance resource creation experience when limits are reached
-1. [19223](https://github.com/influxdata/influxdb/pull/19223): Add dashboards command to influx CLI
-1. [19225](https://github.com/influxdata/influxdb/pull/19225): Allow user onboarding to optionally set passwords
-1. [18841](https://github.com/influxdata/influxdb/pull/18841): Limit query response sizes for queries built in QueryBuilder by requiring an aggregate window
-1. [19135](https://github.com/influxdata/influxdb/pull/19135): Add telegram notification.
-
-### Bug Fixes
-
-1. [19043](https://github.com/influxdata/influxdb/pull/19043): Enforce all influx CLI flag args are valid
-1. [19188](https://github.com/influxdata/influxdb/pull/19188): Dashboard cells correctly map results when multiple queries exist
-1. [19146](https://github.com/influxdata/influxdb/pull/19146): Dashboard cells and overlay use UTC as query time when toggling to UTC timezone
-1. [19222](https://github.com/influxdata/influxdb/pull/19222): Bucket names may not include quotation marks
-1. [19317](https://github.com/influxdata/influxdb/pull/19317): Add validation to Variable name creation for valid Flux identifiers.
-
-### UI Improvements
-1. [19231](https://github.com/influxdata/influxdb/pull/19231): Alerts page filter inputs now have tab indices for keyboard navigation
-1. [19364](https://github.com/influxdata/influxdb/pull/19364): Errors in OSS are now properly printed to the console
-
-## v2.0.0-beta.15 [2020-07-23]
-
-### Breaking
-
-1. [19004](https://github.com/influxdata/influxdb/pull/19004): Removed the `migrate` command from the `influxd` binary.
-1. [18921](https://github.com/influxdata/influxdb/pull/18921): Restricted UI variable names to not clash with Flux reserved words
-
-### Features
-
-1. [18888](https://github.com/influxdata/influxdb/pull/18888): Add event source to influx stack operations
-1. [18910](https://github.com/influxdata/influxdb/pull/18910): Add uninstall functionality for stacks
-1. [18912](https://github.com/influxdata/influxdb/pull/18912): Drop deprecated influx pkg command tree
-1. [18997](https://github.com/influxdata/influxdb/pull/18997): Add telegraf management commands to influx CLI
-1. 
[19030](https://github.com/influxdata/influxdb/pull/19030): Enable dynamic destination for the influx CLI configs file -1. [19029](https://github.com/influxdata/influxdb/pull/19029): Navigating away from a dashboard cancels all pending queries -1. [19003](https://github.com/influxdata/influxdb/pull/19003): Upgrade to Flux v0.74.0 -1. [19040](https://github.com/influxdata/influxdb/pull/19040): Drop the REPL command from influx CLI -1. [19032](https://github.com/influxdata/influxdb/pull/19032): Redesign asset & rate limit alerts - -### Bug Fixes - -1. [18891](https://github.com/influxdata/influxdb/pull/18891): Allow 0 to be the custom set minimum value for Y Domain -1. [18969](https://github.com/influxdata/influxdb/pull/18969): Single Stat cells should render properly in Safari again -1. [18974](https://github.com/influxdata/influxdb/pull/18974): Limit variable querying when submitting queries to used variables -1. [19039](https://github.com/influxdata/influxdb/pull/19039): Fix an issue where switching orgs was not redirecting correctly -1. [18989](https://github.com/influxdata/influxdb/pull/18989): Stopped fetching tags in the advanced builder -1. [19044](https://github.com/influxdata/influxdb/pull/19044): Graph customization: X and Y axis properly accept values - -## v2.0.0-beta.14 [2020-07-08] - -### Features - -1. [18758](https://github.com/influxdata/influxdb/pull/18758): Extend influx stacks update cmd with ability to add resources without apply template -1. [18793](https://github.com/influxdata/influxdb/pull/18793): Normalize InfluxDB templates under new /api/v2/templates and /api/v2/stacks public API -1. [18818](https://github.com/influxdata/influxdb/pull/18818): Extend template Summary and Diff nested types with kind identifiers -1. [18857](https://github.com/influxdata/influxdb/pull/18857): Flux updated to v0.71.1 -1. [18805](https://github.com/influxdata/influxdb/pull/18805): Added static builds for Linux - -### Bug Fixes - -1. [18878](https://github.com/influxdata/influxdb/pull/18878): Don't overwrite build date set via ldflags -1. [18842](https://github.com/influxdata/influxdb/pull/18842): Fixed an issue where define query was unusable after importing a Check -1. [18845](https://github.com/influxdata/influxdb/pull/18845): Update documentation links - -## v2.0.0-beta.13 [2020-06-25] - -### Features - -1. [18387](https://github.com/influxdata/influxdb/pull/18387): Integrate query cancellation after queries have been submitted -1. [18515](https://github.com/influxdata/influxdb/pull/18515): Extend templates with the source file|url|reader. -1. [18539](https://github.com/influxdata/influxdb/pull/18539): Collect stats on installed influxdata community template usage. -1. [18541](https://github.com/influxdata/influxdb/pull/18541): Pkger allow raw github.com host URLs for yaml|json|jsonnet URLs -1. [18546](https://github.com/influxdata/influxdb/pull/18546): Influx allow for files to be remotes for all template commands -1. [18560](https://github.com/influxdata/influxdb/pull/18560): Extend stacks API with update capability -1. [18568](https://github.com/influxdata/influxdb/pull/18568): Add support for config files to influxd and any cli.NewCommand use case -1. 
[18573](https://github.com/influxdata/influxdb/pull/18573): Extend influx stacks cmd with new influx stacks update cmd -1. [18595](https://github.com/influxdata/influxdb/pull/18595): Add ability to skip resources in a template by kind or by metadata.name -1. [18600](https://github.com/influxdata/influxdb/pull/18600): Extend influx apply with resource filter capabilities -1. [18601](https://github.com/influxdata/influxdb/pull/18601): Provide active config running influx config without args -1. [18606](https://github.com/influxdata/influxdb/pull/18606): Enable influxd binary to look for a config file on startup -1. [18647](https://github.com/influxdata/influxdb/pull/18647): Add support for env ref default values to the template parser -1. [18655](https://github.com/influxdata/influxdb/pull/18655): Add support for platform variable selected field to templates - -### Bug Fixes - -1. [18602](https://github.com/influxdata/influxdb/pull/18602): Fix uint overflow during setup on 32bit systems -1. [18623](https://github.com/influxdata/influxdb/pull/18623): Drop support for --local flag within influx CLI -1. [18632](https://github.com/influxdata/influxdb/pull/18632): Prevents undefined queries in cells from erroring out in dashboards -1. [18649](https://github.com/influxdata/influxdb/pull/18649): Fixes bucket selection issue and query builder state -1. [18658](https://github.com/influxdata/influxdb/pull/18658): Add support for 'd' day and 'w' week time identifiers in the CLI for bucket and setup commands -1. [18581](https://github.com/influxdata/influxdb/pull/18581): Cache dashboard cell query results to use as a reference for cell configurations -1. [18707](https://github.com/influxdata/influxdb/pull/18707): Validate host-url for influx config create/set commands -1. [18713](https://github.com/influxdata/influxdb/pull/18713): Fix influx CLI flags to accurately depict flags for all commands - -## v2.0.0-beta.12 [2020-06-12] - -### Features - -1. [18279](https://github.com/influxdata/influxdb/pull/18279): Make all pkg applications stateful via stacks -1. [18322](https://github.com/influxdata/influxdb/pull/18322): Add ability to export a stack's existing (as they are in the platform) resource state as a pkg -1. [18334](https://github.com/influxdata/influxdb/pull/18334): Update influx pkg commands with improved usage and examples in long form. -1. [18344](https://github.com/influxdata/influxdb/pull/18344): Extend influx CLI with version and User-Agent. -1. [18355](https://github.com/influxdata/influxdb/pull/18355): Integrate RedirectTo functionality so CLOUD users now get navigated back to the originally linked page after login -1. [18392](https://github.com/influxdata/influxdb/pull/18392): Consolidate pkg influx commands under templates. This removes some nesting of the CLI commands as part of that. -1. [18400](https://github.com/influxdata/influxdb/pull/18400): Dashboards maintain sort order after navigating away -1. [18480](https://github.com/influxdata/influxdb/pull/18480): Allows tasks to open in new tabs -1. [18553](https://github.com/influxdata/influxdb/pull/18553): Update usage and soften comparisons for kind matching on 'influx export --resourceType' cmd - -### Bug Fixes - -1. 
[18331](https://github.com/influxdata/influxdb/pull/18331): Support organization name in addition to ID in DBRP operations -1. [18335](https://github.com/influxdata/influxdb/pull/18335): Disable failing when providing an unexpected error to influx CLI -1. [18345](https://github.com/influxdata/influxdb/pull/18345): Have influx delete cmd respect the config -1. [18385](https://github.com/influxdata/influxdb/pull/18385): Store initialization for pkger enforced on reads -1. [18434](https://github.com/influxdata/influxdb/pull/18434): Backfill missing fillColumns field for histograms in pkger -1. [18471](https://github.com/influxdata/influxdb/pull/18471): Notifies the user how to escape presentation mode when the feature is toggled - -### UI Improvements - -1. [18319](https://github.com/influxdata/influxdb/pull/18319): Display bucket ID in bucket list and enable 1 click copying -1. [18361](https://github.com/influxdata/influxdb/pull/18361): Tokens list is now consistent with the other resource lists -1. [18346](https://github.com/influxdata/influxdb/pull/18346): Reduce the number of variables being hydrated when toggling variables -1. [18447](https://github.com/influxdata/influxdb/pull/18447): Redesign dashboard cell loading indicator to be more obvious -1. [18593](https://github.com/influxdata/influxdb/pull/18593): Add copyable User and Organization Ids to About page - -## v2.0.0-beta.11 [2020-05-26] - -### Features - -1. [18011](https://github.com/influxdata/influxdb/pull/18011): Integrate UTC dropdown when making custom time range query -1. [18040](https://github.com/influxdata/influxdb/pull/18040): Allow for min OR max y-axis visualization settings rather than min AND max -1. [17764](https://github.com/influxdata/influxdb/pull/17764): Add CSV to line protocol conversion library -1. [18059](https://github.com/influxdata/influxdb/pull/18059): Make the dropdown width adjustable -1. [18173](https://github.com/influxdata/influxdb/pull/18173): Add version to /health response - -### Bug Fixes - -1. [18066](https://github.com/influxdata/influxdb/pull/18066): Fixed bug that wasn't persisting timeFormat for Graph + Single Stat selections -1. [17959](https://github.com/influxdata/influxdb/pull/17959): Authorizer now exposes full permission set -1. [18071](https://github.com/influxdata/influxdb/pull/18071): Fixed issue that was causing variable selections to hydrate all variable values -1. [18016](https://github.com/influxdata/influxdb/pull/18016): Remove the fancy scrollbars -1. [18171](https://github.com/influxdata/influxdb/pull/18171): Check status now displaying warning if loading a large amount - -## v2.0.0-beta.10 [2020-05-07] - -### Features - -1. [17934](https://github.com/influxdata/influxdb/pull/17934): Add ability to delete a stack and all the resources associated with it -1. [17941](https://github.com/influxdata/influxdb/pull/17941): Enforce DNS name compliance on all pkger resources' metadata.name field -1. [17989](https://github.com/influxdata/influxdb/pull/17989): Add stateful pkg management with stacks -1. [18007](https://github.com/influxdata/influxdb/pull/18007): Add remove and list pkger stack commands to influx CLI -1. [18017](https://github.com/influxdata/influxdb/pull/18017): Fixup display message for interactive influx setup cmd - -### Bug Fixes - -1. 
[17906](https://github.com/influxdata/influxdb/pull/17906): Ensure UpdateUser cleans up the index when updating names -1. [17933](https://github.com/influxdata/influxdb/pull/17933): Ensure Checks can be set for zero values - -### UI Improvements - -1. [17860](https://github.com/influxdata/influxdb/pull/17860): Allow bucket creation from the Data Explorer and Cell Editor - -## v2.0.0-beta.9 [2020-04-23] - -### Features - -1. [17851](https://github.com/influxdata/influxdb/pull/17851): Add feature flag package capability and flags endpoint - -### Bug Fixes - -1. [17618](https://github.com/influxdata/influxdb/pull/17618): Add index for URM by user ID to improve lookup performance -1. [17751](https://github.com/influxdata/influxdb/pull/17751): Existing session expiration time is respected on session renewal -1. [17817](https://github.com/influxdata/influxdb/pull/17817): Make CLI respect env vars and flags in addition to the configs and extend support for config orgs to all commands - -### UI Improvements - -1. [17714](https://github.com/influxdata/influxdb/pull/17714): Cloud environments no longer render markdown images, for security reasons. -1. [17321](https://github.com/influxdata/influxdb/pull/17321): Improve UI for sorting resources -1. [17740](https://github.com/influxdata/influxdb/pull/17740): Add single-color color schemes for visualizations -1. [17849](https://github.com/influxdata/influxdb/pull/17849): Move Organization navigation items to user menu. - -## v2.0.0-beta.8 [2020-04-10] - -### Features - -1. [17490](https://github.com/influxdata/influxdb/pull/17490): `influx config -`, to switch back to previous activated configuration -1. [17581](https://github.com/influxdata/influxdb/pull/17581): Introduce new navigation menu -1. [17595](https://github.com/influxdata/influxdb/pull/17595): Add -f (--file) option to `influx query` and `influx task` commands -1. [17498](https://github.com/influxdata/influxdb/pull/17498): Added support for command line options to limit memory for queries - -### Bug Fixes - -1. [17257](https://github.com/influxdata/influxdb/pull/17769): Fix retention policy after bucket is migrated -1. [17612](https://github.com/influxdata/influxdb/pull/17612): Fix card size and layout jank in dashboards index view -1. [17651](https://github.com/influxdata/influxdb/pull/17651): Fix check graph font and lines defaulting to black causing graph to be unreadable -1. [17660](https://github.com/influxdata/influxdb/pull/17660): Fix text wrapping display issue and popover sizing bug when adding labels to a resource -1. [17670](https://github.com/influxdata/influxdb/pull/17670): Respect the now-time of the compiled query if it's provided -1. [17692](https://github.com/influxdata/influxdb/pull/17692): Update giraffe to fix spacing between ticks -1. [17694](https://github.com/influxdata/influxdb/pull/17694): Fixed typos in the Flux functions list -1. [17701](https://github.com/influxdata/influxdb/pull/17701): Allow mouse cursor inside Script Editor for Safari -1. [17609](https://github.com/influxdata/influxdb/pull/17609): Fixed an issue where Variables could not use other Variables -1. [17754](https://github.com/influxdata/influxdb/pull/17754): Adds error messaging for Cells in Dashboard View - -### UI Improvements - -1. 
[17583](https://github.com/influxdata/influxdb/pull/17583): Update layout of Alerts page to work on all screen sizes
-1. [17657](https://github.com/influxdata/influxdb/pull/17657): Sort dashboards on Getting Started page by recently modified
-
-## v2.0.0-beta.7 [2020-03-27]
-
-### Features
-
-1. [17232](https://github.com/influxdata/influxdb/pull/17232): Allow dashboards to optionally be displayed in light mode
-1. [17273](https://github.com/influxdata/influxdb/pull/17273): Add shell completions command for the influx cli
-1. [17353](https://github.com/influxdata/influxdb/pull/17353): Make all pkg resources unique by metadata.name field
-1. [17363](https://github.com/influxdata/influxdb/pull/17363): Telegraf config tokens can no longer be retrieved after creation, but new tokens can be created after a telegraf has been set up
-1. [17400](https://github.com/influxdata/influxdb/pull/17400): Be able to delete bucket by name via cli
-1. [17396](https://github.com/influxdata/influxdb/pull/17396): Add module to write line data to specified url, org, and bucket
-1. [17398](https://github.com/influxdata/influxdb/pull/17398): Extend influx cli write command with ability to process CSV data
-1. [17448](https://github.com/influxdata/influxdb/pull/17448): Add foundation for pkger stacks, stateful package management
-1. [17462](https://github.com/influxdata/influxdb/pull/17462): Flag to disable scheduling of tasks
-1. [17470](https://github.com/influxdata/influxdb/pull/17470): Add ability to output cli results as json and hide table headers
-1. [17472](https://github.com/influxdata/influxdb/pull/17472): Add an easy way to switch config via cli
-
-### Bug Fixes
-
-1. [17240](https://github.com/influxdata/influxdb/pull/17240): NodeJS logo displays properly in Firefox
-1. [17363](https://github.com/influxdata/influxdb/pull/17363): Fixed telegraf configuration bugs where system buckets were appearing in the buckets dropdown
-1. [17391](https://github.com/influxdata/influxdb/pull/17391): Fixed threshold check bug where checks could not be created when a field had a space in the name
-1. [17384](https://github.com/influxdata/influxdb/pull/17384): Reuse slices built by iterator to reduce allocations
-1. [17404](https://github.com/influxdata/influxdb/pull/17404): Updated duplicate check error message to be more explicit and actionable
-1. [17515](https://github.com/influxdata/influxdb/pull/17515): Editing a table cell shows the proper values and respects changes
-1. [17521](https://github.com/influxdata/influxdb/pull/17521): Table view scrolling should be slightly smoother
-1. [17601](https://github.com/influxdata/influxdb/pull/17601): URL table values on single columns are being correctly parsed
-1. [17552](https://github.com/influxdata/influxdb/pull/17552): Fixed a regression bug that inserted aggregate functions at the cursor position rather than on a new line
-
-### UI Improvements
-
-1. [17291](https://github.com/influxdata/influxdb/pull/17291): Redesign OSS Login page
-1. [17297](https://github.com/influxdata/influxdb/pull/17297): Display graphic when a dashboard has no cells
-
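Beta.7 above extends the influx cli write command with CSV processing; a hedged sketch of what that looks like in practice (the bucket name and file are placeholders, and the `--format` spelling should be checked against `influx write -h`):

```sh
# Write annotated CSV instead of line protocol; names are placeholders.
influx write --bucket my-bucket --format csv --file ./metrics.csv
```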
-## v2.0.0-beta.6 [2020-03-12]
-
-### Features
-
-1. [17085](https://github.com/influxdata/influxdb/pull/17085): Clicking on bucket name takes user to Data Explorer with bucket selected
-1. [17095](https://github.com/influxdata/influxdb/pull/17095): Extend pkger dashboards with table view support
-1. [17114](https://github.com/influxdata/influxdb/pull/17114): Allow for retention to be provided to influx setup command as a duration
-1. [17138](https://github.com/influxdata/influxdb/pull/17138): Extend pkger export all capabilities to support filtering by label name and resource type
-1. [17049](https://github.com/influxdata/influxdb/pull/17049): Added new login and sign-up screen for cloud users that allows direct login from their region
-1. [17170](https://github.com/influxdata/influxdb/pull/17170): Added new cli tool for managing multiple profiles
-1. [17145](https://github.com/influxdata/influxdb/pull/17145): Update kv.Store to define schema changes via new kv.Migrator types
-
-### Bug Fixes
-
-1. [17039](https://github.com/influxdata/influxdb/pull/17039): Fixed issue where tasks are exported for notification rules
-1. [17042](https://github.com/influxdata/influxdb/pull/17042): Fixed issue where tasks are not exported when exporting by org id
-1. [17070](https://github.com/influxdata/influxdb/pull/17070): Fixed issue where tasks with imports in query break in pkger
-1. [17028](https://github.com/influxdata/influxdb/pull/17028): Fixed issue where selecting an aggregate function in the script editor was not adding the function to a new line
-1. [17072](https://github.com/influxdata/influxdb/pull/17072): Fixed issue where creating a variable of type map was piping the incorrect value when map variables were used in queries
-1. [17050](https://github.com/influxdata/influxdb/pull/17050): Added missing user names to auth CLI commands
-1. [17113](https://github.com/influxdata/influxdb/pull/17113): Disabled group functionality for check query builder
-1. [17120](https://github.com/influxdata/influxdb/pull/17120): Fixed cell configuration error that was popping up when users created a dashboard and accessed the disk usage cell for the first time
-1. [17097](https://github.com/influxdata/influxdb/pull/17097): Listing all the default variables in the VariableTab of the script editor
-1. [17049](https://github.com/influxdata/influxdb/pull/17049): Fixed bug that was preventing the interval status on the dashboard header from refreshing on selections
-1. [17161](https://github.com/influxdata/influxdb/pull/17161): Update table custom decimal feature to update on focus
-1. [17168](https://github.com/influxdata/influxdb/pull/17168): Fixed UI bug that was setting Telegraf config buttons off-center and was resizing config selections when filtering through the data
-1. [17208](https://github.com/influxdata/influxdb/pull/17208): Fixed UI bug that was causing dashboard cells to error when a v.bucket was being used and was being configured for the first time
-1. [17214](https://github.com/influxdata/influxdb/pull/17214): Fix appearance of client library logos in Safari
-1. [17202](https://github.com/influxdata/influxdb/pull/17202): Fixed UI bug that was preventing checks created with the query builder from updating. Also fixed a bug that was preventing dashboard cell queries from working properly when creating group queries using the query builder
-
-## v2.0.0-beta.5 [2020-02-27]
-
-### Features
-
-1. [16991](https://github.com/influxdata/influxdb/pull/16991): Update Flux functions list for v0.61
-1. [16574](https://github.com/influxdata/influxdb/pull/16574): Add secure flag to session cookie
-
-### Bug Fixes
-
-1. [16919](https://github.com/influxdata/influxdb/pull/16919): Sort dashboards on homepage alphabetically
-1. [16934](https://github.com/influxdata/influxdb/pull/16934): Tokens page now sorts by status
-1. [16931](https://github.com/influxdata/influxdb/pull/16931): Set the default value of tags in a Check
-1. [16935](https://github.com/influxdata/influxdb/pull/16935): Fix sort by variable type
-1. [16973](https://github.com/influxdata/influxdb/pull/16973): Calculate correct stacked line cumulative when lines are different lengths
-1. [17010](https://github.com/influxdata/influxdb/pull/17010): Fixed scrollbar issue where resource cards would overflow the parent container rather than be hidden and scrollable
-1. [16992](https://github.com/influxdata/influxdb/pull/16992): Query Builder now groups on column values, not tag values
-1. [17013](https://github.com/influxdata/influxdb/pull/17013): Scatterplots can once again render the tooltip correctly
-1. [17027](https://github.com/influxdata/influxdb/pull/17027): Drop pkger gauge chart requirement for color threshold type
-1. [17040](https://github.com/influxdata/influxdb/pull/17040): Fixed bug that was preventing the interval status on the dashboard header from refreshing on selections
-1. [16961](https://github.com/influxdata/influxdb/pull/16961): Remove cli confirmation of secret; add an optional parameter for the secret value
-
-## v2.0.0-beta.4 [2020-02-14]
-
-### Features
-
-1. [16855](https://github.com/influxdata/influxdb/pull/16855): Added labels to buckets in UI
-1. [16842](https://github.com/influxdata/influxdb/pull/16842): Connect monaco editor to Flux LSP server
-1. [16856](https://github.com/influxdata/influxdb/pull/16856): Update Flux to v0.59.6
-
-### Bug Fixes
-
-1. [16852](https://github.com/influxdata/influxdb/pull/16852): Revert for bad indexing of UserResourceMappings and Authorizations
-1. [15911](https://github.com/influxdata/influxdb/pull/15911): Gauge no longer allowed to become too small
-1. [16878](https://github.com/influxdata/influxdb/pull/16878): Fix issue with INFLUX_TOKEN env vars being overridden by default token
-
-## v2.0.0-beta.3 [2020-02-11]
-
-### Features
-
-1. [16765](https://github.com/influxdata/influxdb/pull/16765): Extend influx cli pkg command with ability to take multiple files and directories
-1. [16767](https://github.com/influxdata/influxdb/pull/16767): Extend influx cli pkg command with ability to take multiple urls, files, directories, and stdin at the same time
-1. [16786](https://github.com/influxdata/influxdb/pull/16786): influx cli can manage secrets.
-
-### Bug Fixes
-
-1. [16733](https://github.com/influxdata/influxdb/pull/16733): Fix notification rule renaming panics from UI
-1. [16769](https://github.com/influxdata/influxdb/pull/16769): Fix the tooltip for stacked line graphs
-1. [16825](https://github.com/influxdata/influxdb/pull/16825): Fixed false success notification for read-only users creating dashboards
-1. [16822](https://github.com/influxdata/influxdb/pull/16822): Fix issue with pkger/http stack crashing on dupe content type
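Beta.3 above notes that the influx cli can manage secrets, and beta.5 removes the interactive confirmation in favor of an optional value parameter; a hedged sketch of that flow (the key and value are placeholders, and the flag spellings should be verified with `influx secret -h`):

```sh
# Store, list, and remove a secret non-interactively; values are placeholders.
influx secret update --key SLACK_TOKEN --value xoxb-placeholder
influx secret list
influx secret delete --key SLACK_TOKEN
```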
-
-## v2.0.0-beta.2 [2020-01-24]
-
-### Features
-
-1. [16711](https://github.com/influxdata/influxdb/pull/16711): Query Builder supports group() function (change the dropdown from filter to group)
-1. [16523](https://github.com/influxdata/influxdb/pull/16523): Change influx packages to be CRD compliant
-1. [16547](https://github.com/influxdata/influxdb/pull/16547): Allow trailing newline in credentials file and CLI integration
-1. [16545](https://github.com/influxdata/influxdb/pull/16545): Add support for prefixed cursor search to ForwardCursor types
-1. [16504](https://github.com/influxdata/influxdb/pull/16504): Add backup and restore
-1. [16522](https://github.com/influxdata/influxdb/pull/16522): Introduce resource logger to tasks, buckets and organizations
-
-### Bug Fixes
-
-1. [16656](https://github.com/influxdata/influxdb/pull/16656): Check engine closed before collecting index metrics
-1. [16412](https://github.com/influxdata/influxdb/pull/16412): Reject writes which use any of the reserved tag keys
-1. [16715](https://github.com/influxdata/influxdb/pull/16715): Fixed dashboard mapping for getDashboards to map correct prop
-1. [16716](https://github.com/influxdata/influxdb/pull/16716): Improve the error responses for unmarshal errors in org service
-1. [16527](https://github.com/influxdata/influxdb/pull/16527): Fix /telegrafs panics when using org=org_name parameter
-
-### UI Improvements
-
-1. [16575](https://github.com/influxdata/influxdb/pull/16575): Swap billingURL with checkoutURL
-1. [16203](https://github.com/influxdata/influxdb/pull/16203): Move cloud navigation to top of page instead of within left side navigation
-1. [16536](https://github.com/influxdata/influxdb/pull/16536): Adjust aggregate window periods to be more "reasonable". Use duration input with validation.
-
-## v2.0.0-beta.1 [2020-01-08]
-
-### Features
-
-1. [16234](https://github.com/influxdata/influxdb/pull/16234): Add support for notification endpoints to influx templates/pkgs.
-1. [16242](https://github.com/influxdata/influxdb/pull/16242): Drop id prefix for secret key requirement for notification endpoints
-1. [16259](https://github.com/influxdata/influxdb/pull/16259): Add support for check resource to pkger parser
-1. [16262](https://github.com/influxdata/influxdb/pull/16262): Add support for check resource pkger dry run functionality
-1. [16275](https://github.com/influxdata/influxdb/pull/16275): Add support for check resource pkger apply functionality
-1. [16283](https://github.com/influxdata/influxdb/pull/16283): Add support for check resource pkger export functionality
-1. [16212](https://github.com/influxdata/influxdb/pull/16212): Add new kv.ForwardCursor interface
-1. [16297](https://github.com/influxdata/influxdb/pull/16297): Add support for notification rule to pkger parser
-1. [16298](https://github.com/influxdata/influxdb/pull/16298): Add support for notification rule pkger dry run functionality
-1. [16305](https://github.com/influxdata/influxdb/pull/16305): Add support for notification rule pkger apply functionality
-1. [16312](https://github.com/influxdata/influxdb/pull/16312): Add support for notification rule pkger export functionality
-1. 
[16320](https://github.com/influxdata/influxdb/pull/16320): Add support for tasks to pkger parser -1. [16322](https://github.com/influxdata/influxdb/pull/16322): Add support for tasks to pkger dry run functionality -1. [16323](https://github.com/influxdata/influxdb/pull/16323): Add support for tasks to pkger apply functionality -1. [16324](https://github.com/influxdata/influxdb/pull/16324): Add support for tasks to pkger export functionality -1. [16226](https://github.com/influxdata/influxdb/pull/16226): Add group() to Query Builder -1. [16338](https://github.com/influxdata/influxdb/pull/16338): Add last run status to check and notification rules -1. [16340](https://github.com/influxdata/influxdb/pull/16340): Add last run status to tasks -1. [16341](https://github.com/influxdata/influxdb/pull/16341): Extend pkger apply functionality with ability to provide secrets outside of pkg -1. [16345](https://github.com/influxdata/influxdb/pull/16345): Add hide headers flag to influx cli task find cmd -1. [16336](https://github.com/influxdata/influxdb/pull/16336): Manual Overrides for Readiness Endpoint -1. [16347](https://github.com/influxdata/influxdb/pull/16347): Drop legacy inmem service implementation in favor of kv service with inmem dependency -1. [16348](https://github.com/influxdata/influxdb/pull/16348): Drop legacy bolt service implementation in favor of kv service with bolt dependency -1. [16014](https://github.com/influxdata/influxdb/pull/16014): While creating check, also display notification rules that would match check based on tag rules -1. [16389](https://github.com/influxdata/influxdb/pull/16389): Increase default bucket retention period to 30 days -1. [16430](https://github.com/influxdata/influxdb/pull/16430): Added toggle to table thresholds to allow users to choose between setting threshold colors to text or background -1. [16418](https://github.com/influxdata/influxdb/pull/16418): Add Developer Documentation -1. [16260](https://github.com/influxdata/influxdb/pull/16260): Capture User-Agent header as query source for logging purposes -1. [16469](https://github.com/influxdata/influxdb/pull/16469): Add support for configurable max batch size in points write handler -1. [16509](https://github.com/influxdata/influxdb/pull/16509): Add support for applying an influx package via a public facing URL -1. [16511](https://github.com/influxdata/influxdb/pull/16511): Add jsonnet support for influx packages -1. [14782](https://github.com/influxdata/influxdb/pull/16336): Add view page for Check -1. [16537](https://github.com/influxdata/influxdb/pull/16537): Add update password for CLI - -### Bug Fixes - -1. [16225](https://github.com/influxdata/influxdb/pull/16225): Ensures env vars are applied consistently across cmd, and fixes issue where INFLUX\_ env var prefix was not set globally. -1. [16235](https://github.com/influxdata/influxdb/pull/16235): Removed default frontend sorting when flux queries specify sorting -1. [16238](https://github.com/influxdata/influxdb/pull/16238): Store canceled task runs in the correct bucket -1. [16237](https://github.com/influxdata/influxdb/pull/16237): Updated Sortby functionality for table frontend sorts to sort numbers correctly -1. 
[16249](https://github.com/influxdata/influxdb/pull/16249): Prevent potential infinite loop when finding tasks by organization. -1. [16255](https://github.com/influxdata/influxdb/pull/16255): Retain user input when parsing invalid JSON during import -1. [16268](https://github.com/influxdata/influxdb/pull/16268): Fixed test flakiness that stemmed from multiple flush/signins being called in the same test suite -1. [16346](https://github.com/influxdata/influxdb/pull/16346): Update pkger task export to only trim out option task and not all vars provided -1. [16374](https://github.com/influxdata/influxdb/pull/16374): Update influx CLI, only show "see help" message, instead of the whole usage. -1. [16380](https://github.com/influxdata/influxdb/pull/16380): Fix notification tag matching rules and enable tests to verify -1. [16376](https://github.com/influxdata/influxdb/pull/16376): Extend the y-axis when stacked graph is selected -1. [16404](https://github.com/influxdata/influxdb/pull/16404): Fixed query reset bug that was resetting query in script editor whenever dates were changed -1. [16430](https://github.com/influxdata/influxdb/pull/16430): Fixed table threshold bug that was defaulting set colors to the background. -1. [16435](https://github.com/influxdata/influxdb/pull/16435): Time labels are no longer squished to the left -1. [16427](https://github.com/influxdata/influxdb/pull/16427): Fixed underlying issue with disappearing queries made in Advanced Mode -1. [16439](https://github.com/influxdata/influxdb/pull/16439): Prevent negative zero and allow zero to have decimal places -1. [16376](https://github.com/influxdata/influxdb/pull/16413): Limit data loader bucket selection to non system buckets -1. [16458](https://github.com/influxdata/influxdb/pull/16458): Fix EOF error when manually running tasks from the Task Page. -1. [16491](https://github.com/influxdata/influxdb/pull/16491): Add missing env vals to influx cli usage and fixes precedence of flag/env var priority - -### UI Improvements - -1. [16444](https://github.com/influxdata/influxdb/pull/16444): Add honeybadger reporting to create checks - -## v2.0.0-alpha.21 [2019-12-13] - -### Features - -1. [15836](https://github.com/influxdata/influxdb/pull/16077): Add stacked line layer option to graphs -1. [16094](https://github.com/influxdata/influxdb/pull/16094): Annotate log messages with trace ID, if available -1. [16187](https://github.com/influxdata/influxdb/pull/16187): Bucket create to accept an org name flag -1. [16158](https://github.com/influxdata/influxdb/pull/16158): Add trace ID response header to query endpoint - -### Bug Fixes - -1. [15655](https://github.com/influxdata/influxdb/pull/15655): Allow table columns to be draggable in table settings -1. [15757](https://github.com/influxdata/influxdb/pull/15757): Light up the home page icon when active -1. [15797](https://github.com/influxdata/influxdb/pull/15797): Make numeric inputs first class citizens -1. [15853](https://github.com/influxdata/influxdb/pull/15853): Prompt users to make a dashboard when dashboards are empty -1. [15884](https://github.com/influxdata/influxdb/pull/15884): Remove name editing from query definition during threshold check creation -1. 
[15975](https://github.com/influxdata/influxdb/pull/15975): Wait until user stops dragging and releases marker before zooming in after threshold changes -1. [16057](https://github.com/influxdata/influxdb/pull/16057): Adds `properties` to each cell on GET /dashboards/{dashboardID} -1. [16101](https://github.com/influxdata/influxdb/pull/16101): Gracefully handle invalid user-supplied JSON -1. [16105](https://github.com/influxdata/influxdb/pull/16105): Fix crash when loading queries built using Query Builder -1. [16112](https://github.com/influxdata/influxdb/pull/16112): Create cell view properties on dashboard creation -1. [16144](https://github.com/influxdata/influxdb/pull/16144): Scrollbars are dapper and proper -1. [16172](https://github.com/influxdata/influxdb/pull/16172): Fixed table ui threshold colorization issue where setting thresholds would not change table UI -1. [16194](https://github.com/influxdata/influxdb/pull/16194): Fixed windowPeriod issue that stemmed from webpack rules -1. [16175](https://github.com/influxdata/influxdb/pull/16175): Added delete functionality to note cells so that they can be deleted -1. [16204](https://github.com/influxdata/influxdb/pull/16204): Fix failure to create labels when creating telegraf configs -1. [16207](https://github.com/influxdata/influxdb/pull/16207): Fix crash when editing a Telegraf config -1. [16201](https://github.com/influxdata/influxdb/pull/16201): Updated start/endtime functionality so that custom script timeranges overwrite dropdown selections -1. [16217](https://github.com/influxdata/influxdb/pull/16217): Fix 12-hour time format to use consistent formatting and number of time ticks - -### UI Improvements - -## v2.0.0-alpha.20 [2019-11-20] - -### Features - -1. [15805](https://github.com/influxdata/influxdb/pull/15924): Add tls insecure skip verify to influx CLI. -1. [15981](https://github.com/influxdata/influxdb/pull/15981): Extend influx cli user create to allow for organization ID and user passwords to be set on user. -1. [15983](https://github.com/influxdata/influxdb/pull/15983): Autopopulate organization ids in the code samples -1. [15749](https://github.com/influxdata/influxdb/pull/15749): Expose bundle analysis tools for frontend resources -1. [15674](https://github.com/influxdata/influxdb/pull/15674): Allow users to view just the output section of a telegraf config -1. [15923](https://github.com/influxdata/influxdb/pull/15923): Allow the users to see string data in the single stat graph type - -### Bug Fixes - -1. [15777](https://github.com/influxdata/influxdb/pull/15777): Fix long startup when running 'influx help' -1. [15713](https://github.com/influxdata/influxdb/pull/15713): Mock missing Flux dependencies when creating tasks -1. [15731](https://github.com/influxdata/influxdb/pull/15731): Ensure array cursor iterator stats accumulate all cursor stats -1. [15866](https://github.com/influxdata/influxdb/pull/15866): Do not show Members section in Cloud environments -1. [15801](https://github.com/influxdata/influxdb/pull/15801): Change how cloud mode is enabled -1. [15820](https://github.com/influxdata/influxdb/pull/15820): Merge frontend development environments -1. [15944](https://github.com/influxdata/influxdb/pull/15944): Refactor table state logic on the frontend -1. 
[15920](https://github.com/influxdata/influxdb/pull/15920): Arrows in tables now show data in ascending and descening order -1. [15728](https://github.com/influxdata/influxdb/pull/15728): Sort by retention rules now sorts by seconds -1. [15628](https://github.com/influxdata/influxdb/pull/15628): Horizontal scrollbar no longer covering data - -### UI Improvements - -1. [15809](https://github.com/influxdata/influxdb/pull/15809): Redesign cards and animations on getting started page -1. [15787](https://github.com/influxdata/influxdb/pull/15787): Allow the users to filter with labels in telegraph input search - -## v2.0.0-alpha.19 [2019-10-30] - -### Features - -1. [15313](https://github.com/influxdata/influxdb/pull/15313): Add shortcut for toggling comments in script editor -1. [15650](https://github.com/influxdata/influxdb/pull/15650): Expose last run status and last run error in task API - -### UI Improvements - -1. [15503](https://github.com/influxdata/influxdb/pull/15503): Redesign page headers to be more space efficient -1. [15426](https://github.com/influxdata/influxdb/pull/15426): Add 403 handler that redirects back to the sign-in page on oats-generated routes. -1. [15710](https://github.com/influxdata/influxdb/pull/15710): Add button to nginx and redis configuration sections to make interaction more clear - -### Bug Fixes - -1. [15295](https://github.com/influxdata/influxdb/pull/15295): Ensures users are created with an active status -1. [15306](https://github.com/influxdata/influxdb/pull/15306): Added missing string values for CacheStatus type -1. [15348](https://github.com/influxdata/influxdb/pull/15348): Disable saving for threshold check if no threshold selected -1. [15354](https://github.com/influxdata/influxdb/pull/15354): Query variable selector shows variable keys, not values -1. [15246](https://github.com/influxdata/influxdb/pull/15427): UI/Telegraf filter functionality shows results based on input name -1. [13940](https://github.com/influxdata/influxdb/pull/15443): Create Label Overlay UI will disable the submit button and return a UI error if the name field is empty -1. [15452](https://github.com/influxdata/influxdb/pull/15452): Log error as info message on unauthorized API call attempts -1. [15504](https://github.com/influxdata/influxdb/pull/15504): Ensure members&owners eps 404 when /org resource does not exist -1. [15510](https://github.com/influxdata/influxdb/pull/15510): UI/Telegraf sort functionality fixed -1. [15549](https://github.com/influxdata/influxdb/pull/15549): UI/Task edit functionality fixed -1. [15559](https://github.com/influxdata/influxdb/pull/15559): Exiting a configuration of a dashboard cell now properly renders the cell content -1. [15556](https://github.com/influxdata/influxdb/pull/15556): Creating a check now displays on the checklist -1. [15592](https://github.com/influxdata/influxdb/pull/15592): Changed task runs success status code from 200 to 201 to match Swagger documentation. -1. [15634](https://github.com/influxdata/influxdb/pull/15634): TextAreas have the correct height -1. [15647](https://github.com/influxdata/influxdb/pull/15647): Ensures labels are unique by organization in the kv store -1. 
[15695](https://github.com/influxdata/influxdb/pull/15695): Ensures variable names are unique by organization - -## v2.0.0-alpha.18 [2019-09-26] - -### Features - -1. [15151](https://github.com/influxdata/influxdb/pull/15151): Add jsonweb package for future JWT support -1. [15168](https://github.com/influxdata/influxdb/pull/15168): Added the JMeter Template dashboard -1. [15152](https://github.com/influxdata/influxdb/pull/15152): Add JWT support to http auth middleware - -### UI Improvements - -1. [15211](https://github.com/influxdata/influxdb/pull/15211): Display dashboards index as a grid -1. [15099](https://github.com/influxdata/influxdb/pull/15099): Add viewport scaling to html meta for responsive mobile scaling -1. [15056](https://github.com/influxdata/influxdb/pull/15056): Remove rename and delete functionality from system buckets -1. [15056](https://github.com/influxdata/influxdb/pull/15056): Prevent new buckets from being named with the reserved "\_" prefix -1. [15056](https://github.com/influxdata/influxdb/pull/15056): Prevent user from selecting system buckets when creating Scrapers, Telegraf configurations, read/write tokens, and when saving as a task -1. [15056](https://github.com/influxdata/influxdb/pull/15056): Limit values from draggable threshold handles to 2 decimal places -1. [15040](https://github.com/influxdata/influxdb/pull/15040): Redesign check builder UI to fill the screen and make more room for composing message templates -1. [14990](https://github.com/influxdata/influxdb/pull/14990): Move Tokens tab from Settings to Load Data page -1. [14990](https://github.com/influxdata/influxdb/pull/14990): Expose all Settings tabs in navigation menu -1. [15289](https://github.com/influxdata/influxdb/pull/15289): Added Stream and table functions to query builder - -### Bug Fixes - -1. [14931](https://github.com/influxdata/influxdb/pull/14931): Remove scrollbars blocking onboarding UI step. - -## v2.0.0-alpha.17 [2019-08-14] - -### Features - -1. [14809](https://github.com/influxdata/influxdb/pull/14809): Add task middleware's for checks and notifications -1. [14495](https://github.com/influxdata/influxdb/pull/14495): optional gzip compression of the query CSV response. -1. [14567](https://github.com/influxdata/influxdb/pull/14567): Add task types. -1. [14604](https://github.com/influxdata/influxdb/pull/14604): When getting task runs from the API, runs will be returned in order of most recently scheduled first. -1. [14631](https://github.com/influxdata/influxdb/pull/14631): Added Github and Apache templates -1. [14631](https://github.com/influxdata/influxdb/pull/14631): Updated name of Local Metrics template -1. [14631](https://github.com/influxdata/influxdb/pull/14631): Dashboards for all Telegraf config bundles now created -1. [14694](https://github.com/influxdata/influxdb/pull/14694): Add ability to find tasks by name. -1. [14901](https://github.com/influxdata/influxdb/pull/14901): Add ability to Peek() on reads package StreamReader types. - -### UI Improvements - -1. [14917](https://github.com/influxdata/influxdb/pull/14917): Make first steps in Monitoring & Alerting more obvious -1. [14889](https://github.com/influxdata/influxdb/pull/14889): Make adding data to buckets more discoverable -1. 
[14709](https://github.com/influxdata/influxdb/pull/14709): Move Buckets, Telgrafs, and Scrapers pages into a tab called "Load Data" for ease of discovery -1. [14846](https://github.com/influxdata/influxdb/pull/14846): Standardize formatting of "updated at" timestamp in all resource cards -1. [14887](https://github.com/influxdata/influxdb/pull/14887): Move no buckets warning in telegraf tab above the search box - -### Bug Fixes - -1. [14480](https://github.com/influxdata/influxdb/pull/14480): Fix authentication when updating a task with invalid org or bucket. -1. [14497](https://github.com/influxdata/influxdb/pull/14497): Update the documentation link for Telegraf. -1. [14492](https://github.com/influxdata/influxdb/pull/14492): Fix to surface errors properly as task notifications on create. -1. [14569](https://github.com/influxdata/influxdb/pull/14569): Fix limiting of get runs for task. -1. [14779](https://github.com/influxdata/influxdb/pull/14779): Refactor tasks coordinator. -1. [14846](https://github.com/influxdata/influxdb/pull/14846): Ensure onboarding "advanced" button goes to correct location - -## v2.0.0-alpha.16 [2019-07-25] - -### Bug Fixes - -1. [14385](https://github.com/influxdata/influxdb/pull/14385): Add link to Documentation text in line protocol upload overlay -1. [14344](https://github.com/influxdata/influxdb/pull/14344): Fix issue in Authorization API, can't create auth for another user. -1. [14352](https://github.com/influxdata/influxdb/pull/14352): Fix Influx CLI ignored user flag for auth creation. -1. [14379](https://github.com/influxdata/influxdb/pull/14379): Fix the map example in the documentation -1. [14423](https://github.com/influxdata/influxdb/pull/14423): Ignore null/empty Flux rows which prevents a single stat/gauge crash. -1. [14434](https://github.com/influxdata/influxdb/pull/14434): Fixes an issue where clicking on a dashboard name caused an incorrect redirect. -1. [14441](https://github.com/influxdata/influxdb/pull/14441): Upgrade templates lib to 0.5.0 -1. [14453](https://github.com/influxdata/influxdb/pull/14453): Upgrade giraffe lib to 0.16.1 -1. [14412](https://github.com/influxdata/influxdb/pull/14412): Fix incorrect notification type for manually running a Task -1. [14356](https://github.com/influxdata/influxdb/pull/14356): Fix an issue where canceled tasks did not resume. - -## v2.0.0-alpha.15 [2019-07-11] - -### Features - -1. [14256](https://github.com/influxdata/influxdb/pull/14256): Add time zone support to UI -2. [14243](https://github.com/influxdata/influxdb/pull/14243): Addded new storage inspection tool to verify tsm files -3. [14353](https://github.com/influxdata/influxdb/pull/14353): Require a token to be supplied for all task creation - -### Bug Fixes - -1. [14287](https://github.com/influxdata/influxdb/pull/14287): Fix incorrect reporting of task as successful when error occurs during result iteration -1. [14412](https://github.com/influxdata/influxdb/pull/14412): Fix incorrect notification type for manually running a Task - -### Known Issues - -1. [influxdata/flux#1492](https://github.com/influxdata/flux/issues/1492): Null support in Flux was introduced in Alhpa 14. Several null issues were fixed in this release, but one known issue remains - Users may hit a panic if the first record processed by a map function has a null value. 
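For anyone hitting the known issue above, one possible mitigation (an illustrative sketch, not an official workaround from these notes) is to filter null records out before they reach `map()`. This assumes the mapped column is `_value` and that your Flux version supports the `exists` operator:

```sh
# Hypothetical workaround: drop null records ahead of map() so the first
# record it processes is never null. Bucket name and range are placeholders.
influx query 'from(bucket: "example-bucket")
  |> range(start: -1h)
  |> filter(fn: (r) => exists r._value)
  |> map(fn: (r) => ({ r with _value: r._value * 2.0 }))'
```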
-
-## v2.0.0-alpha.14 [2019-06-28]
-
-### Features
-
-1. [14221](https://github.com/influxdata/influxdb/pull/14221): Add influxd inspect verify-wal tool
-1. [14218](https://github.com/influxdata/influxdb/commit/4faf2a24def4f351aef5b3c0f2907c385f82fdb9): Move to Flux 0.34.2, which includes new string functions and initial multi-datasource support with sql.from()
-1. [14164](https://github.com/influxdata/influxdb/pull/14164): Only click save once to save cell
-1. [14188](https://github.com/influxdata/influxdb/pull/14188): Enable selecting more columns for line visualizations
-
-### UI Improvements
-
-1. [14194](https://github.com/influxdata/influxdb/pull/14194): Draw gauges correctly on HiDPI displays
-1. [14194](https://github.com/influxdata/influxdb/pull/14194): Clamp gauge position to gauge domain
-1. [14168](https://github.com/influxdata/influxdb/pull/14168): Improve display of error messages
-1. [14157](https://github.com/influxdata/influxdb/pull/14157): Remove rendering bottleneck when streaming Flux responses
-1. [14165](https://github.com/influxdata/influxdb/pull/14165): Prevent variable dropdown from clipping
-
-## v2.0.0-alpha.13 [2019-06-13]
-
-### Features
-
-1. [14130](https://github.com/influxdata/influxdb/pull/14130): Add static templates for system, docker, redis, kubernetes
-1. [14189](https://github.com/influxdata/influxdb/pull/14189): Add option to select a token when creating a task
-1. [14200](https://github.com/influxdata/influxdb/pull/14200): Add the ability to update a token when updating a task
-
-## v2.0.0-alpha.12 [2019-06-13]
-
-### Features
-
-1. [14059](https://github.com/influxdata/influxdb/pull/14059): Enable formatting line graph y ticks with binary prefix
-1. [14052](https://github.com/influxdata/influxdb/pull/14052): Add x and y column pickers to graph types
-1. [14128](https://github.com/influxdata/influxdb/pull/14128): Add option to shade area below line graphs
-
-### Bug Fixes
-
-1. [14085](https://github.com/influxdata/influxdb/pull/14085): Fix performance regression in graph tooltips
-
-## v2.0.0-alpha.11 [2019-05-31]
-
-### Bug Fixes
-
-1. [14031](https://github.com/influxdata/influxdb/pull/14031): Correctly check if columnKeys include xColumn in heatmap
-
-## v2.0.0-alpha.10 [2019-05-30]
-
-### Features
-
-1. [13945](https://github.com/influxdata/influxdb/pull/13945): Add heatmap visualization type
-1. [13961](https://github.com/influxdata/influxdb/pull/13961): Add scatter graph visualization type
-1. [13850](https://github.com/influxdata/influxdb/pull/13850): Add description field to Tasks
-1. [13924](https://github.com/influxdata/influxdb/pull/13924): Add CLI arguments for configuring session length and renewal
-1. [13961](https://github.com/influxdata/influxdb/pull/13961): Add smooth interpolation option to line graphs
-
-### Bug Fixes
-
-1. [13753](https://github.com/influxdata/influxdb/pull/13753): Removed hardcoded bucket for Getting Started with Flux dashboard
-1. [13783](https://github.com/influxdata/influxdb/pull/13783): Ensure map type variables allow for selecting values
-1. [13800](https://github.com/influxdata/influxdb/pull/13800): Generate more idiomatic Flux in query builder
-1. [13797](https://github.com/influxdata/influxdb/pull/13797): Expand tab key presses to 2 spaces in the Flux editor
-1. [13823](https://github.com/influxdata/influxdb/pull/13823): Prevent dragging of Variable Dropdowns when dragging a scrollbar inside the dropdown
-1. [13853](https://github.com/influxdata/influxdb/pull/13853): Improve single stat computation
-1. [13945](https://github.com/influxdata/influxdb/pull/13945): Fix crash when opening histogram settings with no data
-
-### UI Improvements
-
-1. [13835](https://github.com/influxdata/influxdb/pull/13835): Render checkboxes in query builder tag selection lists
-1. [13856](https://github.com/influxdata/influxdb/pull/13856): Fix jumbled card text in Telegraf configuration wizard
-1. [13888](https://github.com/influxdata/influxdb/pull/13888): Change scrapers in scrapers list to be resource cards
-1. [13925](https://github.com/influxdata/influxdb/pull/13925): Export and download resource with formatted resource name with no spaces
-
-## v2.0.0-alpha.9 [2019-05-01]
-
-**NOTE: This will remove all tasks from your InfluxDB v2.0 instance.**
-
-### Features
-
-1. [13423](https://github.com/influxdata/influxdb/pull/13423): Set autorefresh of dashboard to pause if absolute time range is selected
-1. [13473](https://github.com/influxdata/influxdb/pull/13473): Switch task back end to a more modular and flexible system
-1. [13493](https://github.com/influxdata/influxdb/pull/13493): Add org profile tab with ability to edit organization name
-1. [13510](https://github.com/influxdata/influxdb/pull/13510): Add org name to dashboard page title
-1. [13520](https://github.com/influxdata/influxdb/pull/13520): Add cautioning to bucket renaming
-1. [13560](https://github.com/influxdata/influxdb/pull/13560): Add option to generate an all-access token in tokens tab
-1. [13601](https://github.com/influxdata/influxdb/pull/13601): Add option to generate read/write token in tokens tab
-1. [13715](https://github.com/influxdata/influxdb/pull/13715): Added a new Local Metrics Dashboard template that is created during Quick Start
-
-### Bug Fixes
-
-1. [13584](https://github.com/influxdata/influxdb/pull/13584): Fixed scroll clipping found in label editing flow
-1. [13585](https://github.com/influxdata/influxdb/pull/13585): Prevent overlapping text and dot in time range dropdown
-1. [13602](https://github.com/influxdata/influxdb/pull/13602): Updated link in notes cell to a more useful site
-1. [13618](https://github.com/influxdata/influxdb/pull/13618): Show error message when adding line protocol
-1. [13657](https://github.com/influxdata/influxdb/pull/13657): Update UI Flux function documentation
-1. [13718](https://github.com/influxdata/influxdb/pull/13718): Updated System template to support math with floats
-1. [13732](https://github.com/influxdata/influxdb/pull/13732): Fixed the window function documentation
-1. [13738](https://github.com/influxdata/influxdb/pull/13738): Fixed typo in the `range` Flux function example
-1. [13742](https://github.com/influxdata/influxdb/pull/13742): Updated the `systemTime` function to use `system.time`
-
-### UI Improvements
-
-1. [13424](https://github.com/influxdata/influxdb/pull/13424): Add general polish and empty states to Create Dashboard from Template overlay
-
-## v2.0.0-alpha.8 [2019-04-12]
-
-### Features
-
-1. [13024](https://github.com/influxdata/influxdb/pull/13024): Add the ability to edit a token's description
-1. [13078](https://github.com/influxdata/influxdb/pull/13078): Add the option to create a Dashboard from a Template.
-1. [13161](https://github.com/influxdata/influxdb/pull/13161): Add the ability to add labels on variables
-1. [13171](https://github.com/influxdata/influxdb/pull/13171): Add switch organizations dropdown to home navigation menu item.
-1. [13173](https://github.com/influxdata/influxdb/pull/13173): Add create org to side nav
-1. [13345](https://github.com/influxdata/influxdb/pull/13345): Added a new Getting Started with Flux Template
-
-### Bug Fixes
-
-1. [13284](https://github.com/influxdata/influxdb/pull/13284): Update shift to timeShift in the flux functions side bar
-
-### UI Improvements
-
-1. [13287](https://github.com/influxdata/influxdb/pull/13287): Update cursor to grab when hovering draggable areas
-1. [13311](https://github.com/influxdata/influxdb/pull/13311): Sync note editor text and preview scrolling
-1. [13249](https://github.com/influxdata/influxdb/pull/13249): Add the ability to create a bucket when creating an organization
-
-## v2.0.0-alpha.7 [2019-03-28]
-
-### Features
-
-1. [12663](https://github.com/influxdata/influxdb/pull/12663): Insert flux function near cursor in flux editor
-1. [12678](https://github.com/influxdata/influxdb/pull/12678): Enable the use of variables in the Data Explorer and Cell Editor Overlay
-1. [12655](https://github.com/influxdata/influxdb/pull/12655): Add a variable control bar to dashboards to select values for variables.
-1. [12706](https://github.com/influxdata/influxdb/pull/12706): Add ability to add variable to script from the side menu.
-1. [12791](https://github.com/influxdata/influxdb/pull/12791): Use time range for metaqueries in Data Explorer and Cell Editor Overlay
-1. [12827](https://github.com/influxdata/influxdb/pull/12827): Fix screen tearing bug in Raw Data View
-1. [12843](https://github.com/influxdata/influxdb/pull/12843): Add copy to clipboard button to export overlays
-1. [12826](https://github.com/influxdata/influxdb/pull/12826): Enable copying error messages to the clipboard from dashboard cells
-1. [12876](https://github.com/influxdata/influxdb/pull/12876): Add the ability to update a token's status in Token list
-1. [12821](https://github.com/influxdata/influxdb/pull/12821): Allow variables to be re-ordered within control bar on a dashboard.
-1. [12888](https://github.com/influxdata/influxdb/pull/12888): Add the ability to delete a template
-1. [12901](https://github.com/influxdata/influxdb/pull/12901): Save user preference for variable control bar visibility and default to visible
-1. [12910](https://github.com/influxdata/influxdb/pull/12910): Add the ability to clone a template
-1. [12958](https://github.com/influxdata/influxdb/pull/12958): Add the ability to import a variable
-
-### Bug Fixes
-
-1. [12684](https://github.com/influxdata/influxdb/pull/12684): Fix mismatch in bucket row and header
-1. [12703](https://github.com/influxdata/influxdb/pull/12703): Allows user to edit note on cell
-1. [12764](https://github.com/influxdata/influxdb/pull/12764): Fix empty state styles in scrapers in org view
-1. [12790](https://github.com/influxdata/influxdb/pull/12790): Fix bucket creation error when changing retention rule types.
-1. [12793](https://github.com/influxdata/influxdb/pull/12793): Fix task creation error when switching schedule types.
-1. [12805](https://github.com/influxdata/influxdb/pull/12805): Fix hidden horizontal scrollbars in flux raw data view
-1. [12827](https://github.com/influxdata/influxdb/pull/12827): Fix screen tearing bug in Raw Data View
-1. [12961](https://github.com/influxdata/influxdb/pull/12961): Fix scroll clipping in graph legends & dropdown menus
-1. [12959](https://github.com/influxdata/influxdb/pull/12959): Fix routing loop
-
-### UI Improvements
-
-1. [12782](https://github.com/influxdata/influxdb/pull/12782): Move bucket selection in the query builder to the first card in the list
-1. [12850](https://github.com/influxdata/influxdb/pull/12850): Ensure editor is automatically focused in note editor
-1. [12915](https://github.com/influxdata/influxdb/pull/12915): Add ability to edit a template's name.
-
-## v2.0.0-alpha.6 [2019-03-15]
-
-### Release Notes
-
-We have updated the way we do predefined dashboards to [include Templates](https://github.com/influxdata/influxdb/pull/12532) in this release, which means existing Organizations will not have a System dashboard created when they build a new Telegraf configuration. In order to get this functionality, remove your existing data and start from scratch.
-
-**NOTE: This will remove all data from your InfluxDB v2.0 instance including timeseries data.**
-
-On most `linux` systems including `macOS`:
-
-```sh
-$ rm -r ~/.influxdbv2
-```
-
-Once completed, `v2.0.0-alpha.6` can be started.
-
-### Features
-
-1. [12496](https://github.com/influxdata/influxdb/pull/12496): Add ability to import a dashboard
-1. [12524](https://github.com/influxdata/influxdb/pull/12524): Add ability to import a dashboard from org view
-1. [12531](https://github.com/influxdata/influxdb/pull/12531): Add ability to export a dashboard and a task
-1. [12615](https://github.com/influxdata/influxdb/pull/12615): Add `run` subcommand to influxd binary. This is also the default when no subcommand is specified.
-1. [12523](https://github.com/influxdata/influxdb/pull/12523): Add ability to save a query as a variable from the Data Explorer.
-1. [12532](https://github.com/influxdata/influxdb/pull/12532): Add System template on onboarding
-
-### Bug Fixes
-
-1. [12641](https://github.com/influxdata/influxdb/pull/12641): Stop scrollbars from covering text in flux editor
-
-### UI Improvements
-
-1. [12610](https://github.com/influxdata/influxdb/pull/12610): Fine tune keyboard interactions for managing labels from a resource card
-
-## v2.0.0-alpha.5 [2019-03-08]
-
-### Release Notes
-
-This release includes a [breaking change](https://github.com/influxdata/influxdb/pull/12391) to the format in which TSM and index data are stored on disk.
-Any existing local data will not be queryable once InfluxDB is upgraded to this release.
-Prior to installing this release, we recommend removing all storage-engine data from your local InfluxDB `2.x` installation; this can be done without losing any of your other InfluxDB `2.x` data (settings etc).
-To remove only local storage data, run the following in a terminal.
-
-On most `linux` systems:
-
-```sh
-
-# Replace <username> with your actual username.
-
-$ rm -r /home/<username>/.influxdbv2/engine
-```
-
-On `macOS`:
-
-```sh
-# Replace <username> with your actual username.
-
-$ rm -r /Users/<username>/.influxdbv2/engine
-```
-
-Once completed, `v2.0.0-alpha.5` can be started.
-
-### Features
-
-1. [12096](https://github.com/influxdata/influxdb/pull/12096): Add labels to cloned tasks
-1. [12111](https://github.com/influxdata/influxdb/pull/12111): Add ability to filter resources by clicking a label
-1. [12401](https://github.com/influxdata/influxdb/pull/12401): Add ability to add a member to org
-1. [12391](https://github.com/influxdata/influxdb/pull/12391): Improve representation of TSM tagsets on disk
-1. [12437](https://github.com/influxdata/influxdb/pull/12437): Add ability to remove a member from org
-
-### Bug Fixes
-
-1. [12302](https://github.com/influxdata/influxdb/pull/12302): Prevent clipping of code snippets in Firefox
-1. [12379](https://github.com/influxdata/influxdb/pull/12379): Prevent clipping of cell edit menus in dashboards
-
-### UI Improvements
-
-1. [12302](https://github.com/influxdata/influxdb/pull/12302): Make code snippet copy functionality easier to use
-1. [12304](https://github.com/influxdata/influxdb/pull/12304): Always show live preview in Note Cell editor
-1. [12317](https://github.com/influxdata/influxdb/pull/12317): Redesign Create Scraper workflow
-1. [12317](https://github.com/influxdata/influxdb/pull/12317): Show warning in Telegrafs and Scrapers lists when user has no buckets
-1. [12384](https://github.com/influxdata/influxdb/pull/12384): Streamline label addition, removal, and creation from the dashboards list
-1. [12464](https://github.com/influxdata/influxdb/pull/12464): Improve label color selection
-
-## v2.0.0-alpha.4 [2019-02-21]
-
-### Features
-
-1. [11954](https://github.com/influxdata/influxdb/pull/11954): Add the ability to run a task manually from tasks page
-1. [11990](https://github.com/influxdata/influxdb/pull/11990): Add the ability to select a custom time range in explorer and dashboard
-1. [12009](https://github.com/influxdata/influxdb/pull/12009): Display the version information on the login page
-1. [12011](https://github.com/influxdata/influxdb/pull/12011): Add the ability to update a Variable's name and query.
-1. [12026](https://github.com/influxdata/influxdb/pull/12026): Add labels to cloned dashboard
-1. [12018](https://github.com/influxdata/influxdb/pull/12057): Add ability to filter resources by label name
-1. [11973](https://github.com/influxdata/influxdb/pull/11973): Add ability to create or add labels to a resource from labels editor
-
-### Bug Fixes
-
-1. [11997](https://github.com/influxdata/influxdb/pull/11997): Update the bucket retention policy to update the time in seconds
-
-### UI Improvements
-
-1. [12016](https://github.com/influxdata/influxdb/pull/12016): Update the preview in the label overlays to be shorter
-1. [12012](https://github.com/influxdata/influxdb/pull/12012): Add notifications to scrapers page for created/deleted/updated scrapers
-1. [12023](https://github.com/influxdata/influxdb/pull/12023): Add notifications to buckets page for created/deleted/updated buckets
-1. [12072](https://github.com/influxdata/influxdb/pull/12072): Update the admin page to display error for password length
-
-## v2.0.0-alpha.3 [2019-02-15]
-
-### Features
-
-1. [11809](https://github.com/influxdata/influxdb/pull/11809): Add the ability to name a scraper target
-1. [11821](https://github.com/influxdata/influxdb/pull/11821): Display scraper name as the first and only updatable column in scrapers list
-1. [11804](https://github.com/influxdata/influxdb/pull/11804): Add the ability to view runs for a task
-1. [11824](https://github.com/influxdata/influxdb/pull/11824): Display last completed run for tasks list
-1. [11836](https://github.com/influxdata/influxdb/pull/11836): Add the ability to view the logs for a specific task run
-
-### Bug Fixes
-
-1. [11819](https://github.com/influxdata/influxdb/pull/11819): Update the inline edit for resource names to guard against empty strings
-1. [11852](https://github.com/influxdata/influxdb/pull/11852): Prevent a new template dashboard from being created on every telegraf config update
-1. [11848](https://github.com/influxdata/influxdb/pull/11848): Fix overlapping buttons in the telegrafs verify data step
-
-### UI Improvements
-
-1. [11764](https://github.com/influxdata/influxdb/pull/11764): Move the download telegraf config button to view config overlay
-1. [11879](https://github.com/influxdata/influxdb/pull/11879): Combine permissions for user by type
-1. [11938](https://github.com/influxdata/influxdb/pull/11938): Add ordering to UI list items
-
-## v2.0.0-alpha.2 [2019-02-07]
-
-### Features
-
-1. [11677](https://github.com/influxdata/influxdb/pull/11677): Add instructions button to view `$INFLUX_TOKEN` setup for telegraf configs
-1. [11693](https://github.com/influxdata/influxdb/pull/11693): Save the \$INFLUX_TOKEN environment variable in telegraf configs (an illustrative example appears below, after the alpha.1 notes)
-1. [11700](https://github.com/influxdata/influxdb/pull/11700): Update Tasks tab on Org page to look like Tasks Page
-1. [11740](https://github.com/influxdata/influxdb/pull/11740): Add view button to view the telegraf config toml
-1. [11522](https://github.com/influxdata/influxdb/pull/11522): Add plugin information step to allow for config naming and configure one plugin at a time
-1. [11758](https://github.com/influxdata/influxdb/pull/11758): Update Dashboards tab on Org page to look like Dashboards Page
-1. [11810](https://github.com/influxdata/influxdb/pull/11810): Add tab for template variables under organizations page
-
-### Bug Fixes
-
-1. [11678](https://github.com/influxdata/influxdb/pull/11678): Update the System Telegraf Plugin bundle to include the swap plugin
-1. [11722](https://github.com/influxdata/influxdb/pull/11722): Revert behavior allowing users to create authorizations on behalf of another user
-
-### UI Improvements
-
-1. [11683](https://github.com/influxdata/influxdb/pull/11683): Change the wording for the plugin config form button to Done
-1. [11689](https://github.com/influxdata/influxdb/pull/11689): Change the wording for the Collectors configure step button to Create and Verify
-1. [11697](https://github.com/influxdata/influxdb/pull/11697): Standardize page loading spinner styles
-1. [11711](https://github.com/influxdata/influxdb/pull/11711): Show checkbox on Save As button in data explorer
-1. [11705](https://github.com/influxdata/influxdb/pull/11705): Make the collectors plugins sidebar visible only in the configure step
-1. [11745](https://github.com/influxdata/influxdb/pull/11745): Swap retention policies on Create bucket page
-
-## v2.0.0-alpha.1 [2019-01-23]
-
-### Release Notes
-
-This is the initial alpha release of InfluxDB 2.0.
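To make the `$INFLUX_TOKEN` flow from the alpha.2 entries above concrete, here is a hedged sketch (the host, port, and config ID are placeholders, not values taken from this changelog): export the token in the shell, then let telegraf fetch the server-hosted config that references it:

```sh
# Placeholder values throughout: substitute your own token, host, and config ID.
export INFLUX_TOKEN=<your-token>
telegraf --config http://localhost:9999/api/v2/telegrafs/<config-id>
```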
+https://dl.influxdata.com/platform/nightlies/master/CHANGELOG.md
diff --git a/CHANGELOG_OLD.md b/CHANGELOG_OLD.md
new file mode 100644
index 00000000000..1f26c3d77ac
--- /dev/null
+++ b/CHANGELOG_OLD.md
@@ -0,0 +1,1478 @@
+## unreleased
+
+### Go Version
+
+This release upgrades the project to `go` version 1.17.
+
+#### Minimum macOS Version
+
+Because of the version bump to `go`, the macOS build for this release requires at least version 10.13 High Sierra to run.
+
+### `influx` CLI moved to separate repository
+
+The `influx` CLI has been moved to its [own GitHub repository](https://github.com/influxdata/influx-cli/). Release artifacts
+produced by `influxdb` are impacted as follows:
+
+* Release archives (`.tar.gz` and `.zip`) no longer contain the `influx` binary.
+* The `influxdb2` package (`.deb` and `.rpm`) no longer contains the `influx` binary. Instead, it declares a recommended
+  dependency on the new `influx-cli` package.
+* The `quay.io/influxdb/influxdb` image no longer contains the `influx` binary. Users are recommended to migrate to the
+  `influxdb` image hosted in Docker Hub.
+
+With this change, versions of the `influx` CLI and `influxd` server are not guaranteed to exactly match. Please use
+`influxd version` or `curl /health` when checking the version of the installed/running server.
+
+### Notebooks and Annotations
+
+Support for Notebooks and Annotations is included with this release.
+
+### SQLite Metadata Store
+
+This release adds an embedded SQLite database for storing metadata required by the latest UI features like Notebooks and Annotations.
+
+### Features
+
+1. [19811](https://github.com/influxdata/influxdb/pull/19811): Add Geo graph type that can be stored in Dashboard cells.
+1. [21218](https://github.com/influxdata/influxdb/pull/21218): Add the properties of a static legend for line graphs and band plots.
+1. [21367](https://github.com/influxdata/influxdb/pull/21367): Listing users via the API now supports pagination.
+1. [21531](https://github.com/influxdata/influxdb/pull/21531): Remove feature flags for permanent UI features.
+1. [21543](https://github.com/influxdata/influxdb/pull/21543): Added `influxd` configuration flag `--sqlite-path` for specifying a user-defined path to the SQLite database file.
+1. [21543](https://github.com/influxdata/influxdb/pull/21543): Updated `influxd` configuration flag `--store` to work with string values `disk` or `memory`. `memory` continues to store metadata in-memory for testing; `disk` will persist metadata to disk via bolt and SQLite.
+1. [21547](https://github.com/influxdata/influxdb/pull/21547): Allow hiding the tooltip independently of the static legend.
+1. [21584](https://github.com/influxdata/influxdb/pull/21584): Added the `api/v2/backup/metadata` endpoint for backing up both KV and SQL metadata, and the `api/v2/restore/sql` endpoint for restoring SQL metadata.
+1. [21635](https://github.com/influxdata/influxdb/pull/21635): Port `influxd inspect verify-seriesfile` to 2.x.
+1. [21621](https://github.com/influxdata/influxdb/pull/21621): Add `storage-wal-max-concurrent-writes` config option to `influxd` to enable tuning memory pressure under heavy write load.
+1. [21621](https://github.com/influxdata/influxdb/pull/21621): Add `storage-wal-max-write-delay` config option to `influxd` to prevent deadlocks when the WAL is overloaded with concurrent writes.
+1. [21615](https://github.com/influxdata/influxdb/pull/21615): Ported the `influxd inspect verify-tsm` command from 1.x.
+1. [21646](https://github.com/influxdata/influxdb/pull/21646): Ported the `influxd inspect verify-tombstone` command from 1.x.
+1. [21761](https://github.com/influxdata/influxdb/pull/21761): Ported the `influxd inspect dump-tsm` command from 1.x.
+1. [21788](https://github.com/influxdata/influxdb/pull/21788): Ported the `influxd inspect report-tsi` command from 1.x.
+1. [21784](https://github.com/influxdata/influxdb/pull/21784): Ported the `influxd inspect dumptsi` command from 1.x.
+1. [21786](https://github.com/influxdata/influxdb/pull/21786): Ported the `influxd inspect deletetsm` command from 1.x.
+1. [21888](https://github.com/influxdata/influxdb/pull/21888): Ported the `influxd inspect dump-wal` command from 1.x.
+1. [21828](https://github.com/influxdata/influxdb/pull/21828): Added the command `influxd inspect verify-wal`.
+1. [21814](https://github.com/influxdata/influxdb/pull/21814): Ported the `influxd inspect report-tsm` command from 1.x.
+1. [21936](https://github.com/influxdata/influxdb/pull/21936): Ported the `influxd inspect build-tsi` command from 1.x.
+1. [21938](https://github.com/influxdata/influxdb/pull/21938): Added a route to delete an individual secret.
+1. [21972](https://github.com/influxdata/influxdb/pull/21972): Added support for notebooks and annotations.
+1. [22072](https://github.com/influxdata/influxdb/pull/22072): Added `--flux-log-enabled` option to `influxd` to show detailed logs for Flux queries.
+1. [22135](https://github.com/influxdata/influxdb/pull/22135): Added a route to return known resources.
+1. [22311](https://github.com/influxdata/influxdb/pull/22311): Add `storage-no-validate-field-size` config to `influxd` to disable enforcement of max field size.
+1. [22316](https://github.com/influxdata/influxdb/pull/22316): Optimize series iteration for queries that can be answered without inspecting TSM data.
+1. [22322](https://github.com/influxdata/influxdb/pull/22322): Add support for `merge_hll`, `sum_hll`, and `count_hll` in InfluxQL.
+
+### Bug Fixes
+
+1. [21648](https://github.com/influxdata/influxdb/pull/21648): Change static legend's `hide` to `show` to let users decide if they want it.
+1. [22186](https://github.com/influxdata/influxdb/pull/22186): Preserve comments in flux queries when saving task definitions.
+1. [22228](https://github.com/influxdata/influxdb/pull/22228): influxdb2 packages should depend on curl.
+1. [22211](https://github.com/influxdata/influxdb/pull/22211): Prevent scheduling an inactivated task after updating it.
+1. [22235](https://github.com/influxdata/influxdb/pull/22235): Avoid compaction queue stats flutter.
+1. [22272](https://github.com/influxdata/influxdb/pull/22272): Requests to `/api/v2/authorizations` filter correctly on `org` and `user` parameters.
+1. [22311](https://github.com/influxdata/influxdb/pull/22311): Enforce max field size while parsing line protocol.
+1. [22334](https://github.com/influxdata/influxdb/pull/22334): Periodically compact old and large TSI files.
+
+## v2.0.8 [2021-08-13]
+
+### Go Version
+
+This release upgrades the project to `go` version 1.16.
+
+#### Minimum macOS Version
+
+Because of the version bump to `go`, the macOS build for this release requires at least version 10.12 Sierra to run.
+
+### Features
+
+1. [21910](https://github.com/influxdata/influxdb/pull/21910): Added `--ui-disabled` option to `influxd` to allow for running with the UI disabled.
+1. [21958](https://github.com/influxdata/influxdb/pull/21958): Telemetry improvements: Do not record telemetry data for non-existent paths; replace invalid static asset paths with a slug.
+1. [22023](https://github.com/influxdata/influxdb/pull/22023): Upgrade Flux to v0.124.0.
+
+### Bug Fixes
+
+1. [21610](https://github.com/influxdata/influxdb/pull/21610): Avoid rewriting `fields.idx` unnecessarily.
+1. [21662](https://github.com/influxdata/influxdb/pull/21662): Do not close connection twice in DigestWithOptions.
+1. [21691](https://github.com/influxdata/influxdb/pull/21691): Remove incorrect optimization for group-by.
+1. [21747](https://github.com/influxdata/influxdb/pull/21747): Rename ARM RPMs with yum-compatible names.
+1. [21800](https://github.com/influxdata/influxdb/pull/21800): Return an error instead of panicking when InfluxQL statement rewrites fail.
+1. [21802](https://github.com/influxdata/influxdb/pull/21802): Removed unused `chronograf-migrator` package & chronograf API service, and updated various "chronograf" references.
+1. [21839](https://github.com/influxdata/influxdb/pull/21839): Fix display and parsing of `influxd upgrade` CLI prompts in PowerShell.
+1. [21840](https://github.com/influxdata/influxdb/pull/21840): Migrate restored KV snapshots to latest schema before using them.
+1. [21844](https://github.com/influxdata/influxdb/pull/21844): Upgrade to latest version of `influxdata/cron` so that tasks can be created with interval of `every: 1w`.
+1. [21849](https://github.com/influxdata/influxdb/pull/21849): Specify which fields are missing when rejecting an incomplete onboarding request.
+1. [21850](https://github.com/influxdata/influxdb/pull/21850): Systemd unit should block on startup until http endpoint is ready.
+1. [21925](https://github.com/influxdata/influxdb/pull/21925): Upgrade to golang-jwt 3.2.1.
+1. [21946](https://github.com/influxdata/influxdb/pull/21946): Prevent silently dropped writes when there are overlapping shards.
+1. [21950](https://github.com/influxdata/influxdb/pull/21950): Invalid requests to /api/v2 subroutes now return 404 instead of a list of links.
+1. [21962](https://github.com/influxdata/influxdb/pull/21962): Flux metaqueries for `_field` take fast path if `_measurement` is the only predicate.
+1. [22059](https://github.com/influxdata/influxdb/pull/22059): Copy names from mmapped memory before closing iterator.
+1. [22174](https://github.com/influxdata/influxdb/pull/22174): Make the systemd service handle 40x responses and block indefinitely on startup.
+
+## v2.0.7 [2021-06-04]
+
+### Features
+
+1. [21539](https://github.com/influxdata/influxdb/pull/21539): Upgrade Flux to v0.117.0.
+1. [21519](https://github.com/influxdata/influxdb/pull/21519): Optimize `table.fill()` execution within Flux aggregate windows.
+
+### Bug Fixes
+
+1. [21318](https://github.com/influxdata/influxdb/pull/21318): Fix off-by-one error in query range calculation over partially compacted data.
+1. [21345](https://github.com/influxdata/influxdb/pull/21345): Deprecate the unsupported `PostSetupUser` API.
+1. [21356](https://github.com/influxdata/influxdb/pull/21356): Disable MergeFiltersRule until it is more stable.
+1. [21369](https://github.com/influxdata/influxdb/pull/21369): Add limits on start and stop times to the `/api/v2/delete` endpoint, with descriptive error messages.
+1. [21375](https://github.com/influxdata/influxdb/pull/21375): Add logging to NATS streaming server to help debug startup failures.
+1. [21477](https://github.com/influxdata/influxdb/pull/21477): Accept `--input` instead of a positional arg in `influx restore`.
+1. [21477](https://github.com/influxdata/influxdb/pull/21477): Print error instead of panicking when `influx restore` fails to find backup manifests.
+1. [21481](https://github.com/influxdata/influxdb/pull/21481): Set last-modified time of empty shard directory to the directory's mod time instead of Unix epoch.
+1. [21486](https://github.com/influxdata/influxdb/pull/21486): Remove erroneous dependency on istio.
+1. [21522](https://github.com/influxdata/influxdb/pull/21522): Replace telemetry file name with slug for `ttf`, `woff`, and `eot` files.
+1. [21540](https://github.com/influxdata/influxdb/pull/21540): Enable use of absolute path for `--upgrade-log` when running `influxd upgrade` on Windows.
+1. [21545](https://github.com/influxdata/influxdb/pull/21545): Make InfluxQL meta queries respect query timeouts.
+
+## v2.0.6 [2021-04-29]
+
+### Bug Fixes
+
+1. [21321](https://github.com/influxdata/influxdb/pull/21321): Ensure query config written by influxd upgrade is valid.
+1. [21324](https://github.com/influxdata/influxdb/pull/21324): Revert to nonzero defaults for `query-concurrency` and `query-queue-size` to avoid validation failures for upgrading users.
+1. [21324](https://github.com/influxdata/influxdb/pull/21324): Don't fail validation when `query-concurrency` is 0 and `query-queue-size` is > 0.
+
+## v2.0.5 [2021-04-27]
+
+### Windows Support
+
+This release includes our initial Windows preview build.
+
+### Breaking Changes
+
+#### /debug/vars removed
+
+Prior to this release, the `influxd` server would always expose profiling information over `/debug/vars`.
+This endpoint was unauthenticated, and not used by InfluxDB systems to report diagnostics. For security and clarity,
+the endpoint has been removed. Use the `/metrics` endpoint to collect system statistics.
+
+#### `influx transpile` removed
+
+The `transpile` command has been retired. Users can send InfluxQL directly to the server via the `/api/v2/query`
+or `/query` HTTP endpoints.
+
+#### Default query concurrency changed
+
+The default setting for the max number of concurrent Flux queries has been changed from 10 to unlimited. Set the
+`query-concurrency` config parameter to > 0 when running `influxd` to re-limit the maximum running query count,
+and the `query-queue-size` config parameter to > 0 to set the max number of queries that can be queued before the
+server starts rejecting requests.
+
+#### Prefix for query-controller metrics changed
+
+The prefix used for Prometheus metrics from the query controller has changed from `query_control_` to `qc_`.
+
+### Features
+
+1. [20621](https://github.com/influxdata/influxdb/pull/20621): Add Swift client library to the data loading section of the UI.
+1. [20307](https://github.com/influxdata/influxdb/pull/20307): Add `influx task retry-failed` command to rerun failed runs.
+1. [20759](https://github.com/influxdata/influxdb/pull/20759): Add additional properties for Mosaic Graph.
+1. [20763](https://github.com/influxdata/influxdb/pull/20763): Add `--compression` option to `influx write` to support GZIP inputs.
+1. [20827](https://github.com/influxdata/influxdb/pull/20827): Add `--pprof-disabled` option to `influxd` to disable exposing profiling information over HTTP.
+1. [20827](https://github.com/influxdata/influxdb/pull/20827): Add `/debug/pprof/all` HTTP endpoint to gather all profiles at once.
+1. [20827](https://github.com/influxdata/influxdb/pull/20827): Upgrade `http.pprof-enabled` config in `influxd upgrade`.
+1. [20911](https://github.com/influxdata/influxdb/pull/20911): Add support for explicitly setting shard-group durations on buckets. Thanks @hinst!
+1. [20882](https://github.com/influxdata/influxdb/pull/20882): Rewrite regex conditions in InfluxQL subqueries for performance. Thanks @yujiahaol68!
+1. [20963](https://github.com/influxdata/influxdb/pull/20963): Add `--metrics-disabled` option to `influxd` to disable exposing Prometheus metrics over HTTP.
+1. [20971](https://github.com/influxdata/influxdb/pull/20971): Add `--http-read-header-timeout`, `--http-read-timeout`, `--http-write-timeout`, and `--http-idle-timeout` options to `influxd`.
+1. [20971](https://github.com/influxdata/influxdb/pull/20971): Set a default `--http-read-header-timeout` of 10s in `influxd`.
+1. [20971](https://github.com/influxdata/influxdb/pull/20971): Set a default `--http-idle-timeout` of 3m in `influxd`.
+1. [20861](https://github.com/influxdata/influxdb/pull/20861): Update Telegraf plugins in UI to include additions and changes in the 1.18 release.
+1. [20894](https://github.com/influxdata/influxdb/pull/20894): Display task IDs in the UI.
+1. [21046](https://github.com/influxdata/influxdb/pull/21046): Write to standard out when `--output-path -` is passed to `influxd inspect export-lp`.
+1. [21006](https://github.com/influxdata/influxdb/pull/21006): Add `-p, --profilers` flag to `influx query` command.
+1. [21090](https://github.com/influxdata/influxdb/pull/21090): Update UI to match InfluxDB Cloud.
+1. [21127](https://github.com/influxdata/influxdb/pull/21127): Allow for disabling concurrency-limits in Flux controller.
+1. [21158](https://github.com/influxdata/influxdb/pull/21158): Replace unique resource IDs (UI assets, backup shards) with slugs to reduce cardinality of telemetry data.
+1. [21235](https://github.com/influxdata/influxdb/pull/21235): HTTP server errors output logs following the standard format.
+1. [21255](https://github.com/influxdata/influxdb/pull/21255): Upgrade Flux to v0.113.0.
+1. [21364](https://github.com/influxdata/influxdb/pull/21364): Update Static Legend properties to allow disabling without nulling.
+
+### Bug Fixes
+
+1. [20705](https://github.com/influxdata/influxdb/pull/20705): Repair swagger to match implementation of DBRPs type.
+1. [19936](https://github.com/influxdata/influxdb/pull/19936): Fix use-after-free bug in series ID iterator. Thanks @foobar!
+1. [20585](https://github.com/influxdata/influxdb/pull/20585): Fix TSM WAL segment size check. Thanks @foobar!
+1. [20754](https://github.com/influxdata/influxdb/pull/20754): Update references to docs site to use current URLs.
+1. [20773](https://github.com/influxdata/influxdb/pull/20773): Fix data race in TSM engine when inspecting tombstone stats.
+1. [20797](https://github.com/influxdata/influxdb/pull/20797): Fix data race in TSM cache. Thanks @StoneYunZhao!
+1. [20811](https://github.com/influxdata/influxdb/pull/20811): Fix TSM WAL segment size computation. Thanks @StoneYunZhao!
+1. [20798](https://github.com/influxdata/influxdb/pull/20798): Deprecate misleading `retentionPeriodHrs` key in onboarding API.
+1. [20819](https://github.com/influxdata/influxdb/pull/20819): Fix Single Stat graphs with thresholds crashing on negative values.
+1. [20809](https://github.com/influxdata/influxdb/pull/20809): Fix InfluxDB port in Flux function UI examples. Thanks @sunjincheng121!
+1. [20827](https://github.com/influxdata/influxdb/pull/20827): Remove unauthenticated, unsupported `/debug/vars` HTTP endpoint.
+1. [20856](https://github.com/influxdata/influxdb/pull/20856): Respect 24-hour clock formats in the UI and allow more choices.
+1. [20875](https://github.com/influxdata/influxdb/pull/20875): Prevent "do not have an execution context" error when parsing Flux options in tasks.
+1. [20932](https://github.com/influxdata/influxdb/pull/20932): Prevent time field names from being formatted in the Table visualization.
+1. [20929](https://github.com/influxdata/influxdb/pull/20929): Log error details when `influxd upgrade` fails to migrate databases.
+1. [20921](https://github.com/influxdata/influxdb/pull/20921): Fix the cipher suite used when TLS strict ciphers are enabled in `influxd`.
+1. [20925](https://github.com/influxdata/influxdb/pull/20925): Fix parse error in UI for tag filters containing regex meta characters.
+1. [21042](https://github.com/influxdata/influxdb/pull/21042): Prevent concurrent access panic when gathering bolt metrics.
+1. [21127](https://github.com/influxdata/influxdb/pull/21127): Fix race condition in Flux controller shutdown.
+1. [21228](https://github.com/influxdata/influxdb/pull/21228): Reduce lock contention when adding new fields and measurements.
+1. [21232](https://github.com/influxdata/influxdb/pull/21232): Escape dots in community templates hostname regex.
+1. [21140](https://github.com/influxdata/influxdb/pull/21140): Use descending cursor when needed in pushed-down aggregate Flux queries.
+
+## v2.0.4 [2021-02-08]
+
+### Docker
+
+#### ARM64
+
+This release extends the Docker builds hosted in `quay.io` to support the `linux/arm64` platform.
+
+#### 2.x nightly images
+
+Prior to this release, competing nightly builds caused the `nightly` Docker tag to contain outdated
+binaries. This conflict has been fixed, and the image tagged with `nightly` will now contain `2.x`
+binaries built from the `HEAD` of the `master` branch.
+
+### Breaking Changes
+
+#### inmem index option removed
+
+This release fully removes the `inmem` indexing option, along with the associated config options:
+
+- `max-series-per-database`
+- `max-values-per-tag`
+
+Replacement `tsi1` indexes will be automatically generated on startup for shards that need it.
+ +#### Artifact naming conventions + +The names of artifacts produced by our nightly & release builds have been updated according to the +[Google developer guidelines](https://developers.google.com/style/filenames). Underscores (`_`) have +been replaced by hyphens (`-`) in nearly all cases; the one exception is the use of `x86_64` in our +RPM packages, which has been left unchanged. + +### Features + +1. [20473](https://github.com/influxdata/influxdb/pull/20473): Add `--overwrite-existing-v2` flag to `influxd upgrade` to overwrite existing files at output paths (instead of aborting). +1. [20524](https://github.com/influxdata/influxdb/pull/20524): Add `influxd print-config` command to support automated config inspection. +1. [20561](https://github.com/influxdata/influxdb/pull/20561): Add `nats-port` config option for `influxd` server. +1. [20564](https://github.com/influxdata/influxdb/pull/20564): Add `nats-max-payload-bytes` config option for `influxd` server. +1. [20467](https://github.com/influxdata/influxdb/pull/20467): Add `influxd inspect export-lp` command to extract data in line-protocol format. +1. [20604](https://github.com/influxdata/influxdb/pull/20604): Update telegraf plugins list in UI to include Beat, Intel PowerStats, and Rienmann. +1. [20634](https://github.com/influxdata/influxdb/pull/20634): Promote schema and fill query optimizations to default behavior. +1. [20678](https://github.com/influxdata/influxdb/pull/20678): Upgrade Flux to v0.104.0 +1. [20680](https://github.com/influxdata/influxdb/pull/20680): UI: Upgrade flux-lsp-browser to v0.5.31 + +### Bug Fixes + +1. [20339](https://github.com/influxdata/influxdb/pull/20339): Include upgrade helper script in goreleaser manifest. +1. [20348](https://github.com/influxdata/influxdb/pull/20348): Don't show the upgrade notice on fresh `influxdb2` installs. +1. [20348](https://github.com/influxdata/influxdb/pull/20348): Ensure `config.toml` is initialized on fresh `influxdb2` installs. +1. [20349](https://github.com/influxdata/influxdb/pull/20349): Ensure `influxdb` service sees default env variables when running under `init.d`. +1. [20317](https://github.com/influxdata/influxdb/pull/20317): Don't ignore failures to set password during initial user onboarding. +1. [20362](https://github.com/influxdata/influxdb/pull/20362): Don't overwrite stack name/description on `influx stack update`. +1. [20355](https://github.com/influxdata/influxdb/pull/20355): Fix timeout setup for `influxd` graceful shutdown. +1. [20387](https://github.com/influxdata/influxdb/pull/20387): Improve error message shown when `influx` CLI can't find an org by name. +1. [20380](https://github.com/influxdata/influxdb/pull/20380): Remove duplication from task error messages. +1. [20313](https://github.com/influxdata/influxdb/pull/20313): Automatically build `tsi1` indexes for shards that need it instead of falling back to `inmem`. +1. [20313](https://github.com/influxdata/influxdb/pull/20313): Fix logging initialization for storage engine. +1. [20442](https://github.com/influxdata/influxdb/pull/20442): Don't return 500 codes for partial write failures. +1. [20440](https://github.com/influxdata/influxdb/pull/20440): Add confirmation step w/ file sizes before copying data files in `influxd upgrade`. +1. 
[20409](https://github.com/influxdata/influxdb/pull/20409): Improve messages in DBRP API validation errors. +1. [20489](https://github.com/influxdata/influxdb/pull/20489): Improve error message when opening BoltDB with unsupported file system options. +1. [20490](https://github.com/influxdata/influxdb/pull/20490): Fix silent failure to register CLI args as required. +1. [20522](https://github.com/influxdata/influxdb/pull/20522): Fix loading config when INFLUXD_CONFIG_PATH points to a `.yml` file. +1. [20527](https://github.com/influxdata/influxdb/pull/20527): Don't leak .tmp files while backing up shards. +1. [20527](https://github.com/influxdata/influxdb/pull/20527): Allow backups to complete while a snapshot is in progress. +1. [20539](https://github.com/influxdata/influxdb/pull/20539): Prevent extra output row from GROUP BY crossing DST boundary. +1. [20548](https://github.com/influxdata/influxdb/pull/20548): Prevent panic in `influxd upgrade` when V1 users exist and no V1 config is given. +1. [20565](https://github.com/influxdata/influxdb/pull/20565): Set correct Content-Type on v1 query responses. +1. [20565](https://github.com/influxdata/influxdb/pull/20565): Update V1 API spec to document all valid Accept headers and matching Content-Types. +1. [20578](https://github.com/influxdata/influxdb/pull/20578): Respect the --skip-verify flag when running `influx query`. +1. [20495](https://github.com/influxdata/influxdb/pull/20495): Update Flux functions list in UI to reflect that `v1` package was renamed to `schema`. +1. [20669](https://github.com/influxdata/influxdb/pull/20669): Remove blank lines from payloads sent by `influx write`. +1. [20657](https://github.com/influxdata/influxdb/pull/20657): Allow for creating users without initial passwords in `influx user create`. +1. [20679](https://github.com/influxdata/influxdb/pull/20679): Fix incorrect "bucket not found" errors when passing `--bucket-id` to `influx write`. +1. [20702](https://github.com/influxdata/influxdb/pull/20702): Fix loading config when `INFLUXD_CONFIG_PATH` points to a directory with `.` in its name. +1. [20678](https://github.com/influxdata/influxdb/pull/20678): Fix infinite loop in Flux parser caused by invalid array expressions. +1. [20360](https://github.com/influxdata/influxdb/pull/20360): Update API spec to document Flux dictionary features. + +## v2.0.3 [2020-12-14] + +### ARM Support + +This release includes our initial ARM64 preview build. + +### Breaking Changes + +#### influxd upgrade + +Previously, `influxd upgrade` would attempt to write upgraded `config.toml` files into the same directory as the source +`influxdb.conf` file. If this failed, a warning would be logged and `config.toml` would be written into the `HOME` directory. + +This release breaks this behavior in two ways: + +1. By default, `config.toml` is now written into the same directory as the Bolt DB and engine files (`~/.influxdbv2/`) +2. If writing upgraded config fails, the `upgrade` process exits with an error instead of falling back to the `HOME` directory + +Users can use the new `--v2-config-path` option to override the output path for upgraded config if they can't or don't +want to use the default. + +#### v2 packaging + +Based on community feedback, the v2 deb and rpm packaging has been improved to avoid confusion between versions. 
The package +name is now influxdb2 and conflicts with any previous influxdb package (including initial 2.0.0, 2.0.1, and 2.0.2 packages). +Additionally, v2 specific path defaults are now defined and helper scripts are provided for `influxd upgrade` and cleanup cases. + +### Features + +1. [20123](https://github.com/influxdata/influxdb/pull/20123): Allow password to be specified as a CLI option in `influx v1 auth create`. +1. [20123](https://github.com/influxdata/influxdb/pull/20123): Allow password to be specified as a CLI option in `influx v1 auth set-password`. +1. [20110](https://github.com/influxdata/influxdb/pull/20110): Allow for users to specify where V2 config should be written in `influxd upgrade`. +1. [20204](https://github.com/influxdata/influxdb/pull/20204): Improve ID-related error messages for `influx v1 dbrp` commands. +1. [20236](https://github.com/influxdata/influxdb/pull/20236): Delete with predicate. +1. [20322](https://github.com/influxdata/influxdb/pull/20322): Upgrade Flux to v0.99.0. +1. [20327](https://github.com/influxdata/influxdb/pull/20327): Upgrade flux-lsp-browser to v0.5.26. + +### Bug Fixes + +1. [20110](https://github.com/influxdata/influxdb/pull/20110): Use V2 directory for default V2 config path in `influxd upgrade`. +1. [20137](https://github.com/influxdata/influxdb/pull/20137): Fix panic when writing a point with 100 tags. Thanks @foobar! +1. [20151](https://github.com/influxdata/influxdb/pull/20151): Don't log bodies of V1 write requests. +1. [20097](https://github.com/influxdata/influxdb/pull/20097): Ensure Index.Walk fetches matching foreign keys only. +1. [20149](https://github.com/influxdata/influxdb/pull/20149): Enforce max value of 2147483647 on query concurrency to avoid startup panic. +1. [20149](https://github.com/influxdata/influxdb/pull/20149): Enforce max value of 2147483647 on query queue size to avoid startup panic. +1. [20168](https://github.com/influxdata/influxdb/pull/20168): Auto-migrate existing DBRP mappings from old schema to avoid panic. +1. [20201](https://github.com/influxdata/influxdb/pull/20201): Optimize shard lookup in groups containing only one shard. Thanks @StoneYunZhao! +1. [20155](https://github.com/influxdata/influxdb/pull/20155): Respect the `--name` option in `influx setup` whether configs already exist or not. +1. [20155](https://github.com/influxdata/influxdb/pull/20155): Allow for 0 (infinite) values for `--retention` in `influx setup`. +1. [20305](https://github.com/influxdata/influxdb/pull/20305): Set v2 default paths and provide upgrade helper scripts in release packages + +## v2.0.2 [2020-11-19] + +### Features + +1. [19979](https://github.com/influxdata/influxdb/pull/19979): Added functionality to filter task runs by time. +1. [20036](https://github.com/influxdata/influxdb/pull/20036): Warn if V1 users are upgraded, but V1 auth wasn't enabled. +1. [20039](https://github.com/influxdata/influxdb/pull/20039): Export 1.x CQs as part of `influxd upgrade`. +1. [20053](https://github.com/influxdata/influxdb/pull/20053): Upgrade Flux to v0.95.0. +1. [20058](https://github.com/influxdata/influxdb/pull/20058): UI: Upgrade flux-lsp-browser to v0.5.23. +1. [20067](https://github.com/influxdata/influxdb/pull/20067): Add DBRP cli commands as `influxd v1 dbrp`. + +### Bug Fixes + +1. 
[19987](https://github.com/influxdata/influxdb/pull/19987): Fix various typos. Thanks @kumakichi! +1. [19991](https://github.com/influxdata/influxdb/pull/19991): Use --skip-verify flag for backup/restore CLI command. +1. [19995](https://github.com/influxdata/influxdb/pull/19995): Don't auto-print help on influxd errors +1. [20008](https://github.com/influxdata/influxdb/pull/20008): Add locking during TSI iterator creation. +1. [20012](https://github.com/influxdata/influxdb/pull/20012): Validate input paths to `influxd upgrade` up-front. +1. [20015](https://github.com/influxdata/influxdb/pull/20015): Add same site strict flag to session cookie. +1. [20017](https://github.com/influxdata/influxdb/pull/20017): Don't include duplicates for SHOW DATABASES +1. [20064](https://github.com/influxdata/influxdb/pull/20064): Ensure Flux reads across all shards. +1. [20047](https://github.com/influxdata/influxdb/pull/20047): Allow scraper to ignore insecure certificates on a target. Thanks @cmackenzie1! +1. [20076](https://github.com/influxdata/influxdb/pull/20076): Remove internal `influxd upgrade` subcommands from help text. +1. [20074](https://github.com/influxdata/influxdb/pull/20074): Use default DBRP mapping on V1 write when no RP is specified. +1. [20091](https://github.com/influxdata/influxdb/pull/20091): Make the DBRP http API match the swagger spec. + +## v2.0.1 [2020-11-10] + +### Bug Fixes + +1. [19918](https://github.com/influxdata/influxdb/pull/19918): Swagger: add operationId to /delete +1. [19967](https://github.com/influxdata/influxdb/pull/19967): Upgrade: add log-level option +1. [19969](https://github.com/influxdata/influxdb/pull/19969): Check for existing 2.x CLI configs file +1. [19971](https://github.com/influxdata/influxdb/pull/19971): Swagger: remove Invites from swagger +1. [19972](https://github.com/influxdata/influxdb/pull/19972): Remove obsolete unused option (influx-command-path) +1. [19980](https://github.com/influxdata/influxdb/pull/19980): check write permission in legacy write path + +## v2.0.0 [2020-11-09] + +### Features + +1. [19935](https://github.com/influxdata/influxdb/pull/19935): Improve the UI for the influx v1 auth commands +1. [19940](https://github.com/influxdata/influxdb/pull/19940): Update Flux to v0.94.0 +1. [19943](https://github.com/influxdata/influxdb/pull/19943): Upgrade flux-lsp-browser to v0.5.22 +1. [19946](https://github.com/influxdata/influxdb/pull/19946): Adding RAS telegraf input + +### Bug Fixes + +1. [19924](https://github.com/influxdata/influxdb/pull/19924): Remove unused 'security-script' option from upgrade command +1. [19925](https://github.com/influxdata/influxdb/pull/19937): Create CLI configs in `influxd upgrade` +1. [19928](https://github.com/influxdata/influxdb/pull/19928): Fix parsing of retention policy CLI args in `influx setup` and `influxd upgrade` +1. [19930](https://github.com/influxdata/influxdb/pull/19930): Replace 0 with MaxInt when upgrading query-concurrency +1. [19937](https://github.com/influxdata/influxdb/pull/19937): Create CLI configs +1. [19939](https://github.com/influxdata/influxdb/pull/19939): Make influxd help more specific +1. [19945](https://github.com/influxdata/influxdb/pull/19945): Allow write-only V1 tokens to find DBRPs +1. 
[19947](https://github.com/influxdata/influxdb/pull/19947): Updating v1 auth description +1. [19952](https://github.com/influxdata/influxdb/pull/19952): Use `db`/`rp` naming convention when migrating DBs to buckets +1. [19956](https://github.com/influxdata/influxdb/pull/19956): Improve help for --no-password switch +1. [19959](https://github.com/influxdata/influxdb/pull/19959): Use 10 instead of MaxInt when rewriting query-concurrency +1. [19960](https://github.com/influxdata/influxdb/pull/19960): Remove bucket and mapping auto-creation from v1 /write API +1. [19885](https://github.com/influxdata/influxdb/pull/19875): Misuse of reflect.SliceHeader + +## v2.0.0-rc.4 [2020-11-05] + +### Features + +1. [19854](https://github.com/influxdata/influxdb/pull/19854): Use v1 authorization for users upgrade +1. [19855](https://github.com/influxdata/influxdb/pull/19855): Enable window pushdowns +1. [19864](https://github.com/influxdata/influxdb/pull/19864): Implement backup/restore CLI subcommands +1. [19865](https://github.com/influxdata/influxdb/pull/19865): Implementation of v1 authorization +1. [19879](https://github.com/influxdata/influxdb/pull/19879): Make sure the query plan nodes have unique ids +1. [19881](https://github.com/influxdata/influxdb/pull/19881): Update Flux to v0.93.0 + +### Bug Fixes + +1. [19685](https://github.com/influxdata/influxdb/pull/19685): Cloning tasks makes actions shared in task list view +1. [19712](https://github.com/influxdata/influxdb/pull/19712): Reduce filesize of influx binary +1. [19819](https://github.com/influxdata/influxdb/pull/19819): Isolate telegraf config service and remove URM interactions +1. [19853](https://github.com/influxdata/influxdb/pull/19853): Use updated HTTP client for authorization service +1. [19856](https://github.com/influxdata/influxdb/pull/19856): Make tagKeys and tagValues work for edge cases involving fields +1. [19870](https://github.com/influxdata/influxdb/pull/19870): Correctly parse float as 64-bits +1. [19873](https://github.com/influxdata/influxdb/pull/19873): Add simple metrics related to installed templates +1. [19885](https://github.com/influxdata/influxdb/pull/19885): Remove extra multiplication of retention policies in onboarding +1. [19887](https://github.com/influxdata/influxdb/pull/19887): Use fluxinit package to init flux library instead of builtin +1. [19886](https://github.com/influxdata/influxdb/pull/19886): Add Logger to constructor function to ensure log field is initialized +1. [19894](https://github.com/influxdata/influxdb/pull/19894): Return empty iterator instead of null in tagValues +1. [19899](https://github.com/influxdata/influxdb/pull/19899): Docs: flux 0.92 functions +1. [19908](https://github.com/influxdata/influxdb/pull/19908): Fix /ready response content type + +## v2.0.0-rc.3 [2020-10-29] + +### Features + +1. [19807](https://github.com/influxdata/influxdb/pull/19807): Enable window agg mean pushdown +1. [19813](https://github.com/influxdata/influxdb/pull/19813): Aggregate array cursors +1. [19815](https://github.com/influxdata/influxdb/pull/19815): Create a v1 authorization service +1. [19826](https://github.com/influxdata/influxdb/pull/19826): Update FLux to v0.91.0 +1. 
[19829](https://github.com/influxdata/influxdb/pull/19829): Extend CLI with v1 authorization commands +1. [19839](https://github.com/influxdata/influxdb/pull/19839): Add tick generation properties and legendColorizeRows +1. [19840](https://github.com/influxdata/influxdb/pull/19840): Add bcrypt password support to v1 authorizations +1. [19850](https://github.com/influxdata/influxdb/pull/19850): Update generate ticks into an array of properties for each axis + +### Bug Fixes + +1. [19784](https://github.com/influxdata/influxdb/pull/19784): UI: bump papaparse from 4.6.3 to 5.2.0 +1. [19802](https://github.com/influxdata/influxdb/pull/19802): Docs: update PostDBRP docs to reflect mutual exclusive requirement of org vs orgID +1. [19804](https://github.com/influxdata/influxdb/pull/19804): Notifications: move rule service into own package +1. [19816](https://github.com/influxdata/influxdb/pull/19816): Type-convert fs.Bavail for portability +1. [19818](https://github.com/influxdata/influxdb/pull/19818): Notifications: isolate endpoint service +1. [19823](https://github.com/influxdata/influxdb/pull/19823): Clear Logout +1. [19825](https://github.com/influxdata/influxdb/pull/19825): Docs: Update FUZZ.md +1. [19828](https://github.com/influxdata/influxdb/pull/19828): Add 1.x compatible endpoints to swagger +1. [19833](https://github.com/influxdata/influxdb/pull/19833): allow newIndexSeriesCursor() to accept an influxql.Expr +1. [19834](https://github.com/influxdata/influxdb/pull/19834): Docs: Fix typos in http/swagger.yml +1. [19836](https://github.com/influxdata/influxdb/pull/19836): UI: import flux-lsp v0.5.21 +1. [19846](https://github.com/influxdata/influxdb/pull/19846): prune some unreferenced packages + +## v2.0.0-rc.2 [2020-10-21] + +### Features + +1. [19725](https://github.com/influxdata/influxdb/pull/19725): Add window agg result set +1. [19740](https://github.com/influxdata/influxdb/pull/19740): Provide means to remove stack without confirmation +1. [19750](https://github.com/influxdata/influxdb/pull/19750): Return error on failed resource addition +1. [19774](https://github.com/influxdata/influxdb/pull/19774): Update Flux to v0.90.0 + +### Bug Fixes + +1. [19465](https://github.com/influxdata/influxdb/pull/19465): Use valid flux in pkger test templates +1. [19773](https://github.com/influxdata/influxdb/pull/19773): Upgrade: fallback to user's home when saving upgraded config +1. [19775](https://github.com/influxdata/influxdb/pull/19775): Telegraf plugin updates (remove RAS for now) +1. [19776](https://github.com/influxdata/influxdb/pull/19776): TimeMachine: change derivative to 1s +1. [19789](https://github.com/influxdata/influxdb/pull/19789): Launcher: Switch to AuthorizationService from authorization package +1. [19780](https://github.com/influxdata/influxdb/pull/19780): Upgrade: proper default 2.x config filename +1. [19781](https://github.com/influxdata/influxdb/pull/19781): Upgrade: fixing typos and grammar errors + +## v2.0.0-rc.1 [2020-10-14] + +### Features + +1. [19641](https://github.com/influxdata/influxdb/pull/19641): Added `influx upgrade` command for upgrading from 1.x to 2.0 +1. [19746](https://github.com/influxdata/influxdb/pull/19746): Added Intel RDT and RAS Daemon telegraf plugins +1. 
[19731](https://github.com/influxdata/influxdb/pull/19731): Upgraded Flux to v0.89.0 + +### Bug Fixes + +1. [19708](https://github.com/influxdata/influxdb/pull/19708): Scrapers not working in RC0 +1. [19732](https://github.com/influxdata/influxdb/pull/19732): Update default value of list tasks influx CLI command to 100 +1. [19710](https://github.com/influxdata/influxdb/pull/19710): InfluxDB Templates: allow same duration unit identifiers that the tasks api allows +1. [19700](https://github.com/influxdata/influxdb/pull/19700): InfluxDB Templates: preserve cell colors on export/import +1. [19695](https://github.com/influxdata/influxdb/pull/19695): Influx CLI fix an issue where a single telegraf config was not being returned +1. [19593](https://github.com/influxdata/influxdb/pull/19593): Don't allow short passwords in `influx setup` + +## v2.0.0-rc.0 [2020-09-29] + +### Breaking Changes + +In the interests of simplifying the migration for existing users of InfluxDB 1.x, this +release includes significant breaking changes. + +**Upgrading from previous beta builds of `influxd` is not supported** + +In order to continue using `influxd` betas, users will be required to move all existing +data out of their `~/.influxdbv2` (or equivalent) path, including `influxd.bolt`. This +means all existing dashboards, tasks, integrations, alerts, users and tokens will need to +be recreated. The `influx export all` command may be used to export and re-import most +of this data. + +At this time, there is no tooling to convert existing time series data from previous +beta releases. If data from a prior beta release is found, `influxd` will refuse to start. + +We have also changed the default port of InfluxDB from 9999 back to 8086. If you still would like +to run on port 9999, you can start influxd with the `--http-bind-address` option. You will also +need to update any InfluxDB CLI config profiles with the new port number. + +1. [19446](https://github.com/influxdata/influxdb/pull/19446): Port TSM1 storage engine +1. [19494](https://github.com/influxdata/influxdb/pull/19494): Changing the default port from 9999 to 8086 +1. [19636](https://github.com/influxdata/influxdb/pull/19636): Disable unimplemented delete with predicate API + +### Features + +1. [18779](https://github.com/influxdata/influxdb/pull/18779): Add new processing options and enhancements to influx write. +1. [19246](https://github.com/influxdata/influxdb/pull/19246): Redesign load data page to increase discovery and ease of use +1. [19334](https://github.com/influxdata/influxdb/pull/19334): Add --active-config flag to influx to set config for single command +1. [19219](https://github.com/influxdata/influxdb/pull/19219): List buckets via the API now supports after (ID) parameter as an alternative to offset. +1. [19390](https://github.com/influxdata/influxdb/pull/19390): Record last success and failure run times in the Task +1. [19402](https://github.com/influxdata/influxdb/pull/19402): Inject Task's LatestSuccess Timestamp In Flux Extern +1. [19433](https://github.com/influxdata/influxdb/pull/19433): Add option to dump raw query results in CLI +1. [19506](https://github.com/influxdata/influxdb/pull/19506): Add TSM 1.x storage options as flags +1. [19508](https://github.com/influxdata/influxdb/pull/19508): Add subset of InfluxQL coordinator options as flags +1. 
[19457](https://github.com/influxdata/influxdb/pull/19457): Add ability to export resources by name via the CLI +1. [19640](https://github.com/influxdata/influxdb/pull/19640): Turn on Community Templates +1. [19663](https://github.com/influxdata/influxdb/pull/19663): Added InfluxDB v2 Listener, NSD, OPC-UA, and Windows Event Log to the sources page +1. [19662](https://github.com/influxdata/influxdb/pull/19662): Add `max-line-length` switch to `influx write` command to address `token too long` errors for large inputs +1. [19660](https://github.com/influxdata/influxdb/pull/19660): Add --rate-limit option to `influx write`. +1. [19740](https://github.com/influxdata/influxdb/pull/19740): Add `--force` option to `influx stack rm` to skip confirmation + +### Bug Fixes + +1. [19331](https://github.com/influxdata/influxdb/pull/19331): Add description to auth influx command outputs. +1. [19392](https://github.com/influxdata/influxdb/pull/19392): Include the edge of the boundary we are observing. +1. [19453](https://github.com/influxdata/influxdb/pull/19453): Warn about duplicate tag names during influx write csv. +1. [19466](https://github.com/influxdata/influxdb/pull/19466): Do not override existing line part in group annotation. +1. [19637](https://github.com/influxdata/influxdb/pull/19637): Added PATCH to the list of allowed methods + +## v2.0.0-beta.16 [2020-08-07] + +### Breaking + +1. [19066](https://github.com/influxdata/influxdb/pull/19066): Drop deprecated /packages route tree +1. [19116](https://github.com/influxdata/influxdb/pull/19116): Support more types for template envRef default value and require explicit default values +1. [19104](https://github.com/influxdata/influxdb/pull/19104): Remove orgs/labels nested routes from the API. +1. [19653](https://github.com/influxdata/influxdb/pull/19653): Remove PointBatcher from tsdb package API + +### Features + +1. [19075](https://github.com/influxdata/influxdb/pull/19075): Add resource links to a stack's resources from public HTTP API list/read calls +1. [19103](https://github.com/influxdata/influxdb/pull/19103): Enhance resource creation experience when limits are reached +1. [19223](https://github.com/influxdata/influxdb/pull/19223): Add dashboards command to influx CLI +1. [19225](https://github.com/influxdata/influxdb/pull/19225): Allow user onboarding to optionally set passwords +1. [18841](https://github.com/influxdata/influxdb/pull/18841): Limit query response sizes for queries built in QueryBuilder by requiring an aggregate window +1. [19135](https://github.com/influxdata/influxdb/pull/19135): Add telegram notification. + +### Bug Fixes + +1. [19043](https://github.com/influxdata/influxdb/pull/19043): Enforce all influx CLI flag args are valid +1. [19188](https://github.com/influxdata/influxdb/pull/19188): Dashboard cells correctly map results when multiple queries exist +1. [19146](https://github.com/influxdata/influxdb/pull/19146): Dashboard cells and overlay use UTC as query time when toggling to UTC timezone +1. [19222](https://github.com/influxdata/influxdb/pull/19222): Bucket names may not include quotation marks +1. [19317](https://github.com/influxdata/influxdb/pull/19317): Add validation to Variable name creation for valid Flux identifiers. + +### UI Improvements + +1. 
[19231](https://github.com/influxdata/influxdb/pull/19231): Alerts page filter inputs now have tab indices for keyboard navigation +1. [19364](https://github.com/influxdata/influxdb/pull/19364): Errors in OSS are now properly printed to the console + +## v2.0.0-beta.15 [2020-07-23] + +### Breaking + +1. [19004](https://github.com/influxdata/influxdb/pull/19004): Removed the `migrate` command from the `influxd` binary. +1. [18921](https://github.com/influxdata/influxdb/pull/18921): Restricted UI variable names to not clash with Flux reserved words + +### Features + +1. [18888](https://github.com/influxdata/influxdb/pull/18888): Add event source to influx stack operations +1. [18910](https://github.com/influxdata/influxdb/pull/18910): Add uninstall functionality for stacks +1. [18912](https://github.com/influxdata/influxdb/pull/18912): Drop deprecated influx pkg command tree +1. [18997](https://github.com/influxdata/influxdb/pull/18997): Add telegraf management commands to influx CLI +1. [19030](https://github.com/influxdata/influxdb/pull/19030): Enable dynamic destination for the influx CLI configs file +1. [19029](https://github.com/influxdata/influxdb/pull/19029): Navigating away from a dashboard cancels all pending queries +1. [19003](https://github.com/influxdata/influxdb/pull/19003): Upgrade to Flux v0.74.0 +1. [19040](https://github.com/influxdata/influxdb/pull/19040): Drop the REPL command from influx CLI +1. [19032](https://github.com/influxdata/influxdb/pull/19032): Redesign asset & rate limit alerts + +### Bug Fixes + +1. [18891](https://github.com/influxdata/influxdb/pull/18891): Allow 0 to be the custom set minimum value for Y Domain +1. [18969](https://github.com/influxdata/influxdb/pull/18969): Single Stat cells should render properly in Safari again +1. [18974](https://github.com/influxdata/influxdb/pull/18974): Limit variable querying when submitting queries to used variables +1. [19039](https://github.com/influxdata/influxdb/pull/19039): Fix an issue where switching orgs was not redirecting correctly +1. [18989](https://github.com/influxdata/influxdb/pull/18989): Stopped fetching tags in the advanced builder +1. [19044](https://github.com/influxdata/influxdb/pull/19044): Graph customization: X and Y axis properly accept values + +## v2.0.0-beta.14 [2020-07-08] + +### Features + +1. [18758](https://github.com/influxdata/influxdb/pull/18758): Extend influx stacks update cmd with ability to add resources without apply template +1. [18793](https://github.com/influxdata/influxdb/pull/18793): Normalize InfluxDB templates under new /api/v2/templates and /api/v2/stacks public API +1. [18818](https://github.com/influxdata/influxdb/pull/18818): Extend template Summary and Diff nested types with kind identifiers +1. [18857](https://github.com/influxdata/influxdb/pull/18857): Flux updated to v0.71.1 +1. [18805](https://github.com/influxdata/influxdb/pull/18805): Added static builds for Linux + +### Bug Fixes + +1. [18878](https://github.com/influxdata/influxdb/pull/18878): Don't overwrite build date set via ldflags +1. [18842](https://github.com/influxdata/influxdb/pull/18842): Fixed an issue where define query was unusable after importing a Check +1. 
[18845](https://github.com/influxdata/influxdb/pull/18845): Update documentation links + +## v2.0.0-beta.13 [2020-06-25] + +### Features + +1. [18387](https://github.com/influxdata/influxdb/pull/18387): Integrate query cancellation after queries have been submitted +1. [18515](https://github.com/influxdata/influxdb/pull/18515): Extend templates with the source file|url|reader. +1. [18539](https://github.com/influxdata/influxdb/pull/18539): Collect stats on installed influxdata community template usage. +1. [18541](https://github.com/influxdata/influxdb/pull/18541): Pkger allow raw github.com host URLs for yaml|json|jsonnet URLs +1. [18546](https://github.com/influxdata/influxdb/pull/18546): Influx allow for files to be remotes for all template commands +1. [18560](https://github.com/influxdata/influxdb/pull/18560): Extend stacks API with update capability +1. [18568](https://github.com/influxdata/influxdb/pull/18568): Add support for config files to influxd and any cli.NewCommand use case +1. [18573](https://github.com/influxdata/influxdb/pull/18573): Extend influx stacks cmd with new influx stacks update cmd +1. [18595](https://github.com/influxdata/influxdb/pull/18595): Add ability to skip resources in a template by kind or by metadata.name +1. [18600](https://github.com/influxdata/influxdb/pull/18600): Extend influx apply with resource filter capabilities +1. [18601](https://github.com/influxdata/influxdb/pull/18601): Provide active config running influx config without args +1. [18606](https://github.com/influxdata/influxdb/pull/18606): Enable influxd binary to look for a config file on startup +1. [18647](https://github.com/influxdata/influxdb/pull/18647): Add support for env ref default values to the template parser +1. [18655](https://github.com/influxdata/influxdb/pull/18655): Add support for platform variable selected field to templates + +### Bug Fixes + +1. [18602](https://github.com/influxdata/influxdb/pull/18602): Fix uint overflow during setup on 32bit systems +1. [18623](https://github.com/influxdata/influxdb/pull/18623): Drop support for --local flag within influx CLI +1. [18632](https://github.com/influxdata/influxdb/pull/18632): Prevents undefined queries in cells from erroring out in dashboards +1. [18649](https://github.com/influxdata/influxdb/pull/18649): Fixes bucket selection issue and query builder state +1. [18658](https://github.com/influxdata/influxdb/pull/18658): Add support for 'd' day and 'w' week time identifiers in the CLI for bucket and setup commands +1. [18581](https://github.com/influxdata/influxdb/pull/18581): Cache dashboard cell query results to use as a reference for cell configurations +1. [18707](https://github.com/influxdata/influxdb/pull/18707): Validate host-url for influx config create/set commands +1. [18713](https://github.com/influxdata/influxdb/pull/18713): Fix influx CLI flags to accurately depict flags for all commands + +## v2.0.0-beta.12 [2020-06-12] + +### Features + +1. [18279](https://github.com/influxdata/influxdb/pull/18279): Make all pkg applications stateful via stacks +1. [18322](https://github.com/influxdata/influxdb/pull/18322): Add ability to export a stack's existing (as they are in the platform) resource state as a pkg +1. 
[18334](https://github.com/influxdata/influxdb/pull/18334): Update influx pkg commands with improved usage and examples in long form. +1. [18344](https://github.com/influxdata/influxdb/pull/18344): Extend influx CLI with version and User-Agent. +1. [18355](https://github.com/influxdata/influxdb/pull/18355): Integrate RedirectTo functionality so CLOUD users now get navigated back to the originally linked page after login +1. [18392](https://github.com/influxdata/influxdb/pull/18392): Consolidate pkg influx commands under templates. This removes some nesting of the CLI commands as part of that. +1. [18400](https://github.com/influxdata/influxdb/pull/18400): Dashboards maintain sort order after navigating away +1. [18480](https://github.com/influxdata/influxdb/pull/18480): Allows tasks to open in new tabs +1. [18553](https://github.com/influxdata/influxdb/pull/18553): Update usage and soften comparisons for kind matching on 'influx export --resourceType' cmd + +### Bug Fixes + +1. [18331](https://github.com/influxdata/influxdb/pull/18331): Support organization name in addition to ID in DBRP operations +1. [18335](https://github.com/influxdata/influxdb/pull/18335): Disable failing when providing an unexpected error to influx CLI +1. [18345](https://github.com/influxdata/influxdb/pull/18345): Have influx delete cmd respect the config +1. [18385](https://github.com/influxdata/influxdb/pull/18385): Store initialization for pkger enforced on reads +1. [18434](https://github.com/influxdata/influxdb/pull/18434): Backfill missing fillColumns field for histograms in pkger +1. [18471](https://github.com/influxdata/influxdb/pull/18471): Notifies the user how to escape presentation mode when the feature is toggled + +### UI Improvements + +1. [18319](https://github.com/influxdata/influxdb/pull/18319): Display bucket ID in bucket list and enable 1 click copying +1. [18361](https://github.com/influxdata/influxdb/pull/18361): Tokens list is now consistent with the other resource lists +1. [18346](https://github.com/influxdata/influxdb/pull/18346): Reduce the number of variables being hydrated when toggling variables +1. [18447](https://github.com/influxdata/influxdb/pull/18447): Redesign dashboard cell loading indicator to be more obvious +1. [18593](https://github.com/influxdata/influxdb/pull/18593): Add copyable User and Organization Ids to About page + +## v2.0.0-beta.11 [2020-05-26] + +### Features + +1. [18011](https://github.com/influxdata/influxdb/pull/18011): Integrate UTC dropdown when making custom time range query +1. [18040](https://github.com/influxdata/influxdb/pull/18040): Allow for min OR max y-axis visualization settings rather than min AND max +1. [17764](https://github.com/influxdata/influxdb/pull/17764): Add CSV to line protocol conversion library +1. [18059](https://github.com/influxdata/influxdb/pull/18059): Make the dropdown width adjustable +1. [18173](https://github.com/influxdata/influxdb/pull/18173): Add version to /health response + +### Bug Fixes + +1. [18066](https://github.com/influxdata/influxdb/pull/18066): Fixed bug that wasn't persisting timeFormat for Graph + Single Stat selections +1. [17959](https://github.com/influxdata/influxdb/pull/17959): Authorizer now exposes full permission set +1. 
[18071](https://github.com/influxdata/influxdb/pull/18071): Fixed issue that was causing variable selections to hydrate all variable values +1. [18016](https://github.com/influxdata/influxdb/pull/18016): Remove the fancy scrollbars +1. [18171](https://github.com/influxdata/influxdb/pull/18171): Check status now displaying warning if loading a large amount + +## v2.0.0-beta.10 [2020-05-07] + +### Features + +1. [17934](https://github.com/influxdata/influxdb/pull/17934): Add ability to delete a stack and all the resources associated with it +1. [17941](https://github.com/influxdata/influxdb/pull/17941): Enforce DNS name compliance on all pkger resources' metadata.name field +1. [17989](https://github.com/influxdata/influxdb/pull/17989): Add stateful pkg management with stacks +1. [18007](https://github.com/influxdata/influxdb/pull/18007): Add remove and list pkger stack commands to influx CLI +1. [18017](https://github.com/influxdata/influxdb/pull/18017): Fixup display message for interactive influx setup cmd + +### Bug Fixes + +1. [17906](https://github.com/influxdata/influxdb/pull/17906): Ensure UpdateUser cleans up the index when updating names +1. [17933](https://github.com/influxdata/influxdb/pull/17933): Ensure Checks can be set for zero values + +### UI Improvements + +1. [17860](https://github.com/influxdata/influxdb/pull/17860): Allow bucket creation from the Data Explorer and Cell Editor + +## v2.0.0-beta.9 [2020-04-23] + +### Features + +1. [17851](https://github.com/influxdata/influxdb/pull/17851): Add feature flag package capability and flags endpoint + +### Bug Fixes + +1. [17618](https://github.com/influxdata/influxdb/pull/17618): Add index for URM by user ID to improve lookup performance +1. [17751](https://github.com/influxdata/influxdb/pull/17751): Existing session expiration time is respected on session renewal +1. [17817](https://github.com/influxdata/influxdb/pull/17817): Make CLI respect env vars and flags in addition to the configs and extend support for config orgs to all commands + +### UI Improvements + +1. [17714](https://github.com/influxdata/influxdb/pull/17714): Cloud environments no longer render markdown images, for security reasons. +1. [17321](https://github.com/influxdata/influxdb/pull/17321): Improve UI for sorting resources +1. [17740](https://github.com/influxdata/influxdb/pull/17740): Add single-color color schemes for visualizations +1. [17849](https://github.com/influxdata/influxdb/pull/17849): Move Organization navigation items to user menu. + +## v2.0.0-beta.8 [2020-04-10] + +### Features + +1. [17490](https://github.com/influxdata/influxdb/pull/17490): `influx config -`, to switch back to previous activated configuration +1. [17581](https://github.com/influxdata/influxdb/pull/17581): Introduce new navigation menu +1. [17595](https://github.com/influxdata/influxdb/pull/17595): Add -f (--file) option to `influx query` and `influx task` commands +1. [17498](https://github.com/influxdata/influxdb/pull/17498): Added support for command line options to limit memory for queries + +### Bug Fixes + +1. [17257](https://github.com/influxdata/influxdb/pull/17769): Fix retention policy after bucket is migrated +1. [17612](https://github.com/influxdata/influxdb/pull/17612): Fix card size and layout jank in dashboards index view +1. 
[17651](https://github.com/influxdata/influxdb/pull/17651): Fix check graph font and lines defaulting to black causing graph to be unreadable +1. [17660](https://github.com/influxdata/influxdb/pull/17660): Fix text wrapping display issue and popover sizing bug when adding labels to a resource +1. [17670](https://github.com/influxdata/influxdb/pull/17670): Respect the now-time of the compiled query if it's provided +1. [17692](https://github.com/influxdata/influxdb/pull/17692): Update giraffe to fix spacing between ticks +1. [17694](https://github.com/influxdata/influxdb/pull/17694): Fixed typos in the Flux functions list +1. [17701](https://github.com/influxdata/influxdb/pull/17701): Allow mouse cursor inside Script Editor for Safari +1. [17609](https://github.com/influxdata/influxdb/pull/17609): Fixed an issue where Variables could not use other Variables +1. [17754](https://github.com/influxdata/influxdb/pull/17754): Adds error messaging for Cells in Dashboard View + +### UI Improvements + +1. [17583](https://github.com/influxdata/influxdb/pull/17583): Update layout of Alerts page to work on all screen sizes +1. [17657](https://github.com/influxdata/influxdb/pull/17657): Sort dashboards on Getting Started page by recently modified + +## v2.0.0-beta.7 [2020-03-27] + +### Features + +1. [17232](https://github.com/influxdata/influxdb/pull/17232): Allow dashboards to optionally be displayed in light mode +1. [17273](https://github.com/influxdata/influxdb/pull/17273): Add shell completions command for the influx cli +1. [17353](https://github.com/influxdata/influxdb/pull/17353): Make all pkg resources unique by metadata.name field +1. [17363](https://github.com/influxdata/influxdb/pull/17363): Telegraf config tokens can no longer be retrieved after creation, but new tokens can be created after a telegraf has been setup +1. [17400](https://github.com/influxdata/influxdb/pull/17400): Be able to delete bucket by name via cli +1. [17396](https://github.com/influxdata/influxdb/pull/17396): Add module to write line data to specified url, org, and bucket +1. [17398](https://github.com/influxdata/influxdb/pull/17398): Extend influx cli write command with ability to process CSV data +1. [17448](https://github.com/influxdata/influxdb/pull/17448): Add foundation for pkger stacks, stateful package management +1. [17462](https://github.com/influxdata/influxdb/pull/17462): Flag to disable scheduling of tasks +1. [17470](https://github.com/influxdata/influxdb/pull/17470): Add ability to output cli output as json and hide table headers +1. [17472](https://github.com/influxdata/influxdb/pull/17472): Add an easy way to switch config via cli + +### Bug Fixes + +1. [17240](https://github.com/influxdata/influxdb/pull/17240): NodeJS logo displays properly in Firefox +1. [17363](https://github.com/influxdata/influxdb/pull/17363): Fixed telegraf configuration bugs where system buckets were appearing in the buckets dropdown +1. [17391](https://github.com/influxdata/influxdb/pull/17391): Fixed threshold check bug where checks could not be created when a field had a space in the name +1. [17384](https://github.com/influxdata/influxdb/pull/17384): Reuse slices built by iterator to reduce allocations +1. 
[17404](https://github.com/influxdata/influxdb/pull/17404): Updated duplicate check error message to be more explicit and actionable +1. [17515](https://github.com/influxdata/influxdb/pull/17515): Editing a table cell shows the proper values and respects changes +1. [17521](https://github.com/influxdata/influxdb/pull/17521): Table view scrolling should be slightly smoother +1. [17601](https://github.com/influxdata/influxdb/pull/17601): URL table values on single columns are being correctly parsed +1. [17552](https://github.com/influxdata/influxdb/pull/17552): Fixed a regression bug that insert aggregate functions where the cursor is rather than a new line + +### UI Improvements + +1. [17291](https://github.com/influxdata/influxdb/pull/17291): Redesign OSS Login page +1. [17297](https://github.com/influxdata/influxdb/pull/17297): Display graphic when a dashboard has no cells + +## v2.0.0-beta.6 [2020-03-12] + +### Features + +1. [17085](https://github.com/influxdata/influxdb/pull/17085): Clicking on bucket name takes user to Data Explorer with bucket selected +1. [17095](https://github.com/influxdata/influxdb/pull/17095): Extend pkger dashboards with table view support +1. [17114](https://github.com/influxdata/influxdb/pull/17114): Allow for retention to be provided to influx setup command as a duration +1. [17138](https://github.com/influxdata/influxdb/pull/17138): Extend pkger export all capabilities to support filtering by lable name and resource type +1. [17049](https://github.com/influxdata/influxdb/pull/17049): Added new login and sign-up screen that for cloud users that allows direct login from their region +1. [17170](https://github.com/influxdata/influxdb/pull/17170): Added new cli multiple profiles management tool +1. [17145](https://github.com/influxdata/influxdb/pull/17145): Update kv.Store to define schema changes via new kv.Migrator types + +### Bug Fixes + +1. [17039](https://github.com/influxdata/influxdb/pull/17039): Fixed issue where tasks are exported for notification rules +1. [17042](https://github.com/influxdata/influxdb/pull/17042): Fixed issue where tasks are not exported when exporting by org id +1. [17070](https://github.com/influxdata/influxdb/pull/17070): Fixed issue where tasks with imports in query break in pkger +1. [17028](https://github.com/influxdata/influxdb/pull/17028): Fixed issue where selecting an aggregate function in the script editor was not adding the function to a new line +1. [17072](https://github.com/influxdata/influxdb/pull/17072): Fixed issue where creating a variable of type map was piping the incorrect value when map variables were used in queries +1. [17050](https://github.com/influxdata/influxdb/pull/17050): Added missing user names to auth CLI commands +1. [17113](https://github.com/influxdata/influxdb/pull/17113): Disabled group functionality for check query builder +1. [17120](https://github.com/influxdata/influxdb/pull/17120): Fixed cell configuration error that was popping up when users create a dashboard and accessed the disk usage cell for the first time +1. [17097](https://github.com/influxdata/influxdb/pull/17097): Listing all the default variables in the VariableTab of the script editor +1. 
[17049](https://github.com/influxdata/influxdb/pull/17049): Fixed bug that was preventing the interval status on the dashboard header from refreshing on selections +1. [17161](https://github.com/influxdata/influxdb/pull/17161): Update table custom decimal feature for tables to update table onFocus +1. [17168](https://github.com/influxdata/influxdb/pull/17168): Fixed UI bug that was setting Telegraf config buttons off-center and was resizing config selections when filtering through the data +1. [17208](https://github.com/influxdata/influxdb/pull/17208): Fixed UI bug that was setting causing dashboard cells to error when the a v.bucket was being used and was being configured for the first time +1. [17214](https://github.com/influxdata/influxdb/pull/17214): Fix appearance of client library logos in Safari +1. [17202](https://github.com/influxdata/influxdb/pull/17202): Fixed UI bug that was preventing checks created with the query builder from updating. Also fixed a bug that was preventing dashboard cell queries from working properly when creating group queries using the query builder + +## v2.0.0-beta.5 [2020-02-27] + +### Features + +1. [16991](https://github.com/influxdata/influxdb/pull/16991): Update Flux functions list for v0.61 +1. [16574](https://github.com/influxdata/influxdb/pull/16574): Add secure flag to session cookie + +### Bug Fixes + +1. [16919](https://github.com/influxdata/influxdb/pull/16919): Sort dashboards on homepage alphabetically +1. [16934](https://github.com/influxdata/influxdb/pull/16934): Tokens page now sorts by status +1. [16931](https://github.com/influxdata/influxdb/pull/16931): Set the default value of tags in a Check +1. [16935](https://github.com/influxdata/influxdb/pull/16935): Fix sort by variable type +1. [16973](https://github.com/influxdata/influxdb/pull/16973): Calculate correct stacked line cumulative when lines are different lengths +1. [17010](https://github.com/influxdata/influxdb/pull/17010): Fixed scrollbar issue where resource cards would overflow the parent container rather than be hidden and scrollable +1. [16992](https://github.com/influxdata/influxdb/pull/16992): Query Builder now groups on column values, not tag values +1. [17013](https://github.com/influxdata/influxdb/pull/17013): Scatterplots can once again render the tooltip correctly +1. [17027](https://github.com/influxdata/influxdb/pull/17027): Drop pkger gauge chart requirement for color threshold type +1. [17040](https://github.com/influxdata/influxdb/pull/17040): Fixed bug that was preventing the interval status on the dashboard header from refreshing on selections +1. [16961](https://github.com/influxdata/influxdb/pull/16961): Remove cli confirmation of secret, add an optional parameter of secret value + +## v2.0.0-beta.4 [2020-02-14] + +### Features + +1. [16855](https://github.com/influxdata/influxdb/pull/16855): Added labels to buckets in UI +1. [16842](https://github.com/influxdata/influxdb/pull/16842): Connect monaco editor to Flux LSP server +1. [16856](https://github.com/influxdata/influxdb/pull/16856): Update Flux to v0.59.6 + +### Bug Fixes + +1. [16852](https://github.com/influxdata/influxdb/pull/16852): Revert for bad indexing of UserResourceMappings and Authorizations +1. [15911](https://github.com/influxdata/influxdb/pull/15911): Gauge no longer allowed to become too small +1. 
[16878](https://github.com/influxdata/influxdb/pull/16878): Fix issue with INFLUX_TOKEN env vars being overridden by default token + +## v2.0.0-beta.3 [2020-02-11] + +### Features + +1. [16765](https://github.com/influxdata/influxdb/pull/16765): Extend influx cli pkg command with ability to take multiple files and directories +1. [16767](https://github.com/influxdata/influxdb/pull/16767): Extend influx cli pkg command with ability to take multiple urls, files, directories, and stdin at the same time +1. [16786](https://github.com/influxdata/influxdb/pull/16786): influx cli can manage secrets. + +### Bug Fixes + +1. [16733](https://github.com/influxdata/influxdb/pull/16733): Fix notification rule renaming panics from UI +1. [16769](https://github.com/influxdata/influxdb/pull/16769): Fix the tooltip for stacked line graphs +1. [16825](https://github.com/influxdata/influxdb/pull/16825): Fixed false success notification for read-only users creating dashboards +1. [16822](https://github.com/influxdata/influxdb/pull/16822): Fix issue with pkger/http stack crashing on dupe content type + +## v2.0.0-beta.2 [2020-01-24] + +### Features + +1. [16711](https://github.com/influxdata/influxdb/pull/16711): Query Builder supports group() function (change the dropdown from filter to group) +1. [16523](https://github.com/influxdata/influxdb/pull/16523): Change influx packages to be CRD compliant +1. [16547](https://github.com/influxdata/influxdb/pull/16547): Allow trailing newline in credentials file and CLI integration +1. [16545](https://github.com/influxdata/influxdb/pull/16545): Add support for prefixed cursor search to ForwardCursor types +1. [16504](https://github.com/influxdata/influxdb/pull/16504): Add backup and restore +1. [16522](https://github.com/influxdata/influxdb/pull/16522): Introduce resource logger to tasks, buckets and organizations + +### Bug Fixes + +1. [16656](https://github.com/influxdata/influxdb/pull/16656): Check engine closed before collecting index metrics +1. [16412](https://github.com/influxdata/influxdb/pull/16412): Reject writes which use any of the reserved tag keys +1. [16715](https://github.com/influxdata/influxdb/pull/16715): Fixed dashboard mapping for getDashboards to map correct prop +1. [16716](https://github.com/influxdata/influxdb/pull/16716): Improve the lacking error responses for unmarshal errors in org service + +### Bug Fixes + +1. [16527](https://github.com/influxdata/influxdb/pull/16527): fix /telegrafs panics when using org=org_name parameter + +### UI Improvements + +1. [16575](https://github.com/influxdata/influxdb/pull/16575): Swap billingURL with checkoutURL +1. [16203](https://github.com/influxdata/influxdb/pull/16203): Move cloud navigation to top of page instead of within left side navigation +1. [16536](https://github.com/influxdata/influxdb/pull/16536): Adjust aggregate window periods to be more "reasonable". Use duration input with validation. + +## v2.0.0-beta.1 [2020-01-08] + +### Features + +1. [16234](https://github.com/influxdata/influxdb/pull/16234): Add support for notification endpoints to influx templates/pkgs. +1. [16242](https://github.com/influxdata/influxdb/pull/16242): Drop id prefix for secret key requirement for notification endpoints +1. 
[16259](https://github.com/influxdata/influxdb/pull/16259): Add support for check resource to pkger parser +1. [16262](https://github.com/influxdata/influxdb/pull/16262): Add support for check resource pkger dry run functionality +1. [16275](https://github.com/influxdata/influxdb/pull/16275): Add support for check resource pkger apply functionality +1. [16283](https://github.com/influxdata/influxdb/pull/16283): Add support for check resource pkger export functionality +1. [16212](https://github.com/influxdata/influxdb/pull/16212): Add new kv.ForwardCursor interface +1. [16297](https://github.com/influxdata/influxdb/pull/16297): Add support for notification rule to pkger parser +1. [16298](https://github.com/influxdata/influxdb/pull/16298): Add support for notification rule pkger dry run functionality +1. [16305](https://github.com/influxdata/influxdb/pull/16305): Add support for notification rule pkger apply functionality +1. [16312](https://github.com/influxdata/influxdb/pull/16312): Add support for notification rule pkger export functionality +1. [16320](https://github.com/influxdata/influxdb/pull/16320): Add support for tasks to pkger parser +1. [16322](https://github.com/influxdata/influxdb/pull/16322): Add support for tasks to pkger dry run functionality +1. [16323](https://github.com/influxdata/influxdb/pull/16323): Add support for tasks to pkger apply functionality +1. [16324](https://github.com/influxdata/influxdb/pull/16324): Add support for tasks to pkger export functionality +1. [16226](https://github.com/influxdata/influxdb/pull/16226): Add group() to Query Builder +1. [16338](https://github.com/influxdata/influxdb/pull/16338): Add last run status to check and notification rules +1. [16340](https://github.com/influxdata/influxdb/pull/16340): Add last run status to tasks +1. [16341](https://github.com/influxdata/influxdb/pull/16341): Extend pkger apply functionality with ability to provide secrets outside of pkg +1. [16345](https://github.com/influxdata/influxdb/pull/16345): Add hide headers flag to influx cli task find cmd +1. [16336](https://github.com/influxdata/influxdb/pull/16336): Manual Overrides for Readiness Endpoint +1. [16347](https://github.com/influxdata/influxdb/pull/16347): Drop legacy inmem service implementation in favor of kv service with inmem dependency +1. [16348](https://github.com/influxdata/influxdb/pull/16348): Drop legacy bolt service implementation in favor of kv service with bolt dependency +1. [16014](https://github.com/influxdata/influxdb/pull/16014): While creating check, also display notification rules that would match check based on tag rules +1. [16389](https://github.com/influxdata/influxdb/pull/16389): Increase default bucket retention period to 30 days +1. [16430](https://github.com/influxdata/influxdb/pull/16430): Added toggle to table thresholds to allow users to choose between setting threshold colors to text or background +1. [16418](https://github.com/influxdata/influxdb/pull/16418): Add Developer Documentation +1. [16260](https://github.com/influxdata/influxdb/pull/16260): Capture User-Agent header as query source for logging purposes +1. [16469](https://github.com/influxdata/influxdb/pull/16469): Add support for configurable max batch size in points write handler +1. 
[16509](https://github.com/influxdata/influxdb/pull/16509): Add support for applying an influx package via a public facing URL +1. [16511](https://github.com/influxdata/influxdb/pull/16511): Add jsonnet support for influx packages +1. [14782](https://github.com/influxdata/influxdb/pull/16336): Add view page for Check +1. [16537](https://github.com/influxdata/influxdb/pull/16537): Add update password for CLI + +### Bug Fixes + +1. [16225](https://github.com/influxdata/influxdb/pull/16225): Ensures env vars are applied consistently across cmd, and fixes issue where INFLUX\_ env var prefix was not set globally. +1. [16235](https://github.com/influxdata/influxdb/pull/16235): Removed default frontend sorting when flux queries specify sorting +1. [16238](https://github.com/influxdata/influxdb/pull/16238): Store canceled task runs in the correct bucket +1. [16237](https://github.com/influxdata/influxdb/pull/16237): Updated Sortby functionality for table frontend sorts to sort numbers correctly +1. [16249](https://github.com/influxdata/influxdb/pull/16249): Prevent potential infinite loop when finding tasks by organization. +1. [16255](https://github.com/influxdata/influxdb/pull/16255): Retain user input when parsing invalid JSON during import +1. [16268](https://github.com/influxdata/influxdb/pull/16268): Fixed test flakiness that stemmed from multiple flush/signins being called in the same test suite +1. [16346](https://github.com/influxdata/influxdb/pull/16346): Update pkger task export to only trim out option task and not all vars provided +1. [16374](https://github.com/influxdata/influxdb/pull/16374): Update influx CLI, only show "see help" message, instead of the whole usage. +1. [16380](https://github.com/influxdata/influxdb/pull/16380): Fix notification tag matching rules and enable tests to verify +1. [16376](https://github.com/influxdata/influxdb/pull/16376): Extend the y-axis when stacked graph is selected +1. [16404](https://github.com/influxdata/influxdb/pull/16404): Fixed query reset bug that was resetting query in script editor whenever dates were changed +1. [16430](https://github.com/influxdata/influxdb/pull/16430): Fixed table threshold bug that was defaulting set colors to the background. +1. [16435](https://github.com/influxdata/influxdb/pull/16435): Time labels are no longer squished to the left +1. [16427](https://github.com/influxdata/influxdb/pull/16427): Fixed underlying issue with disappearing queries made in Advanced Mode +1. [16439](https://github.com/influxdata/influxdb/pull/16439): Prevent negative zero and allow zero to have decimal places +1. [16376](https://github.com/influxdata/influxdb/pull/16413): Limit data loader bucket selection to non system buckets +1. [16458](https://github.com/influxdata/influxdb/pull/16458): Fix EOF error when manually running tasks from the Task Page. +1. [16491](https://github.com/influxdata/influxdb/pull/16491): Add missing env vals to influx cli usage and fixes precedence of flag/env var priority + +### UI Improvements + +1. [16444](https://github.com/influxdata/influxdb/pull/16444): Add honeybadger reporting to create checks + +## v2.0.0-alpha.21 [2019-12-13] + +### Features + +1. [15836](https://github.com/influxdata/influxdb/pull/16077): Add stacked line layer option to graphs +1. 
[16094](https://github.com/influxdata/influxdb/pull/16094): Annotate log messages with trace ID, if available
+1. [16187](https://github.com/influxdata/influxdb/pull/16187): Allow bucket create to accept an org name flag (see the example below)
+1. [16158](https://github.com/influxdata/influxdb/pull/16158): Add trace ID response header to query endpoint
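+
+A sketch of the new flag. The bucket and org names are placeholders and the exact flag spellings are assumptions; see `influx bucket create --help` on your build:
+
+```sh
+# Reference the organization by name instead of by ID
+influx bucket create --name example-bucket --org example-org --retention 72h
+```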
+
+### Bug Fixes
+
+1. [15655](https://github.com/influxdata/influxdb/pull/15655): Allow table columns to be draggable in table settings
+1. [15757](https://github.com/influxdata/influxdb/pull/15757): Light up the home page icon when active
+1. [15797](https://github.com/influxdata/influxdb/pull/15797): Make numeric inputs first class citizens
+1. [15853](https://github.com/influxdata/influxdb/pull/15853): Prompt users to make a dashboard when dashboards are empty
+1. [15884](https://github.com/influxdata/influxdb/pull/15884): Remove name editing from query definition during threshold check creation
+1. [15975](https://github.com/influxdata/influxdb/pull/15975): Wait until user stops dragging and releases marker before zooming in after threshold changes
+1. [16057](https://github.com/influxdata/influxdb/pull/16057): Adds `properties` to each cell on GET /dashboards/{dashboardID}
+1. [16101](https://github.com/influxdata/influxdb/pull/16101): Gracefully handle invalid user-supplied JSON
+1. [16105](https://github.com/influxdata/influxdb/pull/16105): Fix crash when loading queries built using Query Builder
+1. [16112](https://github.com/influxdata/influxdb/pull/16112): Create cell view properties on dashboard creation
+1. [16144](https://github.com/influxdata/influxdb/pull/16144): Scrollbars are dapper and proper
+1. [16172](https://github.com/influxdata/influxdb/pull/16172): Fixed table ui threshold colorization issue where setting thresholds would not change table UI
+1. [16194](https://github.com/influxdata/influxdb/pull/16194): Fixed windowPeriod issue that stemmed from webpack rules
+1. [16175](https://github.com/influxdata/influxdb/pull/16175): Added delete functionality to note cells so that they can be deleted
+1. [16204](https://github.com/influxdata/influxdb/pull/16204): Fix failure to create labels when creating telegraf configs
+1. [16207](https://github.com/influxdata/influxdb/pull/16207): Fix crash when editing a Telegraf config
+1. [16201](https://github.com/influxdata/influxdb/pull/16201): Updated start/end time functionality so that custom script time ranges overwrite dropdown selections
+1. [16217](https://github.com/influxdata/influxdb/pull/16217): Fix 12-hour time format to use consistent formatting and number of time ticks
+
+## v2.0.0-alpha.20 [2019-11-20]
+
+### Features
+
+1. [15805](https://github.com/influxdata/influxdb/pull/15924): Add tls insecure skip verify to influx CLI.
+1. [15981](https://github.com/influxdata/influxdb/pull/15981): Extend influx cli user create to allow for organization ID and user passwords to be set on user.
+1. [15983](https://github.com/influxdata/influxdb/pull/15983): Autopopulate organization ids in the code samples
+1. [15749](https://github.com/influxdata/influxdb/pull/15749): Expose bundle analysis tools for frontend resources
+1. [15674](https://github.com/influxdata/influxdb/pull/15674): Allow users to view just the output section of a telegraf config
+1. [15923](https://github.com/influxdata/influxdb/pull/15923): Allow users to see string data in the single stat graph type
+
+### Bug Fixes
+
+1. [15777](https://github.com/influxdata/influxdb/pull/15777): Fix long startup when running 'influx help'
+1. [15713](https://github.com/influxdata/influxdb/pull/15713): Mock missing Flux dependencies when creating tasks
+1. [15731](https://github.com/influxdata/influxdb/pull/15731): Ensure array cursor iterator stats accumulate all cursor stats
+1. [15866](https://github.com/influxdata/influxdb/pull/15866): Do not show Members section in Cloud environments
+1. [15801](https://github.com/influxdata/influxdb/pull/15801): Change how cloud mode is enabled
+1. [15820](https://github.com/influxdata/influxdb/pull/15820): Merge frontend development environments
+1. [15944](https://github.com/influxdata/influxdb/pull/15944): Refactor table state logic on the frontend
+1. [15920](https://github.com/influxdata/influxdb/pull/15920): Arrows in tables now show data in ascending and descending order
+1. [15728](https://github.com/influxdata/influxdb/pull/15728): Sort by retention rules now sorts by seconds
+1. [15628](https://github.com/influxdata/influxdb/pull/15628): Horizontal scrollbar no longer covering data
+
+### UI Improvements
+
+1. [15809](https://github.com/influxdata/influxdb/pull/15809): Redesign cards and animations on getting started page
+1. [15787](https://github.com/influxdata/influxdb/pull/15787): Allow users to filter with labels in the Telegraf input search
+
+## v2.0.0-alpha.19 [2019-10-30]
+
+### Features
+
+1. [15313](https://github.com/influxdata/influxdb/pull/15313): Add shortcut for toggling comments in script editor
+1. [15650](https://github.com/influxdata/influxdb/pull/15650): Expose last run status and last run error in task API (see the example below)
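+
+A sketch of reading the new fields from the API. The field names shown are assumptions based on this entry, and the host, task ID, and token are placeholders:
+
+```sh
+# Fetch a task; the response body now carries its last run status and error
+curl -s http://localhost:8086/api/v2/tasks/YOUR_TASK_ID \
+  -H 'Authorization: Token YOURAUTHTOKEN'
+# Look for the "lastRunStatus" and "lastRunError" fields in the JSON response.
+```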
+
+### UI Improvements
+
+1. [15503](https://github.com/influxdata/influxdb/pull/15503): Redesign page headers to be more space efficient
+1. [15426](https://github.com/influxdata/influxdb/pull/15426): Add 403 handler that redirects back to the sign-in page on oats-generated routes.
+1. [15710](https://github.com/influxdata/influxdb/pull/15710): Add button to nginx and redis configuration sections to make interaction more clear
+
+### Bug Fixes
+
+1. [15295](https://github.com/influxdata/influxdb/pull/15295): Ensures users are created with an active status
+1. [15306](https://github.com/influxdata/influxdb/pull/15306): Added missing string values for CacheStatus type
+1. [15348](https://github.com/influxdata/influxdb/pull/15348): Disable saving for threshold check if no threshold selected
+1. [15354](https://github.com/influxdata/influxdb/pull/15354): Query variable selector shows variable keys, not values
+1. [15246](https://github.com/influxdata/influxdb/pull/15427): UI/Telegraf filter functionality shows results based on input name
+1. [13940](https://github.com/influxdata/influxdb/pull/15443): Create Label Overlay UI will disable the submit button and return a UI error if the name field is empty
+1. [15452](https://github.com/influxdata/influxdb/pull/15452): Log error as info message on unauthorized API call attempts
+1. [15504](https://github.com/influxdata/influxdb/pull/15504): Ensure members & owners endpoints return 404 when the /org resource does not exist
+1. [15510](https://github.com/influxdata/influxdb/pull/15510): UI/Telegraf sort functionality fixed
+1. [15549](https://github.com/influxdata/influxdb/pull/15549): UI/Task edit functionality fixed
+1. [15559](https://github.com/influxdata/influxdb/pull/15559): Exiting a configuration of a dashboard cell now properly renders the cell content
+1. [15556](https://github.com/influxdata/influxdb/pull/15556): Creating a check now displays on the checklist
+1. [15592](https://github.com/influxdata/influxdb/pull/15592): Changed task runs success status code from 200 to 201 to match Swagger documentation.
+1. [15634](https://github.com/influxdata/influxdb/pull/15634): TextAreas have the correct height
+1. [15647](https://github.com/influxdata/influxdb/pull/15647): Ensures labels are unique by organization in the kv store
+1. [15695](https://github.com/influxdata/influxdb/pull/15695): Ensures variable names are unique by organization
+
+## v2.0.0-alpha.18 [2019-09-26]
+
+### Features
+
+1. [15151](https://github.com/influxdata/influxdb/pull/15151): Add jsonweb package for future JWT support
+1. [15168](https://github.com/influxdata/influxdb/pull/15168): Added the JMeter Template dashboard
+1. [15152](https://github.com/influxdata/influxdb/pull/15152): Add JWT support to http auth middleware
+
+### UI Improvements
+
+1. [15211](https://github.com/influxdata/influxdb/pull/15211): Display dashboards index as a grid
+1. [15099](https://github.com/influxdata/influxdb/pull/15099): Add viewport scaling to html meta for responsive mobile scaling
+1. [15056](https://github.com/influxdata/influxdb/pull/15056): Remove rename and delete functionality from system buckets
+1. [15056](https://github.com/influxdata/influxdb/pull/15056): Prevent new buckets from being named with the reserved "\_" prefix
+1. [15056](https://github.com/influxdata/influxdb/pull/15056): Prevent user from selecting system buckets when creating Scrapers, Telegraf configurations, read/write tokens, and when saving as a task
+1. [15056](https://github.com/influxdata/influxdb/pull/15056): Limit values from draggable threshold handles to 2 decimal places
+1. [15040](https://github.com/influxdata/influxdb/pull/15040): Redesign check builder UI to fill the screen and make more room for composing message templates
+1. [14990](https://github.com/influxdata/influxdb/pull/14990): Move Tokens tab from Settings to Load Data page
+1. [14990](https://github.com/influxdata/influxdb/pull/14990): Expose all Settings tabs in navigation menu
+1. [15289](https://github.com/influxdata/influxdb/pull/15289): Added Stream and table functions to query builder
+
+### Bug Fixes
+
+1. [14931](https://github.com/influxdata/influxdb/pull/14931): Remove scrollbars blocking onboarding UI step.
+
+## v2.0.0-alpha.17 [2019-08-14]
+
+### Features
+
+1. [14809](https://github.com/influxdata/influxdb/pull/14809): Add task middlewares for checks and notifications
+1. [14495](https://github.com/influxdata/influxdb/pull/14495): Optional gzip compression of the query CSV response (see the example below).
+1. [14567](https://github.com/influxdata/influxdb/pull/14567): Add task types.
+1. [14604](https://github.com/influxdata/influxdb/pull/14604): When getting task runs from the API, runs will be returned in order of most recently scheduled first.
+1. [14631](https://github.com/influxdata/influxdb/pull/14631): Added Github and Apache templates
+1. [14631](https://github.com/influxdata/influxdb/pull/14631): Updated name of Local Metrics template
+1. [14631](https://github.com/influxdata/influxdb/pull/14631): Dashboards are now created for all Telegraf config bundles
+1. [14694](https://github.com/influxdata/influxdb/pull/14694): Add ability to find tasks by name.
+1. [14901](https://github.com/influxdata/influxdb/pull/14901): Add ability to Peek() on reads package StreamReader types.
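+
+A sketch of requesting the compressed CSV; this is standard HTTP content negotiation, and the org, token, and query are placeholders:
+
+```sh
+# Ask the query endpoint to gzip its CSV response
+curl http://localhost:8086/api/v2/query?org=my-org -XPOST -sS \
+  -H 'Authorization: Token YOURAUTHTOKEN' \
+  -H 'Accept-Encoding: gzip' \
+  -H 'Content-type: application/vnd.flux' \
+  -d 'from(bucket:"example-bucket") |> range(start:-1h)' \
+  --output results.csv.gz
+```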
+
+### UI Improvements
+
+1. [14917](https://github.com/influxdata/influxdb/pull/14917): Make first steps in Monitoring & Alerting more obvious
+1. [14889](https://github.com/influxdata/influxdb/pull/14889): Make adding data to buckets more discoverable
+1. [14709](https://github.com/influxdata/influxdb/pull/14709): Move Buckets, Telegrafs, and Scrapers pages into a tab called "Load Data" for ease of discovery
+1. [14846](https://github.com/influxdata/influxdb/pull/14846): Standardize formatting of "updated at" timestamp in all resource cards
+1. [14887](https://github.com/influxdata/influxdb/pull/14887): Move no buckets warning in telegraf tab above the search box
+
+### Bug Fixes
+
+1. [14480](https://github.com/influxdata/influxdb/pull/14480): Fix authentication when updating a task with invalid org or bucket.
+1. [14497](https://github.com/influxdata/influxdb/pull/14497): Update the documentation link for Telegraf.
+1. [14492](https://github.com/influxdata/influxdb/pull/14492): Fix to surface errors properly as task notifications on create.
+1. [14569](https://github.com/influxdata/influxdb/pull/14569): Fix limiting of get runs for tasks.
+1. [14779](https://github.com/influxdata/influxdb/pull/14779): Refactor tasks coordinator.
+1. [14846](https://github.com/influxdata/influxdb/pull/14846): Ensure onboarding "advanced" button goes to correct location
+
+## v2.0.0-alpha.16 [2019-07-25]
+
+### Bug Fixes
+
+1. [14385](https://github.com/influxdata/influxdb/pull/14385): Add link to Documentation text in line protocol upload overlay
+1. [14344](https://github.com/influxdata/influxdb/pull/14344): Fix issue in Authorization API, can't create auth for another user.
+1. [14352](https://github.com/influxdata/influxdb/pull/14352): Fix Influx CLI ignored user flag for auth creation.
+1. [14379](https://github.com/influxdata/influxdb/pull/14379): Fix the map example in the documentation
+1. [14423](https://github.com/influxdata/influxdb/pull/14423): Ignore null/empty Flux rows which prevents a single stat/gauge crash.
+1. [14434](https://github.com/influxdata/influxdb/pull/14434): Fixes an issue where clicking on a dashboard name caused an incorrect redirect.
+1. [14441](https://github.com/influxdata/influxdb/pull/14441): Upgrade templates lib to 0.5.0
+1. [14453](https://github.com/influxdata/influxdb/pull/14453): Upgrade giraffe lib to 0.16.1
+1. [14412](https://github.com/influxdata/influxdb/pull/14412): Fix incorrect notification type for manually running a Task
+1. [14356](https://github.com/influxdata/influxdb/pull/14356): Fix an issue where canceled tasks did not resume.
+
+## v2.0.0-alpha.15 [2019-07-11]
+
+### Features
+
+1. [14256](https://github.com/influxdata/influxdb/pull/14256): Add time zone support to UI
+2. [14243](https://github.com/influxdata/influxdb/pull/14243): Added new storage inspection tool to verify tsm files
+3. [14353](https://github.com/influxdata/influxdb/pull/14353): Require a token to be supplied for all task creation
+
+### Bug Fixes
+
+1. [14287](https://github.com/influxdata/influxdb/pull/14287): Fix incorrect reporting of task as successful when error occurs during result iteration
+1. [14412](https://github.com/influxdata/influxdb/pull/14412): Fix incorrect notification type for manually running a Task
+
+### Known Issues
+
+1. [influxdata/flux#1492](https://github.com/influxdata/flux/issues/1492): Null support in Flux was introduced in Alpha 14. Several null issues were fixed in this release, but one known issue remains: users may hit a panic if the first record processed by a map function has a null value.
+
+## v2.0.0-alpha.14 [2019-06-28]
+
+### Features
+
+1. [14221](https://github.com/influxdata/influxdb/pull/14221): Add influxd inspect verify-wal tool (see the example below)
+1. [14218](https://github.com/influxdata/influxdb/commit/4faf2a24def4f351aef5b3c0f2907c385f82fdb9): Move to Flux 0.34.2, which includes new string functions and initial multi-datasource support with Sql.from()
+1. [14164](https://github.com/influxdata/influxdb/pull/14164): Only click save once to save cell
+1. [14188](https://github.com/influxdata/influxdb/pull/14188): Enable selecting more columns for line visualizations
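+
+For example, a sketch of running the new checker. The `--wal-path` flag is an assumption; run `influxd inspect verify-wal --help` to confirm the flag name on your build:
+
+```sh
+# Check Write-Ahead Log files for corruption
+influxd inspect verify-wal --wal-path ~/.influxdbv2/engine/wal
+```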
+
+### UI Improvements
+
+1. [14194](https://github.com/influxdata/influxdb/pull/14194): Draw gauges correctly on HiDPI displays
+1. [14194](https://github.com/influxdata/influxdb/pull/14194): Clamp gauge position to gauge domain
+1. [14168](https://github.com/influxdata/influxdb/pull/14168): Improve display of error messages
+1. [14157](https://github.com/influxdata/influxdb/pull/14157): Remove rendering bottleneck when streaming Flux responses
+1. [14165](https://github.com/influxdata/influxdb/pull/14165): Prevent variable dropdown from clipping
+
+## v2.0.0-alpha.13 [2019-06-13]
+
+### Features
+
+1. [14130](https://github.com/influxdata/influxdb/pull/14130): Add static templates for system, docker, redis, kubernetes
+1. [14189](https://github.com/influxdata/influxdb/pull/14189): Add option to select a token when creating a task
+1. [14200](https://github.com/influxdata/influxdb/pull/14200): Add the ability to update a token when updating a task
+
+## v2.0.0-alpha.12 [2019-06-13]
+
+### Features
+
+1. [14059](https://github.com/influxdata/influxdb/pull/14059): Enable formatting line graph y ticks with binary prefix
+1. [14052](https://github.com/influxdata/influxdb/pull/14052): Add x and y column pickers to graph types
+1. [14128](https://github.com/influxdata/influxdb/pull/14128): Add option to shade area below line graphs
+
+### Bug Fixes
+
+1. [14085](https://github.com/influxdata/influxdb/pull/14085): Fix performance regression in graph tooltips
+
+## v2.0.0-alpha.11 [2019-05-31]
+
+1. [14031](https://github.com/influxdata/influxdb/pull/14031): Correctly check if columnKeys include xColumn in heatmap
+
+## v2.0.0-alpha.10 [2019-05-30]
+
+### Features
+
+1. [13945](https://github.com/influxdata/influxdb/pull/13945): Add heatmap visualization type
+1. [13961](https://github.com/influxdata/influxdb/pull/13961): Add scatter graph visualization type
+1. [13850](https://github.com/influxdata/influxdb/pull/13850): Add description field to Tasks
+1. [13924](https://github.com/influxdata/influxdb/pull/13924): Add CLI arguments for configuring session length and renewal (see the example below)
+1. [13961](https://github.com/influxdata/influxdb/pull/13961): Add smooth interpolation option to line graphs
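+
+A sketch of the new arguments; the flag names are assumptions based on this entry, so run `influxd --help` to confirm:
+
+```sh
+# Expire idle sessions after two hours and disable automatic renewal
+influxd --session-length=120 --session-renew-disabled
+```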
+
+### Bug Fixes
+
+1. [13753](https://github.com/influxdata/influxdb/pull/13753): Removed hardcoded bucket for Getting Started with Flux dashboard
+1. [13783](https://github.com/influxdata/influxdb/pull/13783): Ensure map type variables allow for selecting values
+1. [13800](https://github.com/influxdata/influxdb/pull/13800): Generate more idiomatic Flux in query builder
+1. [13797](https://github.com/influxdata/influxdb/pull/13797): Expand tab key presses to 2 spaces in the Flux editor
+1. [13823](https://github.com/influxdata/influxdb/pull/13823): Prevent dragging of Variable Dropdowns when dragging a scrollbar inside the dropdown
+1. [13853](https://github.com/influxdata/influxdb/pull/13853): Improve single stat computation
+1. [13945](https://github.com/influxdata/influxdb/pull/13945): Fix crash when opening histogram settings with no data
+
+### UI Improvements
+
+1. [#13835](https://github.com/influxdata/influxdb/pull/13835): Render checkboxes in query builder tag selection lists
+1. [#13856](https://github.com/influxdata/influxdb/pull/13856): Fix jumbled card text in Telegraf configuration wizard
+1. [#13888](https://github.com/influxdata/influxdb/pull/13888): Change scrapers in scrapers list to be resource cards
+1. [#13925](https://github.com/influxdata/influxdb/pull/13925): Export and download resource with formatted resource name with no spaces
+
+## v2.0.0-alpha.9 [2019-05-01]
+
+**NOTE: This will remove all tasks from your InfluxDB v2.0 instance.**
+
+### Features
+
+1. [13423](https://github.com/influxdata/influxdb/pull/13423): Set autorefresh of dashboard to pause if absolute time range is selected
+1. [13473](https://github.com/influxdata/influxdb/pull/13473): Switch task back end to a more modular and flexible system
+1. [13493](https://github.com/influxdata/influxdb/pull/13493): Add org profile tab with ability to edit organization name
+1. [13510](https://github.com/influxdata/influxdb/pull/13510): Add org name to dashboard page title
+1. [13520](https://github.com/influxdata/influxdb/pull/13520): Add caution to bucket renaming
+1. [13560](https://github.com/influxdata/influxdb/pull/13560): Add option to generate all-access token in tokens tab
+1. [13601](https://github.com/influxdata/influxdb/pull/13601): Add option to generate read/write token in tokens tab
+1. [13715](https://github.com/influxdata/influxdb/pull/13715): Added a new Local Metrics Dashboard template that is created during Quick Start
+
+### Bug Fixes
+
+1. [13584](https://github.com/influxdata/influxdb/pull/13584): Fixed scroll clipping found in label editing flow
+1. [13585](https://github.com/influxdata/influxdb/pull/13585): Prevent overlapping text and dot in time range dropdown
+1. [13602](https://github.com/influxdata/influxdb/pull/13602): Updated link in notes cell to a more useful site
+1. [13618](https://github.com/influxdata/influxdb/pull/13618): Show error message when adding line protocol
+1. [13657](https://github.com/influxdata/influxdb/pull/13657): Update UI Flux function documentation
+1. [13718](https://github.com/influxdata/influxdb/pull/13718): Updated System template to support math with floats
+1. [13732](https://github.com/influxdata/influxdb/pull/13732): Fixed the window function documentation
+1. [13738](https://github.com/influxdata/influxdb/pull/13738): Fixed typo in the `range` Flux function example
+1. [13742](https://github.com/influxdata/influxdb/pull/13742): Updated the `systemTime` function to use `system.time`
+
+### UI Improvements
+
+1. [13424](https://github.com/influxdata/influxdb/pull/13424): Add general polish and empty states to Create Dashboard from Template overlay
+
+## v2.0.0-alpha.8 [2019-04-12]
+
+### Features
+
+1. [13024](https://github.com/influxdata/influxdb/pull/13024): Add the ability to edit a token's description
+1. [13078](https://github.com/influxdata/influxdb/pull/13078): Add the option to create a Dashboard from a Template.
+1. [13161](https://github.com/influxdata/influxdb/pull/13161): Add the ability to add labels on variables
+1. [13171](https://github.com/influxdata/influxdb/pull/13171): Add switch organizations dropdown to home navigation menu item.
+1. [13173](https://github.com/influxdata/influxdb/pull/13173): Add create org to side nav
+1. [13345](https://github.com/influxdata/influxdb/pull/13345): Added a new Getting Started with Flux Template
+
+### Bug Fixes
+
+1. [13284](https://github.com/influxdata/influxdb/pull/13284): Update shift to timeShift in the flux functions side bar
+
+### UI Improvements
+
+1. [13287](https://github.com/influxdata/influxdb/pull/13287): Update cursor to grab when hovering draggable areas
+1. [13311](https://github.com/influxdata/influxdb/pull/13311): Sync note editor text and preview scrolling
+1. [13249](https://github.com/influxdata/influxdb/pull/13249): Add the ability to create a bucket when creating an organization
+
+## v2.0.0-alpha.7 [2019-03-28]
+
+### Features
+
+1. [12663](https://github.com/influxdata/influxdb/pull/12663): Insert flux function near cursor in flux editor
+1. [12678](https://github.com/influxdata/influxdb/pull/12678): Enable the use of variables in the Data Explorer and Cell Editor Overlay
+1. [12655](https://github.com/influxdata/influxdb/pull/12655): Add a variable control bar to dashboards to select values for variables.
+1. [12706](https://github.com/influxdata/influxdb/pull/12706): Add ability to add variable to script from the side menu.
+1. [12791](https://github.com/influxdata/influxdb/pull/12791): Use time range for metaqueries in Data Explorer and Cell Editor Overlay
+1. [12843](https://github.com/influxdata/influxdb/pull/12843): Add copy to clipboard button to export overlays
+1. [12826](https://github.com/influxdata/influxdb/pull/12826): Enable copying error messages to the clipboard from dashboard cells
+1. [12876](https://github.com/influxdata/influxdb/pull/12876): Add the ability to update a token's status in Token list
+1. [12821](https://github.com/influxdata/influxdb/pull/12821): Allow variables to be re-ordered within control bar on a dashboard.
+1. [12888](https://github.com/influxdata/influxdb/pull/12888): Add the ability to delete a template
+1. [12901](https://github.com/influxdata/influxdb/pull/12901): Save user preference for variable control bar visibility and default to visible
+1. [12910](https://github.com/influxdata/influxdb/pull/12910): Add the ability to clone a template
+1. [12958](https://github.com/influxdata/influxdb/pull/12958): Add the ability to import a variable
+
+### Bug Fixes
+
+1. [12684](https://github.com/influxdata/influxdb/pull/12684): Fix mismatch in bucket row and header
+1. [12703](https://github.com/influxdata/influxdb/pull/12703): Allows user to edit note on cell
+1. [12764](https://github.com/influxdata/influxdb/pull/12764): Fix empty state styles in scrapers in org view
+1. [12790](https://github.com/influxdata/influxdb/pull/12790): Fix bucket creation error when changing retention rule types.
+1. [12793](https://github.com/influxdata/influxdb/pull/12793): Fix task creation error when switching schedule types.
+1. [12805](https://github.com/influxdata/influxdb/pull/12805): Fix hidden horizontal scrollbars in flux raw data view
+1. [12827](https://github.com/influxdata/influxdb/pull/12827): Fix screen tearing bug in Raw Data View
+1. [12961](https://github.com/influxdata/influxdb/pull/12961): Fix scroll clipping in graph legends & dropdown menus
+1. [12959](https://github.com/influxdata/influxdb/pull/12959): Fix routing loop
+
+### UI Improvements
+
+1. [12782](https://github.com/influxdata/influxdb/pull/12782): Move bucket selection in the query builder to the first card in the list
+1. [12850](https://github.com/influxdata/influxdb/pull/12850): Ensure editor is automatically focused in note editor
+1. [12915](https://github.com/influxdata/influxdb/pull/12915): Add ability to edit a template's name.
+
+## v2.0.0-alpha.6 [2019-03-15]
+
+### Release Notes
+
+We have updated the way we do predefined dashboards to [include Templates](https://github.com/influxdata/influxdb/pull/12532) in this release. As a result, existing Organizations will not have a System dashboard created when they build a new Telegraf configuration. To get this functionality, remove your existing data and start from scratch.
+
+**NOTE: This will remove all data from your InfluxDB v2.0 instance including timeseries data.**
+
+On most `linux` systems including `macOS`:
+
+```sh
+$ rm -r ~/.influxdbv2
+```
+
+Once completed, `v2.0.0-alpha.6` can be started.
+
+### Features
+
+1. [12496](https://github.com/influxdata/influxdb/pull/12496): Add ability to import a dashboard
+1. [12524](https://github.com/influxdata/influxdb/pull/12524): Add ability to import a dashboard from org view
+1. [12531](https://github.com/influxdata/influxdb/pull/12531): Add ability to export a dashboard and a task
+1. [12615](https://github.com/influxdata/influxdb/pull/12615): Add `run` subcommand to influxd binary. This is also the default when no subcommand is specified (see the example below).
+1. [12523](https://github.com/influxdata/influxdb/pull/12523): Add ability to save a query as a variable from the Data Explorer.
+1. [12532](https://github.com/influxdata/influxdb/pull/12532): Add System template on onboarding
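+
+For example, the following two invocations behave identically:
+
+```sh
+# Explicit subcommand
+influxd run
+
+# Equivalent: `run` is the default when no subcommand is given
+influxd
+```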
+
+### Bug Fixes
+
+1. [12641](https://github.com/influxdata/influxdb/pull/12641): Stop scrollbars from covering text in flux editor
+
+### UI Improvements
+
+1. [12610](https://github.com/influxdata/influxdb/pull/12610): Fine tune keyboard interactions for managing labels from a resource card
+
+## v2.0.0-alpha.5 [2019-03-08]
+
+### Release Notes
+
+This release includes a [breaking change](https://github.com/influxdata/influxdb/pull/12391) to the format that TSM and index data are stored on disk.
+Any existing local data will not be queryable once InfluxDB is upgraded to this release.
+Prior to installing this release, we recommend removing all storage-engine data from your local InfluxDB `2.x` installation; this can be done without losing any of your other InfluxDB `2.x` data (settings, etc.).
+To remove only local storage data, run the following in a terminal.
+
+On most `linux` systems:
+
+```sh
+# Replace <username> with your actual username.
+$ rm -r /home/<username>/.influxdbv2/engine
+```
+
+On `macOS`:
+
+```sh
+# Replace <username> with your actual username.
+$ rm -r /Users/<username>/.influxdbv2/engine
+```
+
+Once completed, `v2.0.0-alpha.5` can be started.
+
+### Features
+
+1. [12096](https://github.com/influxdata/influxdb/pull/12096): Add labels to cloned tasks
+1. [12111](https://github.com/influxdata/influxdb/pull/12111): Add ability to filter resources by clicking a label
+1. [12401](https://github.com/influxdata/influxdb/pull/12401): Add ability to add a member to org
+1. [12391](https://github.com/influxdata/influxdb/pull/12391): Improve representation of TSM tagsets on disk
+1. [12437](https://github.com/influxdata/influxdb/pull/12437): Add ability to remove a member from org
+
+### Bug Fixes
+
+1. [12302](https://github.com/influxdata/influxdb/pull/12302): Prevent clipping of code snippets in Firefox
+1. [12379](https://github.com/influxdata/influxdb/pull/12379): Prevent clipping of cell edit menus in dashboards
+
+### UI Improvements
+
+1. [12302](https://github.com/influxdata/influxdb/pull/12302): Make code snippet copy functionality easier to use
+1. [12304](https://github.com/influxdata/influxdb/pull/12304): Always show live preview in Note Cell editor
+1. [12317](https://github.com/influxdata/influxdb/pull/12317): Redesign Create Scraper workflow
+1. [12317](https://github.com/influxdata/influxdb/pull/12317): Show warning in Telegrafs and Scrapers lists when user has no buckets
+1. [12384](https://github.com/influxdata/influxdb/pull/12384): Streamline label addition, removal, and creation from the dashboards list
+1. [12464](https://github.com/influxdata/influxdb/pull/12464): Improve label color selection
+
+## v2.0.0-alpha.4 [2019-02-21]
+
+### Features
+
+1. [11954](https://github.com/influxdata/influxdb/pull/11954): Add the ability to run a task manually from tasks page
+1. [11990](https://github.com/influxdata/influxdb/pull/11990): Add the ability to select a custom time range in explorer and dashboard
+1. [12009](https://github.com/influxdata/influxdb/pull/12009): Display the version information on the login page
+1. [12011](https://github.com/influxdata/influxdb/pull/12011): Add the ability to update a Variable's name and query.
+1. [12026](https://github.com/influxdata/influxdb/pull/12026): Add labels to cloned dashboard
+1. [12018](https://github.com/influxdata/influxdb/pull/12057): Add ability to filter resources by label name
+1. [11973](https://github.com/influxdata/influxdb/pull/11973): Add ability to create or add labels to a resource from labels editor
+
+### Bug Fixes
+
+1. [11997](https://github.com/influxdata/influxdb/pull/11997): Update the bucket retention policy to update the time in seconds
+
+### UI Improvements
+
+1. [12016](https://github.com/influxdata/influxdb/pull/12016): Update the preview in the label overlays to be shorter
+1. [12012](https://github.com/influxdata/influxdb/pull/12012): Add notifications to scrapers page for created/deleted/updated scrapers
+1. [12023](https://github.com/influxdata/influxdb/pull/12023): Add notifications to buckets page for created/deleted/updated buckets
+1. [12072](https://github.com/influxdata/influxdb/pull/12072): Update the admin page to display error for password length
+
+## v2.0.0-alpha.3 [2019-02-15]
+
+### Features
+
+1. [11809](https://github.com/influxdata/influxdb/pull/11809): Add the ability to name a scraper target
+1. [11821](https://github.com/influxdata/influxdb/pull/11821): Display scraper name as the first and only updatable column in scrapers list
+1. [11804](https://github.com/influxdata/influxdb/pull/11804): Add the ability to view runs for a task
+1. [11824](https://github.com/influxdata/influxdb/pull/11824): Display last completed run for tasks list
+1. [11836](https://github.com/influxdata/influxdb/pull/11836): Add the ability to view the logs for a specific task run
+
+### Bug Fixes
+
+1. [11819](https://github.com/influxdata/influxdb/pull/11819): Update the inline edit for resource names to guard for empty strings
+1. [11852](https://github.com/influxdata/influxdb/pull/11852): Prevent a new template dashboard from being created on every telegraf config update
+1. [11848](https://github.com/influxdata/influxdb/pull/11848): Fix overlapping buttons in the telegrafs verify data step
+
+### UI Improvements
+
+1. [11764](https://github.com/influxdata/influxdb/pull/11764): Move the download telegraf config button to view config overlay
+1. [11879](https://github.com/influxdata/influxdb/pull/11879): Combine permissions for user by type
+1. [11938](https://github.com/influxdata/influxdb/pull/11938): Add ordering to UI list items
+
+## v2.0.0-alpha.2 [2019-02-07]
+
+### Features
+
+1. [11677](https://github.com/influxdata/influxdb/pull/11677): Add instructions button to view `$INFLUX_TOKEN` setup for telegraf configs (see the example below)
+1. [11693](https://github.com/influxdata/influxdb/pull/11693): Save the \$INFLUX_TOKEN environment variable in telegraf configs
+1. [11700](https://github.com/influxdata/influxdb/pull/11700): Update Tasks tab on Org page to look like Tasks Page
+1. [11740](https://github.com/influxdata/influxdb/pull/11740): Add view button to view the telegraf config toml
+1. [11522](https://github.com/influxdata/influxdb/pull/11522): Add plugin information step to allow for config naming and configuring one plugin at a time
+1. [11758](https://github.com/influxdata/influxdb/pull/11758): Update Dashboards tab on Org page to look like Dashboards Page
+1. [11810](https://github.com/influxdata/influxdb/pull/11810): Add tab for template variables under organizations page
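+
+A sketch of the setup those instructions describe; the host, port, and config ID are placeholders:
+
+```sh
+# Telegraf reads $INFLUX_TOKEN from the environment when it fetches
+# its configuration from InfluxDB
+export INFLUX_TOKEN=YOUR_AUTH_TOKEN
+telegraf --config http://localhost:8086/api/v2/telegrafs/YOUR_TELEGRAF_CONFIG_ID
+```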
+
+### Bug Fixes
+
+1. [11678](https://github.com/influxdata/influxdb/pull/11678): Update the System Telegraf Plugin bundle to include the swap plugin
+1. [11722](https://github.com/influxdata/influxdb/pull/11722): Revert behavior allowing users to create authorizations on behalf of another user
+
+### UI Improvements
+
+1. [11683](https://github.com/influxdata/influxdb/pull/11683): Change the wording for the plugin config form button to Done
+1. [11689](https://github.com/influxdata/influxdb/pull/11689): Change the wording for the Collectors configure step button to Create and Verify
+1. [11697](https://github.com/influxdata/influxdb/pull/11697): Standardize page loading spinner styles
+1. [11711](https://github.com/influxdata/influxdb/pull/11711): Show checkbox on Save As button in data explorer
+1. [11705](https://github.com/influxdata/influxdb/pull/11705): Make the collectors plugin sidebar visible only in the configure step
+1. [11745](https://github.com/influxdata/influxdb/pull/11745): Swap retention policies on Create bucket page
+
+## v2.0.0-alpha.1 [2019-01-23]
+
+### Release Notes
+
+This is the initial alpha release of InfluxDB 2.0.
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 7d6b85bbdaf..2de5777dbff 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,19 +1,41 @@
 # Contributing to InfluxDB v2
-## Bug reports
-Before you file an issue, please search existing issues in case it has already been filed, or perhaps even fixed.
-If you file an issue, please include the following.
-* Full details of your operating system (or distribution) e.g. `64bit Ubuntu 18.04`.
-* The version of InfluxDB you are running
-* Whether you installed it using a pre-built package, or built it from source.
-* Clear steps to reproduce the issue described, if at all possible.
+## How to report a bug
-The easier it is for us to reproduce the problem, the easier it is for us to fix it.
-If you have never written a bug report before, or if you want to brush up on your bug reporting skills, we recommend reading [Simon Tatham's essay "How to Report Bugs Effectively."](http://www.chiark.greenend.org.uk/~sgtatham/bugs.html)
+Before you report an issue, please [search existing issues](https://github.com/influxdata/influxdb/issues) to check whether it's
+already been reported, or perhaps even fixed.
+If you choose to report an issue, please include the following in your report:
+- Full details of your operating system (or distribution)--for example, `64bit Ubuntu 18.04`.
+  To get your operating system details, run the following command in your terminal
+  and copy-paste the output into your report:
+
+  ```sh
+  uname -srm
+  ```
+- How you installed InfluxDB. Did you use a pre-built package or did you build from source?
+- The version of InfluxDB you're running.
+  If you installed InfluxDB using a pre-built package, run the following command in your terminal and then copy-paste the output into your report:
+
+  ```sh
+  influxd version
+  ```
+
+  If you built and ran `influxd` from source, run the following command from your *influxdb* directory and then copy-paste the output into your report:
+
+  ```sh
+  bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influxd version
+  ```
+- [Clear steps to reproduce the issue](#how-to-provide-steps-for-reproducing-an-issue)
+
+### How to provide steps for reproducing an issue
+
+The easier we can reproduce the problem, the easier we can fix it.
+To learn how to write an effective bug report, we recommend reading [Simon Tatham's essay, "How to Report Bugs Effectively."](http://www.chiark.greenend.org.uk/~sgtatham/bugs.html).
+ +When describing how to reproduce the issue, +please provide test cases in the form of `curl` commands--for example: -Ideally, test cases would be in the form of `curl` commands. -For example: ```bash # write data curl -XPOST "http://localhost:8086/api/v2/write?org=YOUR_ORG&bucket=YOUR_BUCKET&precision=s" \ @@ -32,8 +54,8 @@ curl http://localhost:8086/api/v2/query?org=my-org -XPOST -sS \ |> sum()' ``` -Test cases with `influx` commands are also helpful. -For example: +Test cases with `influx` CLI commands are also helpful--for example: + ``` # write data influx write -o YOUR_ORG -b YOUR_BUCKET -p s -t YOURAUTHTOKEN \ @@ -47,213 +69,166 @@ influx query -o YOUR_ORG -t YOURAUTHTOKEN 'from(bucket:"example-bucket") |> sum()' ``` -If you don't include a clear test case like this it will be very difficult for us to investigate your issue. -If writing the data is too difficult, please zip up your data directory and include a link to it in your bug report. +If you don't provide clear test cases like the examples above, then investigating your issue will be very difficult for us. +If you have trouble including data in your report, please zip up your data directory and include a link to it in your bug report. -Please note that issues are *not the place to file general support requests* such as "how do I use collectd with InfluxDB?" -Questions of this nature should be sent to the [InfluxData Community](https://community.influxdata.com/), not filed as issues. +Note that issues are _not the place to file general support requests_ such as "How do I use `collectd` with InfluxDB?" +Please submit requests for help to the [InfluxData Community](https://community.influxdata.com/) - don't report them as issues in the repo. -## Feature requests -We really like to receive feature requests as it helps us prioritize our work. -Please be clear about your requirements and goals, help us to understand what you would like to see added to InfluxD with examples and the reasons why it is important to you. -If you find your feature request already exists as a Github issue please indicate your support for that feature by using the "thumbs up" reaction. +## How to request a feature -## Contributing to the source code +We encourage you to submit feature requests as they help us prioritize our work. -You should read our [coding guide](https://github.com/influxdata/influxdb/blob/master/DEVELOPMENT.md), to understand better how to write code for InfluxDB. +In your feature request, please include the following: +- Clear requirements and goals. +- What you would like to see added to InfluxDB. +- Examples. +- Why the feature is important to you. -## Submitting a pull request -To submit a pull request you should fork the InfluxDB repository, and make your change on a feature branch of your fork. -Then generate a pull request from your branch against *master* of the InfluxDB repository. -Include in your pull request details of your change -- the why *and* the how -- as well as the testing your performed. -Also, be sure to run the test suite with your change in place. -Changes that cause tests to fail cannot be merged. +If you find your request already exists in a Github issue, +please indicate your support for the existing issue by using the "thumbs up" reaction. -There will usually be some back and forth as we finalize the change, but once that completes it may be merged. 
+## How to submit a pull (change) request -To assist in review for the PR, please add the following to your pull request comment: +To submit a change for code or documentation in this repository, please [create a pull request](https://github.com/influxdata/influxdb/compare) and follow the instructions in the pull request template to help us review your PR. +After you complete the template steps and submit the PR, expect some deliberation as we review and finalize the change. +Once your PR is approved, you can merge it. -```md -- [ ] CHANGELOG.md updated -- [ ] Rebased/mergable -- [ ] Tests pass -- [ ] Sign [CLA](https://influxdata.com/community/cla/) (if not already signed) -``` +## How to report security vulnerabilities -## Security Vulnerability Reporting InfluxData takes security and our users' trust very seriously. If you believe you have found a security issue in any of our open source projects, please responsibly disclose it by contacting security@influxdata.com. More details about security vulnerability reporting, including our GPG key, [can be found here](https://www.influxdata.com/how-to-report-security-vulnerabilities/). ## Signing the CLA -If you are going to be contributing back to InfluxDB please take a second to sign our CLA, which can be found [on our website](https://influxdata.com/community/cla/). +Before you contribute to InfluxDB, please sign our [Individual Contributor License Agreement (CLA)](https://influxdata.com/community/cla/). -## Building from Source +## How to build InfluxDB from source -### Installing Go +### Install Go -InfluxDB requires Go 1.15. +InfluxDB requires Go 1.18. At InfluxData we find `gvm`, a Go version manager, useful for installing Go. For instructions on how to install it see [the gvm page on github](https://github.com/moovweb/gvm). -After installing gvm you can install and set the default go version by running the following: +After installing `gvm` you can install and set the default Go version by running the following: + ```bash -$ gvm install go1.15 -$ gvm use go1.15 --default +$ gvm install go1.18 +$ gvm use go1.18 --default ``` InfluxDB requires Go module support. Set `GO111MODULE=on` or build the project outside of your `GOPATH` for it to succeed. For information about modules, please refer to the [wiki](https://github.com/golang/go/wiki/Modules). -### Revision Control Systems +### Install revision control systems Go has the ability to import remote packages via revision control systems with the `go get` command. -To ensure that you can retrieve any remote package, be sure to install the following rcs software to your system. -Currently the project only depends on `git` and `bzr`. +To ensure that you can retrieve any remote package, install `git` and `bzr` revision control software, following the instructions for your system: - * [Install Git](http://git-scm.com/book/en/Getting-Started-Installing-Git) - * [Install Bazaar](http://doc.bazaar.canonical.com/latest/en/user-guide/installing_bazaar.html) +- [Install Git](http://git-scm.com/book/en/Getting-Started-Installing-Git) +- [Install Bazaar](http://doc.bazaar.canonical.com/latest/en/user-guide/installing_bazaar.html) -### Additional Dependencies +### Install additional dependencies -You need a recent stable version of Rust. We recommend using [rustup](https://rustup.rs/) to install Rust. +In addition to `go`, `git`, and `bzr`, you will need the following prerequisites +installed on your system: -You also need `protobuf`, `yarn`, and `make` installed. 
+- Rust (a recent stable version, 1.60 or higher).
+  To install Rust, we recommend using [rustup](https://rustup.rs/).
+- `clang`
+- `make`
+- `pkg-config`
+- `protobuf`
+- Go protobuf plugin. To use Go to install the plugin, enter the following command in your terminal:
-- OSX: `brew install protobuf yarn make`
-- Linux (Arch): `pacman -S protobuf yarn make`
-- Linux (Ubuntu, RHEL): See below
+  `go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.28`
-#### Ubuntu-specific instructions:
+To install prerequisites, use the following example command for your system:
-For Ubuntu, you need to change the apt repository for `yarn`:
+- OSX: `brew install pkg-config protobuf`
+  - For OSX, you must have [HomeBrew](https://brew.sh) installed.
+  - You will also need the [Developer Tools](https://webkit.org/build-tools/), which includes `make`.
+- Linux (Arch): `pacman -S clang make pkgconf protobuf`
+- Linux (Ubuntu): `sudo apt install make clang pkg-config protobuf-compiler libprotobuf-dev build-essential`
+- Linux (RHEL): see the [RedHat-specific instructions](#redhat-specific-instructions).
-```
-sudo apt remove yarn cmdtest
-wget -qO- https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add -
-sudo apt-add-repository "deb https://dl.yarnpkg.com/debian/ stable main"
-
-sudo apt install protobuf-compiler libprotobuf-dev yarn make
-```
-#### Redhat-specific instructions
-For RedHat, there are some extra steps:
-1. You must enable the [EPEL](https://fedoraproject.org/wiki/EPEL)
-2. You must add the `yarn` [repository](https://yarnpkg.com/lang/en/docs/install/#centos-stable)
+#### RedHat-specific instructions
-### Building with make
+For RedHat, you must enable the [EPEL](https://fedoraproject.org/wiki/EPEL) repository.
-A successful `make` run results in two binaries, with platform-dependent paths:
-
-```
-$ make
-...
-env GO111MODULE=on go build -tags 'assets ' -o bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influx ./cmd/influx
-env GO111MODULE=on go build -tags 'assets ' -o bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influxd ./cmd/influxd
-```
+### Build influxd with make
 `influxd` is the InfluxDB service.
-`influx` is the CLI management tool.
-
-Start the service.
-Logs to stdout by default:
-
-```
-$ bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influxd
-```
-### Building with the go command
-The `Makefile` provides a wrapper around configuring the utilities for building influxdb. For those wanting to use the `go` command directly, one of two things can be done.
+For `influx`, the InfluxDB CLI tool, see the [influx-cli repository on Github](https://github.com/influxdata/influx-cli).
-First, the `env` script is located in the root of the repository. This script can be used to execute `go` commands with the appropriate environment configuration.
-
-```bash
-$ ./env go build ./cmd/influxd
-```
+Once you've installed the dependencies,
+follow these steps to build `influxd` from source and start the service:
-Another method is to configure the `pkg-config` utility. Follow the instructions [here](https://github.com/influxdata/flux#getting-started) to install and configure `pkg-config` and then the normal go commands will work.
+1. Clone this repo (influxdb).
+2. In your influxdb directory, run `make` to generate the influxd binary:
-The first step is to install the `pkg-config` command.
+ ```sh + make + ``` -```bash -# On Debian/Ubuntu -$ sudo apt-get install -y clang pkg-config -# On Mac OS X with Homebrew -$ brew install pkg-config -``` + If successful, `make` installs the binary to a platform-specific path for your system. + The output is the following: -Install the `pkg-config` wrapper utility of the same name to a different path that is earlier in the PATH. + ```sh + env GO111MODULE=on go build -tags 'assets ' -o bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influxd ./cmd/influxd + ``` -```bash -# Install the pkg-config wrapper utility -$ go build -o ~/go/bin/ github.com/influxdata/pkg-config -# Ensure the GOBIN directory is on your PATH -$ export PATH=$HOME/go/bin:${PATH} -$ which -a pkg-config -/home/user/go/bin/pkg-config -/usr/bin/pkg-config -``` +3. To start the `influxd` service that runs InfluxDB, enter the following command + to run the platform-specific binary: -Then all `go` build commands should work. + ``` + bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influxd + ``` -```bash -$ go build ./cmd/influxd -$ go test ./... -``` + `influxd` logs to `stdout` by default. +**Troubleshooting** -### Testing - -This project is built from various languages. To run test for all langauges and components use: +- If you've changed Go or Rust versions and have trouble building, try running `go clean -r -x -cache -testcache -modcache ./` to clear out old build artifacts that may be incompatible. -```bash -$ make test -``` +### Run tests -To run tests for just the Javascript component use: +This project is built from various languages. +To run tests for all languages and components, enter the following command in +your terminal: ```bash -$ make test-js +make test ``` -To run tests for just the Go/Rust components use: +To run tests for only Go and Rust components, enter the following command in your terminal: ```bash -$ make test-go +make test-go ``` - ## Generated Google Protobuf code -Most changes to the source do not require that the generated protocol buffer code be changed. -But if you need to modify the protocol buffer code, you'll first need to install the protocol buffers toolchain. +Most changes to the source don't require changes to the generated protocol buffer code. +If you need to modify the protocol buffer code, you'll first need to install the protocol buffers toolchain. -First install the [protocol buffer compiler](https://developers.google.com/protocol-buffers/ -) 2.6.1 or later for your OS: +First install the [protocol buffer compiler](https://developers.google.com/protocol-buffers/) 3.17.3 or later for your OS. -Then install the go plugins: +Then run `go generate` after updating any `*.proto` file: ```bash -$ go get github.com/gogo/protobuf/proto -$ go get github.com/gogo/protobuf/protoc-gen-gogo -$ go get github.com/gogo/protobuf/gogoproto +go generate ./... ``` -Finally run, `go generate` after updating any `*.proto` file: - -```bash -$ go generate ./... -``` -**Troubleshooting** +**How to troubleshoot protobuf** If generating the protobuf code is failing for you, check each of the following: -* Ensure the protobuf library can be found. Make sure that `LD_LIBRARY_PATH` includes the directory in which the library `libprotoc.so` has been installed. -* Ensure the command `protoc-gen-gogo`, found in `GOPATH/bin`, is on your path. This can be done by adding `GOPATH/bin` to `PATH`. +- Ensure the protobuf library can be found. 
Make sure that `LD_LIBRARY_PATH` includes the directory in which the library `libprotoc.so` has been installed. +- Ensure the command `protoc-gen-go`, found in `GOPATH/bin`, is on your path. This can be done by adding `GOPATH/bin` to `PATH`. ## Generated Go Templates @@ -295,7 +270,8 @@ $ go tool pprof ./influxd influxd.prof # once inside run "web", opens up browser with the CPU graph # can also run "web " to zoom in. Or "list " to see specific lines ``` -Note that when you pass the binary to `go tool pprof` *you must specify the path to the binary*. + +Note that when you pass the binary to `go tool pprof` _you must specify the path to the binary_. If you are profiling benchmarks built with the `testing` package, you may wish to use the [`github.com/pkg/profile`](github.com/pkg/profile) package to limit diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md deleted file mode 100644 index 6f6327e726d..00000000000 --- a/DEVELOPMENT.md +++ /dev/null @@ -1,105 +0,0 @@ -# Local Development and Writing Code for Influxdb - -**Table of Contents** -* [Quickstart](#quickstart) -* [Full Length Guide](#full-length-guide) -* [Getting Some Graphs](#getting-some-graphs) -* [Getting Help](#getting-help) - -## Quickstart - -## Docker - -We've provided `make` targets that provide an "interactive" development experience using Docker. - - -```console -make dshell -``` - -This command builds a development container image and puts you inside a container with all the tooling you require to develop and build InfluxDB. -You can use the "Local" instructions once inside this container and work on the premise that you have everything installed. - -Other container runtimes should work, but we've only tested with Docker and Podman (`alias docker=podman`). - -## Local - -Assuming you have Go 1.13, Node LTS, and yarn installed, and some means of ingesting data locally (e.g. telegraf): - -You'll need two terminal tabs to run influxdb from source: one to run the go application server, the other to run the development server that will listen for front-end changes, rebuild the bundle, serve the new bundle, then reload your webpage for you. - -Tab 1: - -```sh -go run ./cmd/influxd --assets-path=ui/build -``` - -Tab 2: - -```sh -cd ui -yarn && yarn start -``` - -If there are no errors, hit [localhost:8080](http://localhost:8080) and follow the prompts to setup your username and password. *Note the port difference: `8080` vs the production `8086`* - -You're set up to develop Influx locally. Any changes you make to front-end code under the `ui/` directory will be updated after the watcher process (that was initiated by running `yarn start`) sees them and rebuilds the bundle. Any changes to go code will need to be re-compiled by re-running the `go run` command above. - -See [Getting some Graphs](#getting-some-graphs) for next steps. - -## Full-Length Guide - -To get started with Influx, you'll need to install these tools if you don't already have them: - -1. [Install go](https://golang.org/doc/install) -1. [Install nodejs](https://nodejs.org/en/download/package-manager/) -1. [Install yarn](https://yarnpkg.com/lang/en/docs/install/) - -Yarn is a package manager for nodejs and an alternative to npm. - -To run Influx locally, you'll need two terminal tabs: one to run the go application server, the other to run the development server that will listen for front-end changes, rebuild the bundle, serve the new bundle, then reload your webpage for you. 
- -Tab 1: - -```sh -go run ./cmd/influxd --assets-path=ui/build -``` - -This starts the influxdb application server. It handles API requests and can be reached via `localhost:8086`. Any changes to go code will need to be re-compiled by re-running the `go run` command above. - -Tab 2: - -```sh -cd ui -yarn install -yarn start -``` - -This installs front-end dependencies and starts the front-end build server. It will listen to changes to TypeScript and JavaScript files, rebuild the front-end bundle, serve that bundle, then auto reload any pages with changes. If everything went smoothly without errors, you should be able to go to [localhost:8080.](http://localhost:8080) and follow the prompts to login or to setup your username and password. - -If you're setting things up for the first time, be sure to check out the [the official getting started guide](https://v2.docs.influxdata.com/v2.0/get-started/) to get make sure you configure everything properly. - -### Testing Changes - -To make sure everything got wired up properly, we'll want to make a minor change on the frontend and see that it's added. - -Add a newline and following log statement to the [entry point to the app:](https://github.com/influxdata/influxdb/blob/master/ui/src/index.tsx#L468) - -```js -console.log('hello, world!') -``` - -Your browser should reload the page after you save your changes (sometimes this happens quickly and is hard to spot). Open your browser console and you should see your message after the page reloads. - -## Getting some Graphs - -If you haven't set up telegraf yet, [following the official telegraf documentation](https://v2.docs.influxdata.com/v2.0/write-data/no-code/use-telegraf/) is the quickest and most straightforward and hassle-free way of getting some data into your local instance. The documentation there will be kept fresher and and more current than this tutorial. - -Learning how to input Line protocol data is a great tool if you need to debug with arbitrary adhoc data. Check out a quick intro to the [Line protocol](https://v2.docs.influxdata.com/v2.0/write-data/#what-you-ll-need), and learn how to [input it via the ui.](https://v2.docs.influxdata.com/v2.0/write-data/#user-interface) *Since we're running `influxd` locally, you can skip step 1.* - -## Getting Help - -If you get stuck, the following resources might help: - -* [Influx Community Slack #V2 channel](https://app.slack.com/client/TH8RGQX5Z/CH8RV8PK5) -* [InfluxData subreddit](https://www.reddit.com/r/InfluxData/) diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index 8302a4c132a..00000000000 --- a/Dockerfile +++ /dev/null @@ -1,59 +0,0 @@ -FROM ubuntu:20.04 AS dbuild - -ENV DEBIAN_FRONTEND noninteractive - -# Needed for Yarn steps to veryify the keys -RUN apt update -RUN apt install --yes curl gnupg2 -RUN curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - -RUN echo "deb https://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list - -# Now update index with Yarn -RUN apt update -RUN apt install --yes \ - cargo \ - git \ - golang \ - libclang-dev \ - llvm-dev \ - make \ - nodejs \ - protobuf-compiler \ - ragel \ - rustc \ - yarn - -FROM dbuild AS dshell - -ARG USERID=1000 -RUN adduser --quiet --home /code --uid ${USERID} --disabled-password --gecos "" influx -USER influx - -ENTRYPOINT [ "/bin/bash" ] - -FROM dbuild AS dbuild-all - -COPY . 
/code
-WORKDIR /code
-RUN make
-
-##
-# InfluxDB Image (Monolith)
-##
-FROM debian:stretch-slim AS influx
-
-COPY --from=dbuild-all /code/bin/linux/influxd /usr/bin/influxd
-COPY --from=dbuild-all /code/bin/linux/influx /usr/bin/influx
-
-EXPOSE 8086
-
-ENTRYPOINT [ "/usr/bin/influxd" ]
-
-##
-# InfluxDB UI Image
-##
-FROM nginx:alpine AS ui
-
-EXPOSE 80
-
-COPY --from=dbuild-all /code/ui/build /usr/share/nginx/html
diff --git a/GNUmakefile b/GNUmakefile
new file mode 100644
index 00000000000..01a138e7101
--- /dev/null
+++ b/GNUmakefile
@@ -0,0 +1,194 @@
+# Top level Makefile for the entire project
+#
+# This Makefile encodes the "go generate" prerequisites ensuring that the proper tooling is installed and
+# that the generate steps are executed when their prerequisite files change.
+#
+# This Makefile follows a few conventions:
+#
+# * All cmds must be added to this top level Makefile.
+# * All binaries are placed in ./bin, it's recommended to add this directory to your PATH.
+#
+export GOPATH=$(shell go env GOPATH)
+export GOOS=$(shell go env GOOS)
+export GOARCH=$(shell go env GOARCH)
+
+ifneq (,$(filter $(GOARCH),amd64 s390x))
+	# Including the assets tag requires the UI to be built for compilation to succeed.
+	# Don't force it for running tests.
+	GO_TEST_TAGS :=
+	GO_BUILD_TAGS := assets
+else
+	# noasm needed to avoid a panic in Flux for non-amd64, non-s390x.
+	GO_TEST_TAGS := noasm
+	GO_BUILD_TAGS := assets,noasm
+endif
+
+# Tags used for builds and tests on all architectures
+COMMON_TAGS := sqlite_foreign_keys,sqlite_json
+
+GO_TEST_ARGS := -tags '$(COMMON_TAGS),$(GO_TEST_TAGS)'
+GO_BUILD_ARGS := -tags '$(COMMON_TAGS),$(GO_BUILD_TAGS)'
+
+# Use default flags, but allow adding -gcflags "..." if desired. E.g., for debug
+# builds, you may want to use GCFLAGS="all=-N -l" in the build environment.
+GCFLAGS ?=
+ifneq ($(GCFLAGS),)
+GO_BUILD_ARGS += -gcflags "$(GCFLAGS)"
+endif
+
+ifeq ($(OS), Windows_NT)
+	VERSION := $(shell git describe --exact-match --tags 2>nil)
+else
+	VERSION := $(shell git describe --exact-match --tags 2>/dev/null)
+endif
+COMMIT := $(shell git rev-parse --short HEAD)
+
+LDFLAGS := $(LDFLAGS) -X main.commit=$(COMMIT)
+ifdef VERSION
+	LDFLAGS += -X main.version=$(VERSION)
+endif
+
+# Allow for `go test` to be swapped out by other tooling, e.g. `gotestsum`
+GO_TEST_CMD=go test
+# Allow for a subset of tests to be specified.
+GO_TEST_PATHS=./...
+
+# Test vars can be used by all recursive Makefiles
+export PKG_CONFIG:=$(PWD)/scripts/pkg-config.sh
+export GO_BUILD=env GO111MODULE=on go build $(GO_BUILD_ARGS) -ldflags "$(LDFLAGS)"
+export GO_INSTALL=env GO111MODULE=on go install $(GO_BUILD_ARGS) -ldflags "$(LDFLAGS)"
+export GO_TEST=env GOTRACEBACK=all GO111MODULE=on $(GO_TEST_CMD) $(GO_TEST_ARGS)
+# Do not add GO111MODULE=on to the call to go generate so it doesn't pollute the environment.
+export GO_GENERATE=go generate $(GO_BUILD_ARGS)
+export GO_VET=env GO111MODULE=on go vet $(GO_TEST_ARGS)
+export GO_RUN=env GO111MODULE=on go run $(GO_BUILD_ARGS)
+export PATH := $(PWD)/bin/$(GOOS):$(PATH)
+
+
+# All go source files
+SOURCES := $(shell find . -name '*.go' -not -name '*_test.go') go.mod go.sum
+
+# All go source files excluding the vendored sources.
+SOURCES_NO_VENDOR := $(shell find . 
-path ./vendor -prune -o -name "*.go" -not -name '*_test.go' -print) + +# List of binary cmds to build +CMDS := \ + bin/$(GOOS)/influxd + +all: generate $(CMDS) + +# +# Define targets for commands +# +bin/$(GOOS)/influxd: $(SOURCES) + $(GO_BUILD) -o $@ ./cmd/$(shell basename "$@") + +influxd: bin/$(GOOS)/influxd + +static/data/build: scripts/fetch-ui-assets.sh + ./scripts/fetch-ui-assets.sh + +static/data/swagger.json: scripts/fetch-swagger.sh + ./scripts/fetch-swagger.sh + +# static/static_gen.go is the output of go-bindata, embedding all assets used by the UI. +static/static_gen.go: static/data/build static/data/swagger.json + $(GO_GENERATE) ./static + +# +# Define action only targets +# + +fmt: $(SOURCES_NO_VENDOR) + ./etc/fmt.sh + +checkfmt: + ./etc/checkfmt.sh + $(GO_RUN) github.com/editorconfig-checker/editorconfig-checker/cmd/editorconfig-checker + +tidy: + GO111MODULE=on go mod tidy + +checktidy: + ./etc/checktidy.sh + +checkgenerate: + ./etc/checkgenerate.sh + +checksqlmigrations: + ./etc/check-sql-migrations.sh + +# generate-web-assets outputs all the files needed to link the UI to the back-end. +# Currently, none of these files are tracked by git. +generate-web-assets: static/static_gen.go + +# generate-sources outputs all the Go files generated from protobufs, tmpls, and other tooling. +# These files are tracked by git; CI will enforce that they are up-to-date. +generate-sources: protoc tmpl stringer goimports + $(GO_GENERATE) ./influxql/... ./models/... ./pkg/... ./storage/... ./tsdb/... ./v1/... + +generate: generate-web-assets generate-sources + +protoc: + $(GO_INSTALL) google.golang.org/protobuf/cmd/protoc-gen-go@v1.27.1 + +tmpl: + $(GO_INSTALL) github.com/benbjohnson/tmpl + +stringer: + $(GO_INSTALL) golang.org/x/tools/cmd/stringer + +goimports: + $(GO_INSTALL) golang.org/x/tools/cmd/goimports + +test-go: + $(GO_TEST) $(GO_TEST_PATHS) + +test-flux: + @./etc/test-flux.sh + +test-tls: + @./etc/test-tls.sh + +test-integration: GO_TAGS=integration +test-integration: + $(GO_TEST) -count=1 $(GO_TEST_PATHS) + +test: test-go + +test-go-race: + $(GO_TEST) -v -race -count=1 $(GO_TEST_PATHS) + +vet: + $(GO_VET) -v ./... + +bench: + $(GO_TEST) -bench=. -run=^$$ ./... + +build: all + +pkg-config: + $(GO_INSTALL) github.com/influxdata/pkg-config + +clean: + $(RM) -r static/static_gen.go static/data + $(RM) -r bin + $(RM) -r dist + +# generate feature flags +flags: + $(GO_GENERATE) ./kit/feature + +docker-image-influx: + @cp .gitignore .dockerignore + @docker image build -t influxdb:dev --target influx . + +dshell-image: + @cp .gitignore .dockerignore + @docker image build --build-arg "USERID=$(shell id -u)" -t influxdb:dshell --target dshell . + +dshell: dshell-image + @docker container run --rm -p 8086:8086 -p 8080:8080 -u $(shell id -u) -it -v $(shell pwd):/code -w /code influxdb:dshell + +# .PHONY targets represent actions that do not create an actual file. +.PHONY: all $(SUBDIRS) run fmt checkfmt tidy checktidy checkgenerate test test-go test-go-race test-tls bench clean node_modules vet nightly dist protoc influxd libflux flags dshell dclean docker-image-flux docker-image-influx pkg-config diff --git a/Makefile b/Makefile deleted file mode 100644 index e812cba34da..00000000000 --- a/Makefile +++ /dev/null @@ -1,240 +0,0 @@ -# Top level Makefile for the entire project -# -# This Makefile encodes the "go generate" prerequisites ensuring that the proper tooling is installed and -# that the generate steps are executed when their prerequisite files change. 
-# -# This Makefile follows a few conventions: -# -# * All cmds must be added to this top level Makefile. -# * All binaries are placed in ./bin, its recommended to add this directory to your PATH. -# * Each package that has a need to run go generate, must have its own Makefile for that purpose. -# * All recursive Makefiles must support the all and clean targets -# - -# SUBDIRS are directories that have their own Makefile. -# It is required that all SUBDIRS have the `all` and `clean` targets. -SUBDIRS := http ui chronograf storage - -export GOPATH=$(shell go env GOPATH) -export GOOS=$(shell go env GOOS) -export GOARCH=$(shell go env GOARCH) - -ifeq ($(GOARCH), amd64) - # Including the assets tag requires the UI to be built for compilation to succeed. - # Don't force it for running tests. - GO_TEST_TAGS := - GO_BUILD_TAGS := assets -else - # noasm needed to avoid a panic in Flux for non-amd64. - GO_TEST_TAGS := noasm - GO_BUILD_TAGS := assets,noasm -endif - -GO_TEST_ARGS := -tags '$(GO_TEST_TAGS)' -GO_BUILD_ARGS := -tags '$(GO_BUILD_TAGS)' - -ifeq ($(OS), Windows_NT) - VERSION := $(shell git describe --exact-match --tags 2>nil) -else - VERSION := $(shell git describe --exact-match --tags 2>/dev/null) -endif -COMMIT := $(shell git rev-parse --short HEAD) - -LDFLAGS := $(LDFLAGS) -X main.commit=$(COMMIT) -ifdef VERSION - LDFLAGS += -X main.version=$(VERSION) -endif - -# Allow for `go test` to be swapped out by other tooling, i.e. `gotestsum` -GO_TEST_CMD=go test -# Allow for a subset of tests to be specified. -GO_TEST_PATHS=./... - -# Test vars can be used by all recursive Makefiles -export PKG_CONFIG:=$(PWD)/scripts/pkg-config.sh -export GO_BUILD=env GO111MODULE=on go build $(GO_BUILD_ARGS) -ldflags "$(LDFLAGS)" -export GO_BUILD_SM=env GO111MODULE=on go build $(GO_BUILD_ARGS) -ldflags "-s -w $(LDFLAGS)" -export GO_INSTALL=env GO111MODULE=on go install $(GO_BUILD_ARGS) -ldflags "$(LDFLAGS)" -export GO_TEST=env GOTRACEBACK=all GO111MODULE=on $(GO_TEST_CMD) $(GO_TEST_ARGS) -# Do not add GO111MODULE=on to the call to go generate so it doesn't pollute the environment. -export GO_GENERATE=go generate $(GO_BUILD_ARGS) -export GO_VET=env GO111MODULE=on go vet $(GO_TEST_ARGS) -export GO_RUN=env GO111MODULE=on go run $(GO_BUILD_ARGS) -export PATH := $(PWD)/bin/$(GOOS):$(PATH) - - -# All go source files -SOURCES := $(shell find . -name '*.go' -not -name '*_test.go') go.mod go.sum - -# All go source files excluding the vendored sources. -SOURCES_NO_VENDOR := $(shell find . -path ./vendor -prune -o -name "*.go" -not -name '*_test.go' -print) - -# All assets for chronograf -UISOURCES := $(shell find ui -type f -not \( -path ui/build/\* -o -path ui/node_modules/\* -o -path ui/.cache/\* -o -name Makefile -prune \) ) - -# All precanned dashboards -PRECANNED := $(shell find chronograf/canned -name '*.json') - -# List of binary cmds to build -CMDS := \ - bin/$(GOOS)/influx \ - bin/$(GOOS)/influxd - -all: $(SUBDIRS) generate $(CMDS) - -# Target to build subdirs. -# Each subdirs must support the `all` target. 
-$(SUBDIRS): - $(MAKE) -C $@ all - -# -# Define targets for commands -# -bin/$(GOOS)/influxd: $(SOURCES) - $(GO_BUILD) -o $@ ./cmd/$(shell basename "$@") - -bin/$(GOOS)/influx: $(SOURCES) - $(GO_BUILD_SM) -o $@ ./cmd/$(shell basename "$@") - -# Ease of use build for just the go binary -influxd: bin/$(GOOS)/influxd - -influx: bin/$(GOOS)/influx - -# -# Define targets for the web ui -# - -node_modules: ui/node_modules - -# phony target to wait for server to be alive -ping: - ./etc/pinger.sh - -e2e: ping - make -C ui e2e - -chronograf_lint: - make -C ui lint - -ui/node_modules: - make -C ui node_modules - -ui_client: - make -C ui client - -# -# Define action only targets -# - -fmt: $(SOURCES_NO_VENDOR) - gofmt -w -s $^ - -checkfmt: - ./etc/checkfmt.sh - $(GO_RUN) github.com/editorconfig-checker/editorconfig-checker/cmd/editorconfig-checker - -tidy: - GO111MODULE=on go mod tidy - -checktidy: - ./etc/checktidy.sh - -checkgenerate: - ./etc/checkgenerate.sh - -checkcommit: - # ./etc/circle-detect-committed-binaries.sh - -generate: $(SUBDIRS) - -test-js: node_modules - make -C ui test - -test-go: - $(GO_TEST) $(GO_TEST_PATHS) - -test-influxql-integration: - $(GO_TEST) -mod=readonly ./influxql/_v1tests - -test-influxql-validation: - $(GO_TEST) -mod=readonly ./influxql/_v1validation - -test-integration: GO_TAGS=integration -test-integration: - $(GO_TEST) -count=1 $(GO_TEST_PATHS) - -test: test-go test-js - -test-go-race: - $(GO_TEST) -v -race -count=1 $(GO_TEST_PATHS) - -vet: - $(GO_VET) -v ./... - -bench: - $(GO_TEST) -bench=. -run=^$$ ./... - -build: all - -pkg-config: - go build -o $(GOPATH)/bin/pkg-config github.com/influxdata/pkg-config - -# Parallelism for goreleaser must be set to 1 so it doesn't -# attempt to invoke pkg-config, which invokes cargo, -# for multiple targets at the same time. -dist: pkg-config - goreleaser build -p 1 --skip-validate --rm-dist - -release: pkg-config - goreleaser release -p 1 --rm-dist - -nightly: pkg-config - goreleaser release -p 1 --skip-validate --rm-dist --config=.goreleaser-nightly.yml - -clean: - @for d in $(SUBDIRS); do $(MAKE) -C $$d clean; done - $(RM) -r bin - $(RM) -r dist - -define CHRONOGIRAFFE - ._ o o - \_`-)|_ - ,"" _\_ - ," ## | 0 0. - ," ## ,-\__ `. - ," / `--._;) - "HAI, I'm Chronogiraffe. Let's be friends!" - ," ## / -," ## / -endef -export CHRONOGIRAFFE -chronogiraffe: $(SUBDIRS) generate $(CMDS) - @echo "$$CHRONOGIRAFFE" - -run: chronogiraffe - ./bin/$(GOOS)/influxd --assets-path=ui/build - -run-e2e: chronogiraffe - ./bin/$(GOOS)/influxd --assets-path=ui/build --e2e-testing --store=memory - -# generate feature flags -flags: - $(GO_GENERATE) ./kit/feature - -docker-image-influx: - @cp .gitignore .dockerignore - @docker image build -t influxdb:dev --target influx . - -docker-image-ui: - @cp .gitignore .dockerignore - @docker image build -t influxui:dev --target ui . - -dshell-image: - @cp .gitignore .dockerignore - @docker image build --build-arg "USERID=$(shell id -u)" -t influxdb:dshell --target dshell . - -dshell: dshell-image - @docker container run --rm -p 8086:8086 -p 8080:8080 -u $(shell id -u) -it -v $(shell pwd):/code -w /code influxdb:dshell - -# .PHONY targets represent actions that do not create an actual file. 
-.PHONY: all $(SUBDIRS) run fmt checkfmt tidy checktidy checkgenerate test test-go test-js test-go-race bench clean node_modules vet nightly chronogiraffe dist ping protoc e2e run-e2e influxd libflux flags dshell dclean docker-image-flux docker-image-influx pkg-config diff --git a/README.md b/README.md index 8854cbac256..4505bf3894d 100644 --- a/README.md +++ b/README.md @@ -3,19 +3,19 @@ InfluxDB is an open source time series platform. This includes APIs for storing and querying data, processing it in the background for ETL or monitoring and alerting purposes, user dashboards, and visualizing and exploring the data and more. The master branch on this repo now represents the latest InfluxDB, which now includes functionality for Kapacitor (background processing) and Chronograf (the UI) all in a single binary. -The list of InfluxDB Client Libraries that are compatible with the latest version can be found in [our documentation](https://v2.docs.influxdata.com/v2.0/reference/api/client-libraries/). +The list of InfluxDB Client Libraries that are compatible with the latest version can be found in [our documentation](https://docs.influxdata.com/influxdb/latest/tools/client-libraries/). If you are looking for the 1.x line of releases, there are branches for each minor version as well as a `master-1.x` branch that will contain the code for the next 1.x release. The master-1.x [working branch is here](https://github.com/influxdata/influxdb/tree/master-1.x). The [InfluxDB 1.x Go Client can be found here](https://github.com/influxdata/influxdb1-client). -## Installing +## Install We have nightly and versioned Docker images, Debian packages, RPM packages, and tarballs of InfluxDB available at the [InfluxData downloads page](https://portal.influxdata.com/downloads/). We also provide the `influx` command line interface (CLI) client as a separate binary available at the same location. If you are interested in building from source, see the [building from source](CONTRIBUTING.md#building-from-source) guide for contributors. -## Getting Started +## Get Started -For a complete getting started guide, please see our full [online documentation site](https://docs.influxdata.com/influxdb/v2.0/). +For a complete getting started guide, please see our full [online documentation site](https://docs.influxdata.com/influxdb/latest/). To write and query data or use the API in any way, you'll need to first create a user, credentials, organization and bucket. Everything in InfluxDB is organized under a concept of an organization. The API is designed to be multi-tenant. @@ -32,9 +32,9 @@ $ bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influx setup Welcome to InfluxDB 2.0! 
Please type your primary username: marty -Please type your password: +Please type your password: -Please type your password again: +Please type your password again: Please type your primary organization name.: InfluxData @@ -73,10 +73,10 @@ Once setup is complete, a configuration profile is created to allow you to inter ```bash $ bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influx config Active Name URL Org -* default http://localhost:9999 InfluxData +* default http://localhost:8086 InfluxData ``` -## Writing Data +## Write Data Write to measurement `m`, with tag `v=2`, in bucket `telegraf`, which belongs to organization `InfluxData`: ```bash @@ -104,13 +104,20 @@ Table: keys: [_start, _stop, _field, _measurement] 2019-12-30T22:19:39.043918000Z 2019-12-30T23:19:39.043918000Z v m 2019-12-30T23:17:02.000000000Z 2 ``` -Use the `-r, --raw` option to return the raw flux response from the query. This is useful for moving data from one instance to another as the `influx write` command can accept the Flux response using the `--format csv` option. +Use the `-r, --raw` option to return the raw flux response from the query. This is useful for moving data from one instance to another as the `influx write` command can accept the Flux response using the `--format csv` option. -## Introducing Flux +## Script with Flux -Flux is an MIT-licensed data scripting language (previously named IFQL) used for querying time series data from InfluxDB. The source for Flux is [available on GitHub](https://github.com/influxdata/flux). Learn more about Flux from [CTO Paul Dix's presentation](https://speakerdeck.com/pauldix/flux-number-fluxlang-a-new-time-series-data-scripting-language). +Flux (previously named IFQL) is an open source functional data scripting language designed for querying, analyzing, and acting on data. Flux supports multiple data source types, including: -## Contributing to the Project +- Time series databases (such as InfluxDB) +- Relational SQL databases (such as MySQL and PostgreSQL) +- CSV + +The source for Flux is [available on GitHub](https://github.com/influxdata/flux). +To learn more about Flux, see the latest [InfluxData Flux documentation](https://docs.influxdata.com/flux/) and [CTO Paul Dix's presentation](https://speakerdeck.com/pauldix/flux-number-fluxlang-a-new-time-series-data-scripting-language). + +## Contribute to the Project InfluxDB is an [MIT licensed](LICENSE) open source project and we love our community. The fastest way to get something fixed is to open a PR. Check out our [contributing](CONTRIBUTING.md) guide if you're interested in helping out. Also, join us on our [Community Slack Workspace](https://influxdata.com/slack) if you have questions or comments for our engineering teams. @@ -134,7 +141,7 @@ Generally, code must be adjusted to satisfy these tools, though there are except - [go vet](https://golang.org/cmd/vet/) checks for Go code that should be considered incorrect. - [go fmt](https://golang.org/cmd/gofmt/) checks that Go code is correctly formatted. - [go mod tidy](https://tip.golang.org/cmd/go/#hdr-Add_missing_and_remove_unused_modules) ensures that the source code and go.mod agree. -- [staticcheck](http://next.staticcheck.io/docs/) checks for things like: unused code, code that can be simplified, code that is incorrect and code that will have performance issues. +- [staticcheck](https://staticcheck.io/docs/) checks for things like: unused code, code that can be simplified, code that is incorrect and code that will have performance issues. 
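+
+As a quick illustration of the kind of issue these tools catch, the snippet
+below (illustrative only, not code from this repository) contains a
+`fmt.Sprintf` call with a constant format string and no arguments, which
+`staticcheck` reports as simplifiable:
+
+```go
+package demo
+
+import "fmt"
+
+// Greeting returns a constant string. staticcheck suggests replacing the
+// needless fmt.Sprintf call with the string literal itself.
+func Greeting() string {
+	return fmt.Sprintf("hello from influxd") // simplifiable to: return "hello from influxd"
+}
+```
+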
 ### staticcheck

@@ -164,7 +171,7 @@ If you re-generate a file and find that `staticcheck` has failed, please see thi

 #### End-to-End Tests

-CI also runs end-to-end tests. These test the integration between the influx server the ui. You can run them locally in two steps:
-
-- Start the server in "testing mode" by running `make run-e2e`.
-- Run the tests with `make e2e`.
+CI also runs end-to-end tests. These test the integration between the `influxd` server and the UI.
+Since the UI is used by internal repositories as well as the `influxdb` repository, the
+end-to-end tests cannot be run on forked pull requests or run locally. The extent of end-to-end
+testing required for forked pull requests will be determined as part of the review process.
diff --git a/annotation.go b/annotation.go
new file mode 100644
index 00000000000..7e969870580
--- /dev/null
+++ b/annotation.go
@@ -0,0 +1,483 @@
+package influxdb
+
+import (
+	"context"
+	"database/sql/driver"
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"strings"
+	"time"
+	"unicode/utf8"
+
+	"github.com/influxdata/influxdb/v2/kit/platform"
+	"github.com/influxdata/influxdb/v2/kit/platform/errors"
+)
+
+var (
+	errEmptySummary = &errors.Error{
+		Code: errors.EInvalid,
+		Msg:  "summary cannot be empty",
+	}
+	errSummaryTooLong = &errors.Error{
+		Code: errors.EInvalid,
+		Msg:  "summary must be less than 255 characters",
+	}
+	errStreamTagTooLong = &errors.Error{
+		Code: errors.EInvalid,
+		Msg:  "stream tag must be less than 255 characters",
+	}
+	errStreamNameTooLong = &errors.Error{
+		Code: errors.EInvalid,
+		Msg:  "stream name must be less than 255 characters",
+	}
+	errStreamDescTooLong = &errors.Error{
+		Code: errors.EInvalid,
+		Msg:  "stream description must be less than 1024 characters",
+	}
+	errStickerTooLong = &errors.Error{
+		Code: errors.EInvalid,
+		Msg:  "stickers must be less than 255 characters",
+	}
+	errMsgTooLong = &errors.Error{
+		Code: errors.EInvalid,
+		Msg:  "message must be less than 4096 characters",
+	}
+	errReversedTimes = &errors.Error{
+		Code: errors.EInvalid,
+		Msg:  "start time must come before end time",
+	}
+	errMissingStreamName = &errors.Error{
+		Code: errors.EInvalid,
+		Msg:  "stream name must be set",
+	}
+	errMissingStreamTagOrId = &errors.Error{
+		Code: errors.EInvalid,
+		Msg:  "stream tag or id must be set",
+	}
+	errMissingEndTime = &errors.Error{
+		Code: errors.EInvalid,
+		Msg:  "end time must be set",
+	}
+	errMissingStartTime = &errors.Error{
+		Code: errors.EInvalid,
+		Msg:  "start time must be set",
+	}
+)
+
+func invalidStickerError(s string) error {
+	return &errors.Error{
+		Code: errors.EInternal,
+		Msg:  fmt.Sprintf("invalid sticker: %q", s),
+	}
+}
+
+func stickerSliceToMap(stickers []string) (map[string]string, error) {
+	stickerMap := map[string]string{}
+
+	for i := range stickers {
+		if stick0, stick1, found := strings.Cut(stickers[i], "="); found {
+			stickerMap[stick0] = stick1
+		} else {
+			return nil, invalidStickerError(stickers[i])
+		}
+	}
+
+	return stickerMap, nil
+}
+
+// AnnotationService is the service contract for Annotations
+type AnnotationService interface {
+	// CreateAnnotations creates annotations.
+	CreateAnnotations(ctx context.Context, orgID platform.ID, create []AnnotationCreate) ([]AnnotationEvent, error)
+	// ListAnnotations lists all annotations matching the filter.
+	ListAnnotations(ctx context.Context, orgID platform.ID, filter AnnotationListFilter) ([]StoredAnnotation, error)
+	// GetAnnotation gets an annotation by id.
+ GetAnnotation(ctx context.Context, id platform.ID) (*StoredAnnotation, error) + // DeleteAnnotations deletes annotations matching the filter. + DeleteAnnotations(ctx context.Context, orgID platform.ID, delete AnnotationDeleteFilter) error + // DeleteAnnotation deletes an annotation by id. + DeleteAnnotation(ctx context.Context, id platform.ID) error + // UpdateAnnotation updates an annotation. + UpdateAnnotation(ctx context.Context, id platform.ID, update AnnotationCreate) (*AnnotationEvent, error) + + // ListStreams lists all streams matching the filter. + ListStreams(ctx context.Context, orgID platform.ID, filter StreamListFilter) ([]StoredStream, error) + // CreateOrUpdateStream creates or updates the matching stream by name. + CreateOrUpdateStream(ctx context.Context, orgID platform.ID, stream Stream) (*ReadStream, error) + // GetStream gets a stream by id. Currently this is only used for authorization, and there are no + // API routes for getting a single stream by ID. + GetStream(ctx context.Context, id platform.ID) (*StoredStream, error) + // UpdateStream updates the stream by the ID. + UpdateStream(ctx context.Context, id platform.ID, stream Stream) (*ReadStream, error) + // DeleteStreams deletes one or more streams by name. + DeleteStreams(ctx context.Context, orgID platform.ID, delete BasicStream) error + // DeleteStreamByID deletes the stream metadata by id. + DeleteStreamByID(ctx context.Context, id platform.ID) error +} + +// AnnotationEvent contains fields for annotating an event. +type AnnotationEvent struct { + ID platform.ID `json:"id,omitempty"` // ID is the annotation ID. + AnnotationCreate // AnnotationCreate defines the common input/output bits of an annotation. +} + +// AnnotationCreate contains user providable fields for annotating an event. +type AnnotationCreate struct { + StreamTag string `json:"stream,omitempty"` // StreamTag provides a means to logically group a set of annotated events. + Summary string `json:"summary"` // Summary is the only field required to annotate an event. + Message string `json:"message,omitempty"` // Message provides more details about the event being annotated. + Stickers AnnotationStickers `json:"stickers,omitempty"` // Stickers are like tags, but named something obscure to differentiate them from influx tags. They are there to differentiate an annotated event. + EndTime *time.Time `json:"endTime,omitempty"` // EndTime is the time of the event being annotated. Defaults to now if not set. + StartTime *time.Time `json:"startTime,omitempty"` // StartTime is the start time of the event being annotated. Defaults to EndTime if not set. +} + +// StoredAnnotation represents annotation data to be stored in the database. +type StoredAnnotation struct { + ID platform.ID `db:"id"` // ID is the annotation's id. + OrgID platform.ID `db:"org_id"` // OrgID is the annotations's owning organization. + StreamID platform.ID `db:"stream_id"` // StreamID is the id of a stream. + StreamTag string `db:"stream"` // StreamTag is the name of a stream (when selecting with join of streams). + Summary string `db:"summary"` // Summary is the summary of the annotated event. + Message string `db:"message"` // Message is a longer description of the annotated event. + Stickers AnnotationStickers `db:"stickers"` // Stickers are additional labels to group annotations by. + Duration string `db:"duration"` // Duration is the time range (with zone) of an annotated event. + Lower string `db:"lower"` // Lower is the time an annotated event begins. 
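+	// NOTE: Lower and Upper are stored as RFC3339Nano-formatted strings;
+	// ToCreate below parses them back into time.Time values.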
+	Upper     string             `db:"upper"`    // Upper is the time an annotated event ends.
+}
+
+// ToCreate is a utility method for converting a StoredAnnotation to an AnnotationCreate type
+func (s StoredAnnotation) ToCreate() (*AnnotationCreate, error) {
+	et, err := time.Parse(time.RFC3339Nano, s.Upper)
+	if err != nil {
+		return nil, err
+	}
+
+	st, err := time.Parse(time.RFC3339Nano, s.Lower)
+	if err != nil {
+		return nil, err
+	}
+
+	return &AnnotationCreate{
+		StreamTag: s.StreamTag,
+		Summary:   s.Summary,
+		Message:   s.Message,
+		Stickers:  s.Stickers,
+		EndTime:   &et,
+		StartTime: &st,
+	}, nil
+}
+
+// ToEvent is a utility method for converting a StoredAnnotation to an AnnotationEvent type
+func (s StoredAnnotation) ToEvent() (*AnnotationEvent, error) {
+	c, err := s.ToCreate()
+	if err != nil {
+		return nil, err
+	}
+
+	return &AnnotationEvent{
+		ID:               s.ID,
+		AnnotationCreate: *c,
+	}, nil
+}
+
+type AnnotationStickers map[string]string
+
+// Value implements the database/sql Valuer interface for adding AnnotationStickers to the database
+// Stickers are stored in the database as a slice of strings like "[key=val]"
+// They are encoded into a JSON string for storing into the database, and the JSON sqlite extension is
+// able to manipulate them like an object.
+func (a AnnotationStickers) Value() (driver.Value, error) {
+	stickSlice := make([]string, 0, len(a))
+
+	for k, v := range a {
+		stickSlice = append(stickSlice, fmt.Sprintf("%s=%s", k, v))
+	}
+
+	sticks, err := json.Marshal(stickSlice)
+	if err != nil {
+		return nil, err
+	}
+
+	return string(sticks), nil
+}
+
+// Scan implements the database/sql Scanner interface for retrieving AnnotationStickers from the database
+// The string is decoded into a slice of strings, which are then converted back into a map
+func (a *AnnotationStickers) Scan(value interface{}) error {
+	vString, ok := value.(string)
+	if !ok {
+		return &errors.Error{
+			Code: errors.EInternal,
+			Msg:  "could not load stickers from sqlite",
+		}
+	}
+
+	var stickSlice []string
+	if err := json.NewDecoder(strings.NewReader(vString)).Decode(&stickSlice); err != nil {
+		return err
+	}
+
+	stickMap, err := stickerSliceToMap(stickSlice)
+	if err != nil {
+		return err
+	}
+
+	*a = stickMap
+	return nil
+}
+
+// Validate validates the creation object.
+func (a *AnnotationCreate) Validate(nowFunc func() time.Time) error {
+	switch s := utf8.RuneCountInString(a.Summary); {
+	case s <= 0:
+		return errEmptySummary
+	case s > 255:
+		return errSummaryTooLong
+	}
+
+	switch t := utf8.RuneCountInString(a.StreamTag); {
+	case t == 0:
+		a.StreamTag = "default"
+	case t > 255:
+		return errStreamTagTooLong
+	}
+
+	if utf8.RuneCountInString(a.Message) > 4096 {
+		return errMsgTooLong
+	}
+
+	for k, v := range a.Stickers {
+		if utf8.RuneCountInString(k) > 255 || utf8.RuneCountInString(v) > 255 {
+			return errStickerTooLong
+		}
+	}
+
+	now := nowFunc()
+	if a.EndTime == nil {
+		a.EndTime = &now
+	}
+
+	if a.StartTime == nil {
+		a.StartTime = a.EndTime
+	}
+
+	if a.EndTime.Before(*(a.StartTime)) {
+		return errReversedTimes
+	}
+
+	return nil
+}
+
+// AnnotationDeleteFilter contains fields for deleting an annotated event.
+type AnnotationDeleteFilter struct {
+	StreamTag string      `json:"stream,omitempty"`   // StreamTag provides a means to logically group a set of annotated events.
+	StreamID  platform.ID `json:"streamID,omitempty"` // StreamID provides a means to logically group a set of annotated events.
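+	// NOTE: at least one of StreamTag or StreamID must be provided;
+	// Validate returns errMissingStreamTagOrId when both are unset.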
+	Stickers  map[string]string `json:"stickers,omitempty"`  // Stickers are like tags, but named something obscure to differentiate them from influx tags. They are there to differentiate an annotated event.
+	EndTime   *time.Time        `json:"endTime,omitempty"`   // EndTime is the time of the event being annotated. Defaults to now if not set.
+	StartTime *time.Time        `json:"startTime,omitempty"` // StartTime is the start time of the event being annotated. Defaults to EndTime if not set.
+}
+
+// Validate validates the deletion object.
+func (a *AnnotationDeleteFilter) Validate() error {
+	var errs []string
+
+	if len(a.StreamTag) == 0 && !a.StreamID.Valid() {
+		errs = append(errs, errMissingStreamTagOrId.Error())
+	}
+
+	if a.EndTime == nil {
+		errs = append(errs, errMissingEndTime.Error())
+	}
+
+	if a.StartTime == nil {
+		errs = append(errs, errMissingStartTime.Error())
+	}
+
+	if len(errs) > 0 {
+		return &errors.Error{
+			Code: errors.EInvalid,
+			Msg:  strings.Join(errs, "; "),
+		}
+	}
+
+	if a.EndTime.Before(*(a.StartTime)) {
+		return errReversedTimes
+	}
+
+	return nil
+}
+
+var dre = regexp.MustCompile(`stickers\[(.*)\]`)
+
+// SetStickers sets the stickers from the query parameters.
+func (a *AnnotationDeleteFilter) SetStickers(vals map[string][]string) {
+	if a.Stickers == nil {
+		a.Stickers = map[string]string{}
+	}
+
+	for k, v := range vals {
+		if ss := dre.FindStringSubmatch(k); len(ss) == 2 && len(v) > 0 {
+			a.Stickers[ss[1]] = v[0]
+		}
+	}
+}
+
+// AnnotationList defines the structure of the response when listing annotations.
+type AnnotationList struct {
+	StreamTag   string           `json:"stream"`
+	Annotations []ReadAnnotation `json:"annotations"`
+}
+
+// ReadAnnotations allows annotations to be assigned to a stream.
+type ReadAnnotations map[string][]ReadAnnotation
+
+// MarshalJSON allows us to marshal the annotations belonging to a stream properly.
+func (s ReadAnnotations) MarshalJSON() ([]byte, error) {
+	annotationList := []AnnotationList{}
+
+	for k, v := range s {
+		annotationList = append(annotationList, AnnotationList{
+			StreamTag:   k,
+			Annotations: v,
+		})
+	}
+
+	return json.Marshal(annotationList)
+}
+
+// ReadAnnotation defines the simplest form of an annotation to be returned. Essentially, it's AnnotationEvent without stream info.
+type ReadAnnotation struct {
+	ID        platform.ID       `json:"id"`                  // ID is the annotation's generated id.
+	Summary   string            `json:"summary"`             // Summary is the only field required to annotate an event.
+	Message   string            `json:"message,omitempty"`   // Message provides more details about the event being annotated.
+	Stickers  map[string]string `json:"stickers,omitempty"`  // Stickers are like tags, but named something obscure to differentiate them from influx tags. They are there to differentiate an annotated event.
+	EndTime   string            `json:"endTime"`             // EndTime is the time of the event being annotated.
+	StartTime string            `json:"startTime,omitempty"` // StartTime is the start time of the event being annotated.
+}
+
+// AnnotationListFilter is a selection filter for listing annotations.
+type AnnotationListFilter struct {
+	StickerIncludes AnnotationStickers `json:"stickerIncludes,omitempty"` // StickerIncludes allows the user to filter annotated events based on its stickers.
+	StreamIncludes  []string           `json:"streamIncludes,omitempty"`  // StreamIncludes allows the user to filter annotated events by stream.
+	BasicFilter
+}
+
+// Validate validates the filter.
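+// Validation is delegated to the embedded BasicFilter, which fills in default
+// start and end times when they are unset.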
+func (f *AnnotationListFilter) Validate(nowFunc func() time.Time) error {
+	return f.BasicFilter.Validate(nowFunc)
+}
+
+var re = regexp.MustCompile(`stickerIncludes\[(.*)\]`)
+
+// SetStickerIncludes sets the stickerIncludes from the query parameters.
+func (f *AnnotationListFilter) SetStickerIncludes(vals map[string][]string) {
+	if f.StickerIncludes == nil {
+		f.StickerIncludes = map[string]string{}
+	}
+
+	for k, v := range vals {
+		if ss := re.FindStringSubmatch(k); len(ss) == 2 && len(v) > 0 {
+			f.StickerIncludes[ss[1]] = v[0]
+		}
+	}
+}
+
+// StreamListFilter is a selection filter for listing streams. Streams are not considered first class resources, but depend on an annotation using them.
+type StreamListFilter struct {
+	StreamIncludes []string `json:"streamIncludes,omitempty"` // StreamIncludes allows the user to filter streams returned.
+	BasicFilter
+}
+
+// Validate validates the filter.
+func (f *StreamListFilter) Validate(nowFunc func() time.Time) error {
+	return f.BasicFilter.Validate(nowFunc)
+}
+
+// Stream defines the stream metadata. Used in create and update requests/responses. Delete requests will only require stream name.
+type Stream struct {
+	Name        string `json:"stream"`                // Name is the name of a stream.
+	Description string `json:"description,omitempty"` // Description is more information about a stream.
+}
+
+// ReadStream defines the returned stream.
+type ReadStream struct {
+	ID          platform.ID `json:"id" db:"id"`                             // ID is the id of a stream.
+	Name        string      `json:"stream" db:"name"`                       // Name is the name of a stream.
+	Description string      `json:"description,omitempty" db:"description"` // Description is more information about a stream.
+	CreatedAt   time.Time   `json:"createdAt" db:"created_at"`              // CreatedAt is a timestamp.
+	UpdatedAt   time.Time   `json:"updatedAt" db:"updated_at"`              // UpdatedAt is a timestamp.
+}
+
+// Validate validates the stream.
+func (s *Stream) Validate(strict bool) error {
+	switch nameChars := utf8.RuneCountInString(s.Name); {
+	case nameChars <= 0:
+		if strict {
+			return errMissingStreamName
+		}
+		s.Name = "default"
+	case nameChars > 255:
+		return errStreamNameTooLong
+	}
+
+	if utf8.RuneCountInString(s.Description) > 1024 {
+		return errStreamDescTooLong
+	}
+
+	return nil
+}
+
+// StoredStream represents stream data to be stored in the metadata database.
+type StoredStream struct {
+	ID          platform.ID `db:"id"`          // ID is the stream's id.
+	OrgID       platform.ID `db:"org_id"`      // OrgID is the stream's owning organization.
+	Name        string      `db:"name"`        // Name is the name of a stream.
+	Description string      `db:"description"` // Description is more information about a stream.
+	CreatedAt   time.Time   `db:"created_at"`  // CreatedAt is a timestamp.
+	UpdatedAt   time.Time   `db:"updated_at"`  // UpdatedAt is a timestamp.
+}
+
+// BasicStream defines a stream by name. Used for stream deletes.
+type BasicStream struct {
+	Names []string `json:"stream"`
+}
+
+// IsValid validates the stream is not empty.
+func (s BasicStream) IsValid() bool {
+	if len(s.Names) <= 0 {
+		return false
+	}
+
+	for i := range s.Names {
+		if len(s.Names[i]) <= 0 {
+			return false
+		}
+	}
+
+	return true
+}
+
+// BasicFilter defines common filter options.
+type BasicFilter struct {
+	StartTime *time.Time `json:"startTime,omitempty"` // StartTime is the time the event being annotated started.
+	EndTime   *time.Time `json:"endTime,omitempty"`   // EndTime is the time the event being annotated ended.
+}
+
+// Validate validates the basic filter options, setting sane defaults where appropriate.
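+// A nil or zero EndTime defaults to the current UTC time truncated to the
+// second, a nil StartTime defaults to the zero time, and errReversedTimes is
+// returned when EndTime precedes StartTime.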
+func (f *BasicFilter) Validate(nowFunc func() time.Time) error { + now := nowFunc().UTC().Truncate(time.Second) + if f.EndTime == nil || f.EndTime.IsZero() { + f.EndTime = &now + } + + if f.StartTime == nil { + f.StartTime = &time.Time{} + } + + if f.EndTime.Before(*(f.StartTime)) { + return errReversedTimes + } + + return nil +} diff --git a/annotation_test.go b/annotation_test.go new file mode 100644 index 00000000000..e8c48dafc22 --- /dev/null +++ b/annotation_test.go @@ -0,0 +1,592 @@ +package influxdb + +import ( + "encoding/json" + "strings" + "testing" + "time" + + "github.com/influxdata/influxdb/v2/kit/platform" + "github.com/influxdata/influxdb/v2/kit/platform/errors" + "github.com/stretchr/testify/require" +) + +var ( + testTime time.Time = time.Now() + testTime2 time.Time = testTime.Add(time.Minute) + + annID, _ = platform.IDFromString("2345678901234567") +) + +func nowFunc() time.Time { + return testTime +} + +func TestAnnotationCreate(t *testing.T) { + type tst struct { + name string + input AnnotationCreate + expected AnnotationCreate + err *errors.Error + } + + tests := []tst{ + { + name: "minimum valid create request", + input: AnnotationCreate{ + Summary: "this is a default annotation", + }, + expected: AnnotationCreate{ + StreamTag: "default", + Summary: "this is a default annotation", + EndTime: &testTime, + StartTime: &testTime, + }, + }, + { + name: "full valid create request", + input: AnnotationCreate{ + StreamTag: "other", + Summary: "this is another annotation", + Message: "This is a much longer description or message to add to the annotation summary", + Stickers: map[string]string{"product": "cloud"}, + EndTime: &testTime2, + StartTime: &testTime, + }, + expected: AnnotationCreate{ + StreamTag: "other", + Summary: "this is another annotation", + Message: "This is a much longer description or message to add to the annotation summary", + Stickers: map[string]string{"product": "cloud"}, + EndTime: &testTime2, + StartTime: &testTime, + }, + }, + { + name: "empty create request", + input: AnnotationCreate{}, + err: errEmptySummary, + }, + { + name: "end time before start create request", + input: AnnotationCreate{ + Summary: "this is a default annotation", + EndTime: &testTime, + StartTime: &testTime2, + }, + err: errReversedTimes, + }, + { + name: "default end time before start create request", + input: AnnotationCreate{ + Summary: "this is a default annotation", + StartTime: &testTime2, + }, + err: errReversedTimes, + }, + { + name: "summary too long", + input: AnnotationCreate{ + Summary: strings.Repeat("a", 256), + }, + err: errSummaryTooLong, + }, + { + name: "message too long", + input: AnnotationCreate{ + Summary: "longTom", + Message: strings.Repeat("a", 4097), + }, + err: errMsgTooLong, + }, + { + name: "stream tag too long", + input: AnnotationCreate{ + Summary: "longTom", + StreamTag: strings.Repeat("a", 256), + }, + err: errStreamTagTooLong, + }, + { + name: "sticker key too long", + input: AnnotationCreate{ + Summary: "longTom", + Stickers: map[string]string{strings.Repeat("1", 256): "val"}, + }, + err: errStickerTooLong, + }, + { + name: "sticker val too long", + input: AnnotationCreate{ + Summary: "longTom", + Stickers: map[string]string{"key": strings.Repeat("1", 256)}, + }, + err: errStickerTooLong, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := test.input.Validate(nowFunc) + if test.err != nil { + require.Equal(t, test.err, err) + return + } + + require.NoError(t, err) + 
require.Equal(t, test.expected, test.input) + }) + } +} + +func TestDeleteFilter(t *testing.T) { + type tst struct { + name string + input AnnotationDeleteFilter + expected AnnotationDeleteFilter + err *errors.Error + } + + tests := []tst{ + { + name: "minimum valid delete", + input: AnnotationDeleteFilter{ + StreamTag: "default", + EndTime: &testTime, + StartTime: &testTime, + }, + expected: AnnotationDeleteFilter{ + StreamTag: "default", + EndTime: &testTime, + StartTime: &testTime, + }, + }, + { + name: "full valid delete", + input: AnnotationDeleteFilter{ + StreamTag: "default", + Stickers: map[string]string{"product": "oss"}, + EndTime: &testTime, + StartTime: &testTime, + }, + expected: AnnotationDeleteFilter{ + StreamTag: "default", + Stickers: map[string]string{"product": "oss"}, + EndTime: &testTime, + StartTime: &testTime, + }, + }, + { + name: "missing stream tag", + input: AnnotationDeleteFilter{ + Stickers: map[string]string{"product": "oss"}, + EndTime: &testTime, + StartTime: &testTime, + }, + err: errMissingStreamTagOrId, + }, + { + name: "missing start time", + input: AnnotationDeleteFilter{ + StreamTag: "default", + Stickers: map[string]string{"product": "oss"}, + EndTime: &testTime, + }, + err: errMissingStartTime, + }, + { + name: "missing end time", + input: AnnotationDeleteFilter{ + StreamTag: "default", + Stickers: map[string]string{"product": "oss"}, + StartTime: &testTime, + }, + err: errMissingEndTime, + }, + { + name: "end time before start create request", + input: AnnotationDeleteFilter{ + StreamTag: "default", + Stickers: map[string]string{"product": "oss"}, + EndTime: &testTime, + StartTime: &testTime2, + }, + err: errReversedTimes, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := test.input.Validate() + if test.err != nil { + require.Equal(t, test.err, err) + return + } + + require.NoError(t, err) + require.Equal(t, test.expected, test.input) + }) + } +} + +func TestAnnotationListFilter(t *testing.T) { + type tst struct { + name string + input AnnotationListFilter + expected AnnotationListFilter + checkValue bool + err *errors.Error + } + + tests := []tst{ + { + name: "minimum valid", + input: AnnotationListFilter{ + BasicFilter: BasicFilter{ + EndTime: &testTime, + StartTime: &testTime, + }, + }, + expected: AnnotationListFilter{ + BasicFilter: BasicFilter{ + EndTime: &testTime, + StartTime: &testTime, + }, + }, + }, + { + name: "empty valid", + input: AnnotationListFilter{}, + expected: AnnotationListFilter{ + BasicFilter: BasicFilter{ + EndTime: &testTime, + StartTime: &testTime, + }, + }, + checkValue: true, + }, + { + name: "invalid due to reversed times", + input: AnnotationListFilter{ + BasicFilter: BasicFilter{ + EndTime: &testTime, + StartTime: &testTime2, + }, + }, + err: errReversedTimes, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := test.input.Validate(nowFunc) + if test.err != nil { + require.Equal(t, test.err, err) + return + } + + require.NoError(t, err) + if test.checkValue { + require.Equal(t, *test.expected.BasicFilter.StartTime, *test.expected.BasicFilter.EndTime) + } else { + require.Equal(t, test.expected, test.input) + } + }) + } +} + +func TestStreamListFilter(t *testing.T) { + type tst struct { + name string + input StreamListFilter + expected StreamListFilter + checkValue bool + err *errors.Error + } + + tests := []tst{ + { + name: "minimum valid", + input: StreamListFilter{ + BasicFilter: BasicFilter{ + EndTime: &testTime, + StartTime: &testTime, 
+ }, + }, + expected: StreamListFilter{ + BasicFilter: BasicFilter{ + EndTime: &testTime, + StartTime: &testTime, + }, + }, + }, + { + name: "empty valid", + input: StreamListFilter{}, + expected: StreamListFilter{ + BasicFilter: BasicFilter{ + EndTime: &testTime, + StartTime: &testTime, + }, + }, + checkValue: true, + }, + { + name: "invalid due to reversed times", + input: StreamListFilter{ + BasicFilter: BasicFilter{ + EndTime: &testTime, + StartTime: &testTime2, + }, + }, + err: errReversedTimes, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := test.input.Validate(nowFunc) + if test.err != nil { + require.Equal(t, test.err, err) + return + } + + require.NoError(t, err) + if test.checkValue { + require.Equal(t, *test.expected.BasicFilter.StartTime, *test.expected.BasicFilter.EndTime) + } else { + require.Equal(t, test.expected, test.input) + } + }) + } +} + +func TestStreamIsValid(t *testing.T) { + type tst struct { + name string + input Stream + err *errors.Error + } + + tests := []tst{ + { + name: "minimum valid", + input: Stream{ + Name: "default", + }, + }, + { + name: "empty valid", + input: Stream{}, + }, + { + name: "invalid name too long", + input: Stream{ + Name: strings.Repeat("a", 512), + }, + err: errStreamNameTooLong, + }, + { + name: "invalid description too long", + input: Stream{ + Name: "longTom", + Description: strings.Repeat("a", 2048), + }, + err: errStreamDescTooLong, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if test.err != nil { + require.Equal(t, test.err, test.input.Validate(false)) + } else { + require.NoError(t, test.input.Validate(false)) + } + }) + } +} + +func TestBasicStreamIsValid(t *testing.T) { + type tst struct { + name string + input BasicStream + expected bool + } + + tests := []tst{ + { + name: "minimum valid", + input: BasicStream{ + Names: []string{"default"}, + }, + expected: true, + }, + { + name: "invalid", + input: BasicStream{}, + expected: false, + }, + { + name: "empty name", + input: BasicStream{Names: []string{""}}, + expected: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require.Equal(t, test.expected, test.input.IsValid()) + }) + } +} + +func TestMashallReadAnnotations(t *testing.T) { + ra := ReadAnnotations{ + "default": []ReadAnnotation{ + { + ID: *annID, + Summary: "this is one annotation", + Stickers: map[string]string{"env": "testing"}, + StartTime: testTime.Format(time.RFC3339Nano), + EndTime: testTime2.Format(time.RFC3339Nano), + }, + { + ID: *annID, + Summary: "this is another annotation", + Stickers: map[string]string{"env": "testing"}, + StartTime: testTime.Format(time.RFC3339Nano), + EndTime: testTime.Format(time.RFC3339Nano), + }, + }, + "testing": []ReadAnnotation{ + { + ID: *annID, + Summary: "this is yet another annotation", + Stickers: map[string]string{"env": "testing"}, + StartTime: testTime.Format(time.RFC3339Nano), + EndTime: testTime.Format(time.RFC3339Nano), + }, + }, + } + + b, err := json.Marshal(ra) + require.NoError(t, err) + require.Greater(t, len(b), 0) +} + +func TestSetStickerIncludes(t *testing.T) { + type tst struct { + name string + input map[string][]string + expected AnnotationStickers + } + + tests := []tst{ + { + name: "with stickerIncludes", + input: map[string][]string{ + "stickerIncludes[product]": {"oss"}, + "stickerIncludes[author]": {"russ"}, + "streams": {"default", "blogs"}, + }, + expected: map[string]string{ + "product": "oss", + "author": "russ", + }, + }, + { + 
name: "no sticker includes", + input: map[string][]string{ + "startTime": {"2021-01-13T22%3A17%3A37.953Z"}, + "endTime": {"2021-01-13T22%3A17%3A37.953Z"}, + "streams": {"default", "blogs"}, + }, + expected: map[string]string{}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + f := AnnotationListFilter{} + f.SetStickerIncludes(test.input) + require.Equal(t, test.expected, f.StickerIncludes) + }) + } +} + +func TestSetStickers(t *testing.T) { + type tst struct { + name string + input map[string][]string + expected map[string]string + } + + tests := []tst{ + { + name: "with stickers", + input: map[string][]string{ + "stickers[product]": {"oss"}, + "stickers[author]": {"russ"}, + "streams": {"default", "blogs"}, + }, + expected: map[string]string{ + "product": "oss", + "author": "russ", + }, + }, + { + name: "no stickers", + input: map[string][]string{ + "startTime": {"2021-01-13T22%3A17%3A37.953Z"}, + "endTime": {"2021-01-13T22%3A17%3A37.953Z"}, + "streams": {"default", "blogs"}, + }, + expected: map[string]string{}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + f := AnnotationDeleteFilter{} + f.SetStickers(test.input) + require.Equal(t, test.expected, f.Stickers) + }) + } +} + +func TestStickerSliceToMap(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + stickers []string + want map[string]string + wantErr error + }{ + { + "good stickers", + []string{"good1=val1", "good2=val2"}, + map[string]string{"good1": "val1", "good2": "val2"}, + nil, + }, + { + "bad stickers", + []string{"this is an invalid sticker", "shouldbe=likethis"}, + nil, + invalidStickerError("this is an invalid sticker"), + }, + { + "no stickers", + []string{}, + map[string]string{}, + nil, + }, + } + + for _, tt := range tests { + got, err := stickerSliceToMap(tt.stickers) + require.Equal(t, tt.want, got) + require.Equal(t, tt.wantErr, err) + } +} diff --git a/annotations/README.md b/annotations/README.md new file mode 100644 index 00000000000..9644528643c --- /dev/null +++ b/annotations/README.md @@ -0,0 +1,33 @@ +## Annotations + +This package provides an HTTP API for interacting with both annotations and +streams independently. The HTTP handlers are located in the `transport` folder. +The code for interacting with the sqlite datastore is located in the +`service.go` file. Definitions for the basic types & interfaces associated with +annotations and streams used throughout the platform are located in the +top-level `influxdb` package, in the `annotation.go` file. + +### Anatomy + +An annotation is, at its simplest, a textual note on a range of time. The start +and stop time of that range can be the same point in time, which represents an +annotation at a single instance. Annotations can also have "stickers". +"Stickers" allow users to "tag" the annotation with further granularity for +filtering in key-value pairs. Some examples of sticker key-value pairs are: +`"product: oss"`, `"product: cloud"`, or `"service: tasks"`, but keys and values +can be any string. + +Every annotation belongs to a single "stream". A "stream" represents a logical +grouping of annotated events. Some examples of stream names are: `"incidents"`, +`"deployments"`, or `"marketing"`, but can be any string. A stream can also have +a description to further clarify what annotated events may be expected in the +stream. + +### Use + +Requested annotations may be filtered by stream name, stickers, and/or time +range. 
Streams may also be retrieved, in order to view their description. If a +stream is deleted, all annotations associated with that stream are deleted as +well. Every annotation that is created must have a stream associated with it - +if a stream name is not provided when creating an annotation, it will be +assigned to the default stream. diff --git a/annotations/middleware_logging.go b/annotations/middleware_logging.go new file mode 100644 index 00000000000..b1471d7d7ab --- /dev/null +++ b/annotations/middleware_logging.go @@ -0,0 +1,168 @@ +package annotations + +import ( + "context" + "time" + + "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/kit/platform" + "go.uber.org/zap" +) + +func NewLoggingService(logger *zap.Logger, underlying influxdb.AnnotationService) *loggingService { + return &loggingService{ + logger: logger, + underlying: underlying, + } +} + +type loggingService struct { + logger *zap.Logger + underlying influxdb.AnnotationService +} + +var _ influxdb.AnnotationService = (*loggingService)(nil) + +func (l loggingService) CreateAnnotations(ctx context.Context, orgID platform.ID, create []influxdb.AnnotationCreate) (an []influxdb.AnnotationEvent, err error) { + defer func(start time.Time) { + dur := zap.Duration("took", time.Since(start)) + if err != nil { + l.logger.Debug("failed to create annotations", zap.Error(err), dur) + return + } + l.logger.Debug("annotations create", dur) + }(time.Now()) + return l.underlying.CreateAnnotations(ctx, orgID, create) +} + +func (l loggingService) ListAnnotations(ctx context.Context, orgID platform.ID, filter influxdb.AnnotationListFilter) (an []influxdb.StoredAnnotation, err error) { + defer func(start time.Time) { + dur := zap.Duration("took", time.Since(start)) + if err != nil { + l.logger.Debug("failed to find annotations", zap.Error(err), dur) + return + } + l.logger.Debug("annotations find", dur) + }(time.Now()) + return l.underlying.ListAnnotations(ctx, orgID, filter) +} + +func (l loggingService) GetAnnotation(ctx context.Context, id platform.ID) (an *influxdb.StoredAnnotation, err error) { + defer func(start time.Time) { + dur := zap.Duration("took", time.Since(start)) + if err != nil { + l.logger.Debug("failed to find annotation by ID", zap.Error(err), dur) + return + } + l.logger.Debug("annotation find by ID", dur) + }(time.Now()) + return l.underlying.GetAnnotation(ctx, id) +} + +func (l loggingService) DeleteAnnotations(ctx context.Context, orgID platform.ID, delete influxdb.AnnotationDeleteFilter) (err error) { + defer func(start time.Time) { + dur := zap.Duration("took", time.Since(start)) + if err != nil { + l.logger.Debug("failed to delete annotations", zap.Error(err), dur) + return + } + l.logger.Debug("annotations delete", dur) + }(time.Now()) + return l.underlying.DeleteAnnotations(ctx, orgID, delete) +} + +func (l loggingService) DeleteAnnotation(ctx context.Context, id platform.ID) (err error) { + defer func(start time.Time) { + dur := zap.Duration("took", time.Since(start)) + if err != nil { + l.logger.Debug("failed to delete annotation", zap.Error(err), dur) + return + } + l.logger.Debug("annotation delete", dur) + }(time.Now()) + return l.underlying.DeleteAnnotation(ctx, id) +} + +func (l loggingService) UpdateAnnotation(ctx context.Context, id platform.ID, update influxdb.AnnotationCreate) (an *influxdb.AnnotationEvent, err error) { + defer func(start time.Time) { + dur := zap.Duration("took", time.Since(start)) + if err != nil { + l.logger.Debug("failed to update 
annotation", zap.Error(err), dur) + return + } + l.logger.Debug("annotation update", dur) + }(time.Now()) + return l.underlying.UpdateAnnotation(ctx, id, update) +} + +func (l loggingService) ListStreams(ctx context.Context, orgID platform.ID, filter influxdb.StreamListFilter) (stm []influxdb.StoredStream, err error) { + defer func(start time.Time) { + dur := zap.Duration("took", time.Since(start)) + if err != nil { + l.logger.Debug("failed to find streams", zap.Error(err), dur) + return + } + l.logger.Debug("streams find", dur) + }(time.Now()) + return l.underlying.ListStreams(ctx, orgID, filter) +} + +func (l loggingService) CreateOrUpdateStream(ctx context.Context, orgID platform.ID, stream influxdb.Stream) (stm *influxdb.ReadStream, err error) { + defer func(start time.Time) { + dur := zap.Duration("took", time.Since(start)) + if err != nil { + l.logger.Debug("failed to create or update stream", zap.Error(err), dur) + return + } + l.logger.Debug("stream create or update", dur) + }(time.Now()) + return l.underlying.CreateOrUpdateStream(ctx, orgID, stream) +} + +func (l loggingService) UpdateStream(ctx context.Context, id platform.ID, stream influxdb.Stream) (stm *influxdb.ReadStream, err error) { + defer func(start time.Time) { + dur := zap.Duration("took", time.Since(start)) + if err != nil { + l.logger.Debug("failed to update stream", zap.Error(err), dur) + return + } + l.logger.Debug("stream update", dur) + }(time.Now()) + return l.underlying.UpdateStream(ctx, id, stream) +} + +func (l loggingService) GetStream(ctx context.Context, id platform.ID) (stm *influxdb.StoredStream, err error) { + defer func(start time.Time) { + dur := zap.Duration("took", time.Since(start)) + if err != nil { + l.logger.Debug("failed to find stream by ID", zap.Error(err), dur) + return + } + l.logger.Debug("stream find by ID", dur) + }(time.Now()) + return l.underlying.GetStream(ctx, id) +} + +func (l loggingService) DeleteStreams(ctx context.Context, orgID platform.ID, delete influxdb.BasicStream) (err error) { + defer func(start time.Time) { + dur := zap.Duration("took", time.Since(start)) + if err != nil { + l.logger.Debug("failed to delete streams", zap.Error(err), dur) + return + } + l.logger.Debug("streams delete", dur) + }(time.Now()) + return l.underlying.DeleteStreams(ctx, orgID, delete) +} + +func (l loggingService) DeleteStreamByID(ctx context.Context, id platform.ID) (err error) { + defer func(start time.Time) { + dur := zap.Duration("took", time.Since(start)) + if err != nil { + l.logger.Debug("failed to delete stream", zap.Error(err), dur) + return + } + l.logger.Debug("stream delete", dur) + }(time.Now()) + return l.underlying.DeleteStreamByID(ctx, id) +} diff --git a/annotations/middleware_metrics.go b/annotations/middleware_metrics.go new file mode 100644 index 00000000000..d7609348772 --- /dev/null +++ b/annotations/middleware_metrics.go @@ -0,0 +1,94 @@ +package annotations + +import ( + "context" + + "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/kit/metric" + "github.com/influxdata/influxdb/v2/kit/platform" + "github.com/prometheus/client_golang/prometheus" +) + +func NewMetricCollectingService(reg prometheus.Registerer, underlying influxdb.AnnotationService, opts ...metric.ClientOptFn) *metricsService { + o := metric.ApplyMetricOpts(opts...) 
+ return &metricsService{ + rec: metric.New(reg, o.ApplySuffix("annotation")), + underlying: underlying, + } +} + +type metricsService struct { + // RED metrics + rec *metric.REDClient + underlying influxdb.AnnotationService +} + +var _ influxdb.AnnotationService = (*metricsService)(nil) + +func (m metricsService) CreateAnnotations(ctx context.Context, orgID platform.ID, create []influxdb.AnnotationCreate) ([]influxdb.AnnotationEvent, error) { + rec := m.rec.Record("create_annotation") + ans, err := m.underlying.CreateAnnotations(ctx, orgID, create) + return ans, rec(err) +} + +func (m metricsService) ListAnnotations(ctx context.Context, orgID platform.ID, filter influxdb.AnnotationListFilter) ([]influxdb.StoredAnnotation, error) { + rec := m.rec.Record("find_annotations") + ans, err := m.underlying.ListAnnotations(ctx, orgID, filter) + return ans, rec(err) +} + +func (m metricsService) GetAnnotation(ctx context.Context, id platform.ID) (*influxdb.StoredAnnotation, error) { + rec := m.rec.Record("find_annotation_by_id") + an, err := m.underlying.GetAnnotation(ctx, id) + return an, rec(err) +} + +func (m metricsService) DeleteAnnotations(ctx context.Context, orgID platform.ID, delete influxdb.AnnotationDeleteFilter) error { + rec := m.rec.Record("delete_annotations") + return rec(m.underlying.DeleteAnnotations(ctx, orgID, delete)) +} + +func (m metricsService) DeleteAnnotation(ctx context.Context, id platform.ID) error { + rec := m.rec.Record("delete_annotation") + return rec(m.underlying.DeleteAnnotation(ctx, id)) +} + +func (m metricsService) UpdateAnnotation(ctx context.Context, id platform.ID, update influxdb.AnnotationCreate) (*influxdb.AnnotationEvent, error) { + rec := m.rec.Record("update_annotation") + an, err := m.underlying.UpdateAnnotation(ctx, id, update) + return an, rec(err) +} + +func (m metricsService) ListStreams(ctx context.Context, orgID platform.ID, filter influxdb.StreamListFilter) ([]influxdb.StoredStream, error) { + rec := m.rec.Record("find_streams") + stms, err := m.underlying.ListStreams(ctx, orgID, filter) + return stms, rec(err) +} + +func (m metricsService) CreateOrUpdateStream(ctx context.Context, orgID platform.ID, stream influxdb.Stream) (*influxdb.ReadStream, error) { + rec := m.rec.Record("create_or_update_stream") + stm, err := m.underlying.CreateOrUpdateStream(ctx, orgID, stream) + return stm, rec(err) +} + +func (m metricsService) GetStream(ctx context.Context, id platform.ID) (*influxdb.StoredStream, error) { + rec := m.rec.Record("find_stream_by_id") + stm, err := m.underlying.GetStream(ctx, id) + return stm, rec(err) +} + +func (m metricsService) UpdateStream(ctx context.Context, id platform.ID, stream influxdb.Stream) (*influxdb.ReadStream, error) { + rec := m.rec.Record("update_stream") + stm, err := m.underlying.UpdateStream(ctx, id, stream) + return stm, rec(err) +} + +func (m metricsService) DeleteStreams(ctx context.Context, orgID platform.ID, delete influxdb.BasicStream) error { + rec := m.rec.Record("delete_streams") + return rec(m.underlying.DeleteStreams(ctx, orgID, delete)) +} + +func (m metricsService) DeleteStreamByID(ctx context.Context, id platform.ID) error { + rec := m.rec.Record("delete_stream") + return rec(m.underlying.DeleteStreamByID(ctx, id)) +} diff --git a/annotations/service.go b/annotations/service.go new file mode 100644 index 00000000000..7e607b8f2ea --- /dev/null +++ b/annotations/service.go @@ -0,0 +1,596 @@ +package annotations + +import ( + "context" + "database/sql" + "errors" + "fmt" + "time" + + sq 
"github.com/Masterminds/squirrel" + "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/kit/platform" + ierrors "github.com/influxdata/influxdb/v2/kit/platform/errors" + "github.com/influxdata/influxdb/v2/snowflake" + "github.com/influxdata/influxdb/v2/sqlite" +) + +var ( + errAnnotationNotFound = &ierrors.Error{ + Code: ierrors.EInvalid, + Msg: "annotation not found", + } + errStreamNotFound = &ierrors.Error{ + Code: ierrors.EInvalid, + Msg: "stream not found", + } +) + +var _ influxdb.AnnotationService = (*Service)(nil) + +type Service struct { + store *sqlite.SqlStore + idGenerator platform.IDGenerator +} + +func NewService(store *sqlite.SqlStore) *Service { + return &Service{ + store: store, + idGenerator: snowflake.NewIDGenerator(), + } +} + +// CreateAnnotations creates annotations in the database for the provided orgID as defined by the provided list +// Streams corresponding to the StreamTag property of each annotation are created if they don't already exist +// as part of a transaction +func (s *Service) CreateAnnotations(ctx context.Context, orgID platform.ID, creates []influxdb.AnnotationCreate) ([]influxdb.AnnotationEvent, error) { + // Guard clause - an empty list was provided for some reason, immediately return an empty result + // set without doing the transaction + if len(creates) == 0 { + return []influxdb.AnnotationEvent{}, nil + } + + s.store.Mu.Lock() + defer s.store.Mu.Unlock() + + // store a unique list of stream names first. the invalid ID is a placeholder for the real id, + // which will be obtained separately + streamNamesIDs := make(map[string]platform.ID) + for _, c := range creates { + streamNamesIDs[c.StreamTag] = platform.InvalidID() + } + + // streamIDsNames is used for re-populating the resulting list of annotations with the stream names + // from the stream IDs returned from the database + streamIDsNames := make(map[platform.ID]string) + + tx, err := s.store.DB.BeginTxx(ctx, nil) + if err != nil { + tx.Rollback() + return nil, err + } + + // upsert each stream individually. a possible enhancement might be to do this as a single batched query + // it is unlikely that this would offer much benefit since there is currently no mechanism for creating large numbers + // of annotations simultaneously + now := time.Now() + for name := range streamNamesIDs { + query, args, err := newUpsertStreamQuery(orgID, s.idGenerator.ID(), now, influxdb.Stream{Name: name}) + if err != nil { + tx.Rollback() + return nil, err + } + + var streamID platform.ID + if err = tx.GetContext(ctx, &streamID, query, args...); err != nil { + tx.Rollback() + return nil, err + } + + streamNamesIDs[name] = streamID + streamIDsNames[streamID] = name + } + + // bulk insert for the creates. this also is unlikely to offer much performance benefit, but since the query + // is only used here it is easy enough to form to bulk query. + q := sq.Insert("annotations"). + Columns("id", "org_id", "stream_id", "summary", "message", "stickers", "duration", "lower", "upper"). + Suffix("RETURNING *") + + for _, create := range creates { + // double check that we have a valid name for this stream tag - error if we don't. this should never be an error. 
+ streamID, ok := streamNamesIDs[create.StreamTag] + if !ok { + tx.Rollback() + return nil, &ierrors.Error{ + Code: ierrors.EInternal, + Msg: fmt.Sprintf("unable to find id for stream %q", create.StreamTag), + } + } + + // add the row to the query + newID := s.idGenerator.ID() + lower := create.StartTime.Format(time.RFC3339Nano) + upper := create.EndTime.Format(time.RFC3339Nano) + duration := timesToDuration(*create.StartTime, *create.EndTime) + q = q.Values(newID, orgID, streamID, create.Summary, create.Message, create.Stickers, duration, lower, upper) + } + + // get the query string and args list for the bulk insert + query, args, err := q.ToSql() + if err != nil { + tx.Rollback() + return nil, err + } + + // run the bulk insert and store the result + var res []*influxdb.StoredAnnotation + if err := tx.SelectContext(ctx, &res, query, args...); err != nil { + tx.Rollback() + return nil, err + } + + if err = tx.Commit(); err != nil { + return nil, err + } + + // add the stream names to the list of results + for _, a := range res { + a.StreamTag = streamIDsNames[a.StreamID] + } + + // convert the StoredAnnotation structs to AnnotationEvent structs before returning + return storedAnnotationsToEvents(res) +} + +// ListAnnotations returns a list of annotations from the database matching the filter +// For time range matching, sqlite is able to compare times with millisecond accuracy +func (s *Service) ListAnnotations(ctx context.Context, orgID platform.ID, filter influxdb.AnnotationListFilter) ([]influxdb.StoredAnnotation, error) { + // we need to explicitly format time strings here and elsewhere to ensure they are + // interpreted by the database consistently + sf := filter.StartTime.Format(time.RFC3339Nano) + ef := filter.EndTime.Format(time.RFC3339Nano) + + q := sq.Select("annotations.*", "streams.name AS stream"). + Distinct(). + InnerJoin("streams ON annotations.stream_id = streams.id"). + Where(sq.Eq{"annotations.org_id": orgID}). + Where(sq.GtOrEq{"lower": sf}). + Where(sq.LtOrEq{"upper": ef}) + + // If the filter contains stickers, use the json_each table value function to break out + // rows with the sticker array values. If the filter does not contain stickers, using + // the json_each TVF would exclude annotations with an empty array of stickers, so select + // from the annotations table only. This allows a filter with no sticker constraints to + // return annotations that don't have any stickers. + if len(filter.StickerIncludes) > 0 { + q = q.From("annotations, json_each(annotations.stickers) AS json") + + // Add sticker filters to the query + for k, v := range filter.StickerIncludes { + q = q.Where(sq.And{sq.Eq{"json.value": fmt.Sprintf("%s=%s", k, v)}}) + } + } else { + q = q.From("annotations") + } + + // Add stream name filters to the query + if len(filter.StreamIncludes) > 0 { + q = q.Where(sq.Eq{"stream": filter.StreamIncludes}) + } + + sql, args, err := q.ToSql() + if err != nil { + return nil, err + } + + ans := []influxdb.StoredAnnotation{} + if err := s.store.DB.SelectContext(ctx, &ans, sql, args...); err != nil { + return nil, err + } + + return ans, nil +} + +// GetAnnotation gets a single annotation by ID +func (s *Service) GetAnnotation(ctx context.Context, id platform.ID) (*influxdb.StoredAnnotation, error) { + q := sq.Select("annotations.*, streams.name AS stream"). + From("annotations"). + InnerJoin("streams ON annotations.stream_id = streams.id"). 
+		Where(sq.Eq{"annotations.id": id})
+
+	query, args, err := q.ToSql()
+	if err != nil {
+		return nil, err
+	}
+
+	var a influxdb.StoredAnnotation
+	if err := s.store.DB.GetContext(ctx, &a, query, args...); err != nil {
+		if errors.Is(err, sql.ErrNoRows) {
+			return nil, errAnnotationNotFound
+		}
+
+		return nil, err
+	}
+
+	return &a, nil
+}
+
+// DeleteAnnotations deletes multiple annotations according to the provided filter
+func (s *Service) DeleteAnnotations(ctx context.Context, orgID platform.ID, delete influxdb.AnnotationDeleteFilter) error {
+	s.store.Mu.Lock()
+	defer s.store.Mu.Unlock()
+
+	sf := delete.StartTime.Format(time.RFC3339Nano)
+	ef := delete.EndTime.Format(time.RFC3339Nano)
+
+	// This is a subquery that will be used as part of a DELETE FROM ... WHERE id IN (subquery).
+	// A subquery is used because the json_each virtual table can only be used in a SELECT
+	subQ := sq.Select("annotations.id").
+		Distinct().
+		InnerJoin("streams ON annotations.stream_id = streams.id").
+		Where(sq.Eq{"annotations.org_id": orgID}).
+		Where(sq.GtOrEq{"lower": sf}).
+		Where(sq.LtOrEq{"upper": ef})
+
+	// If the filter contains stickers, use the json_each table value function to break out
+	// rows with the sticker array values. If the filter does not contain stickers, using
+	// the json_each TVF would exclude annotations with an empty array of stickers, so select
+	// from the annotations table only. This allows a filter with no sticker constraints to
+	// delete annotations that don't have any stickers.
+	if len(delete.Stickers) > 0 {
+		subQ = subQ.From("annotations, json_each(annotations.stickers) AS json")
+
+		// Add sticker filters to the subquery
+		for k, v := range delete.Stickers {
+			subQ = subQ.Where(sq.And{sq.Eq{"json.value": fmt.Sprintf("%s=%s", k, v)}})
+		}
+	} else {
+		subQ = subQ.From("annotations")
+	}
+
+	// Add the stream name filter to the subquery (if present)
+	if len(delete.StreamTag) > 0 {
+		subQ = subQ.Where(sq.Eq{"streams.name": delete.StreamTag})
+	}
+
+	// Add the stream ID filter to the subquery (if present)
+	if delete.StreamID.Valid() {
+		subQ = subQ.Where(sq.Eq{"stream_id": delete.StreamID})
+	}
+
+	// Parse the subquery into a string and list of args
+	subQuery, subArgs, err := subQ.ToSql()
+	if err != nil {
+		return err
+	}
+
+	// Convert the subquery into a sq.Sqlizer so that it can be used in the actual DELETE
+	// operation. This is a bit of a hack since squirrel doesn't have great support for subqueries
+	// outside of SELECT statements
+	subExpr := sq.Expr("("+subQuery+")", subArgs...)
+
+	q := sq.
+		Delete("annotations").
+		Suffix("WHERE annotations.id IN").
+		SuffixExpr(subExpr)
+
+	query, args, err := q.ToSql()
+	if err != nil {
+		return err
+	}
+
+	if _, err := s.store.DB.ExecContext(ctx, query, args...); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// DeleteAnnotation deletes a single annotation by ID
+func (s *Service) DeleteAnnotation(ctx context.Context, id platform.ID) error {
+	s.store.Mu.Lock()
+	defer s.store.Mu.Unlock()
+
+	q := sq.Delete("annotations").
+		Where(sq.Eq{"id": id}).
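+		// The RETURNING clause makes the delete row-returning: scanning the
+		// deleted id below surfaces sql.ErrNoRows when nothing matched, which
+		// doubles as the not-found check without a separate lookup query.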
+ Suffix("RETURNING id") + + query, args, err := q.ToSql() + if err != nil { + return err + } + + var d platform.ID + if err := s.store.DB.GetContext(ctx, &d, query, args...); err != nil { + if errors.Is(err, sql.ErrNoRows) { + return errAnnotationNotFound + } + + return err + } + + return nil +} + +// UpdateAnnotation updates a single annotation by ID +// In a similar fashion as CreateAnnotations, if the StreamTag in the update request does not exist, +// a stream will be created as part of a transaction with the update operation +func (s *Service) UpdateAnnotation(ctx context.Context, id platform.ID, update influxdb.AnnotationCreate) (*influxdb.AnnotationEvent, error) { + // get the full data for this annotation first so we can get its orgID + // this will ensure that the annotation already exists before starting the transaction + ann, err := s.GetAnnotation(ctx, id) + if err != nil { + return nil, err + } + + now := time.Now() + + // get a write lock on the database before starting the transaction to create/update the stream + // while simultaneously updating the annotation + s.store.Mu.Lock() + defer s.store.Mu.Unlock() + + tx, err := s.store.DB.BeginTxx(ctx, nil) + if err != nil { + tx.Rollback() + return nil, err + } + + query, args, err := newUpsertStreamQuery(ann.OrgID, s.idGenerator.ID(), now, influxdb.Stream{Name: update.StreamTag}) + if err != nil { + tx.Rollback() + return nil, err + } + + var streamID platform.ID + if err = tx.GetContext(ctx, &streamID, query, args...); err != nil { + tx.Rollback() + return nil, err + } + + q := sq.Update("annotations"). + SetMap(sq.Eq{ + "stream_id": streamID, + "summary": update.Summary, + "message": update.Message, + "stickers": update.Stickers, + "duration": timesToDuration(*update.StartTime, *update.EndTime), + "lower": update.StartTime.Format(time.RFC3339Nano), + "upper": update.EndTime.Format(time.RFC3339Nano), + }). + Where(sq.Eq{"id": id}). + Suffix("RETURNING *") + + query, args, err = q.ToSql() + if err != nil { + return nil, err + } + + var st influxdb.StoredAnnotation + err = tx.GetContext(ctx, &st, query, args...) + if err != nil { + tx.Rollback() + return nil, err + } + + if err = tx.Commit(); err != nil { + return nil, err + } + + // add the stream name to the result. we know that this StreamTag value was updated to the + // stream via the transaction having completed successfully. + st.StreamTag = update.StreamTag + + return st.ToEvent() +} + +// ListStreams returns a list of streams matching the filter for the provided orgID. +func (s *Service) ListStreams(ctx context.Context, orgID platform.ID, filter influxdb.StreamListFilter) ([]influxdb.StoredStream, error) { + q := sq.Select("id", "org_id", "name", "description", "created_at", "updated_at"). + From("streams"). + Where(sq.Eq{"org_id": orgID}) + + // Add stream name filters to the query + if len(filter.StreamIncludes) > 0 { + q = q.Where(sq.Eq{"name": filter.StreamIncludes}) + } + + sql, args, err := q.ToSql() + if err != nil { + return nil, err + } + + sts := []influxdb.StoredStream{} + err = s.store.DB.SelectContext(ctx, &sts, sql, args...) + if err != nil { + return nil, err + } + + return sts, nil +} + +// GetStream gets a single stream by ID +func (s *Service) GetStream(ctx context.Context, id platform.ID) (*influxdb.StoredStream, error) { + q := sq.Select("id", "org_id", "name", "description", "created_at", "updated_at"). + From("streams"). 
+		Where(sq.Eq{"id": id})
+
+	query, args, err := q.ToSql()
+	if err != nil {
+		return nil, err
+	}
+
+	var st influxdb.StoredStream
+	if err := s.store.DB.GetContext(ctx, &st, query, args...); err != nil {
+		if errors.Is(err, sql.ErrNoRows) {
+			return nil, errStreamNotFound
+		}
+
+		return nil, err
+	}
+
+	return &st, nil
+}
+
+// CreateOrUpdateStream creates a new stream, or updates the description of an existing stream.
+// Doesn't support updating a stream description to "". For that use the UpdateStream method.
+func (s *Service) CreateOrUpdateStream(ctx context.Context, orgID platform.ID, stream influxdb.Stream) (*influxdb.ReadStream, error) {
+	s.store.Mu.Lock()
+	defer s.store.Mu.Unlock()
+
+	newID := s.idGenerator.ID()
+	now := time.Now()
+	query, args, err := newUpsertStreamQuery(orgID, newID, now, stream)
+	if err != nil {
+		return nil, err
+	}
+
+	var id platform.ID
+	if err = s.store.DB.GetContext(ctx, &id, query, args...); err != nil {
+		return nil, err
+	}
+
+	// do a separate query to read the stream back from the database and return it.
+	// this is necessary because the sqlite driver does not support scanning time values from
+	// a RETURNING clause back into time.Time
+	return s.getReadStream(ctx, id)
+}
+
+// UpdateStream updates a stream name and/or a description. It is strictly used for updating an existing stream.
+func (s *Service) UpdateStream(ctx context.Context, id platform.ID, stream influxdb.Stream) (*influxdb.ReadStream, error) {
+	s.store.Mu.Lock()
+	defer s.store.Mu.Unlock()
+
+	q := sq.Update("streams").
+		SetMap(sq.Eq{
+			"name":        stream.Name,
+			"description": stream.Description,
+			"updated_at":  sq.Expr(`datetime('now')`),
+		}).
+		Where(sq.Eq{"id": id}).
+		Suffix(`RETURNING id`)
+
+	query, args, err := q.ToSql()
+	if err != nil {
+		return nil, err
+	}
+
+	var newID platform.ID
+	err = s.store.DB.GetContext(ctx, &newID, query, args...)
+	if err != nil {
+		if errors.Is(err, sql.ErrNoRows) {
+			return nil, errStreamNotFound
+		}
+
+		return nil, err
+	}
+
+	// do a separate query to read the stream back from the database and return it.
+	// this is necessary because the sqlite driver does not support scanning time values from
+	// a RETURNING clause back into time.Time
+	return s.getReadStream(ctx, newID)
+}
+
+// DeleteStreams is used for deleting multiple streams by name
+func (s *Service) DeleteStreams(ctx context.Context, orgID platform.ID, delete influxdb.BasicStream) error {
+	s.store.Mu.Lock()
+	defer s.store.Mu.Unlock()
+
+	q := sq.Delete("streams").
+		Where(sq.Eq{"org_id": orgID}).
+		Where(sq.Eq{"name": delete.Names})
+
+	query, args, err := q.ToSql()
+	if err != nil {
+		return err
+	}
+
+	_, err = s.store.DB.ExecContext(ctx, query, args...)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// DeleteStreamByID deletes a single stream by ID. Returns an error if the ID could not be found.
+func (s *Service) DeleteStreamByID(ctx context.Context, id platform.ID) error {
+	s.store.Mu.Lock()
+	defer s.store.Mu.Unlock()
+
+	q := sq.Delete("streams").
+		Where(sq.Eq{"id": id}).
+		Suffix("RETURNING id")
+
+	query, args, err := q.ToSql()
+	if err != nil {
+		return err
+	}
+
+	var d platform.ID
+	if err := s.store.DB.GetContext(ctx, &d, query, args...); err != nil {
+		if errors.Is(err, sql.ErrNoRows) {
+			return errStreamNotFound
+		}
+
+		return err
+	}
+
+	return nil
+}
+
+func newUpsertStreamQuery(orgID, newID platform.ID, t time.Time, stream influxdb.Stream) (string, []interface{}, error) {
+	q := sq.Insert("streams").
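+		// This forms an upsert: on an (org_id, name) conflict, updated_at is
+		// refreshed and the description is overwritten only when the incoming
+		// value is non-empty (the IIF below), which is why CreateOrUpdateStream
+		// cannot clear a description.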
+ Columns("id", "org_id", "name", "description", "created_at", "updated_at"). + Values(newID, orgID, stream.Name, stream.Description, t, t). + Suffix(`ON CONFLICT(org_id, name) DO UPDATE + SET + updated_at = excluded.updated_at, + description = IIF(length(excluded.description) = 0, description, excluded.description)`). + Suffix("RETURNING id") + + return q.ToSql() +} + +// getReadStream is a helper which should only be called when the stream has been verified to exist +// via an update or insert. +func (s *Service) getReadStream(ctx context.Context, id platform.ID) (*influxdb.ReadStream, error) { + q := sq.Select("id", "name", "description", "created_at", "updated_at"). + From("streams"). + Where(sq.Eq{"id": id}) + + query, args, err := q.ToSql() + if err != nil { + return nil, err + } + + r := &influxdb.ReadStream{} + if err := s.store.DB.GetContext(ctx, r, query, args...); err != nil { + return nil, err + } + + return r, nil +} + +func storedAnnotationsToEvents(stored []*influxdb.StoredAnnotation) ([]influxdb.AnnotationEvent, error) { + events := make([]influxdb.AnnotationEvent, 0, len(stored)) + for _, s := range stored { + c, err := s.ToCreate() + if err != nil { + return nil, err + } + + events = append(events, influxdb.AnnotationEvent{ + ID: s.ID, + AnnotationCreate: *c, + }) + } + + return events, nil +} + +func timesToDuration(l, u time.Time) string { + return fmt.Sprintf("[%s, %s]", l.Format(time.RFC3339Nano), u.Format(time.RFC3339Nano)) +} diff --git a/annotations/service_test.go b/annotations/service_test.go new file mode 100644 index 00000000000..d91c5df8f99 --- /dev/null +++ b/annotations/service_test.go @@ -0,0 +1,1054 @@ +//go:build sqlite_json && sqlite_foreign_keys + +package annotations + +import ( + "context" + "sort" + "testing" + "time" + + "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/kit/platform" + "github.com/influxdata/influxdb/v2/snowflake" + "github.com/influxdata/influxdb/v2/sqlite" + "github.com/influxdata/influxdb/v2/sqlite/migrations" + influxdbtesting "github.com/influxdata/influxdb/v2/testing" + "github.com/stretchr/testify/require" + "go.uber.org/zap" +) + +var ( + idGen = snowflake.NewIDGenerator() +) + +func TestAnnotationsCRUD(t *testing.T) { + t.Parallel() + + // intialize some variables that can be shared across tests + // the timeline for the 3 test annotations start & end times is visualized below. 
+ // now + // v + // |---|---|---|---| + // ^ ^ ^ ^ ^ + // st1 et1 + // st2 et2 + // st3 et3 + // st4 et4 + + et1 := time.Now().UTC() + st1 := et1.Add(-10 * time.Minute) + + et2 := et1.Add(-5 * time.Minute) + st2 := et2.Add(-10 * time.Minute) + + et3 := et1.Add(-10 * time.Minute) + st3 := et2.Add(-15 * time.Minute) + + et4 := et3 + st4 := st3 + + // used for tests involving time filters + earlierEt1 := et1.Add(-1 * time.Millisecond) + laterSt3 := st3.Add(1 * time.Millisecond) + beforeAny := st3.Add(-1 * time.Millisecond) + afterAny := et1.Add(1 * time.Millisecond) + + orgID := *influxdbtesting.IDPtr(1) + otherOrgID := *influxdbtesting.IDPtr(2) + ctx := context.Background() + + s1 := influxdb.StoredAnnotation{ + OrgID: orgID, + StreamTag: "stream1", + Summary: "summary1", + Message: "message1", + Stickers: map[string]string{"stick1": "val1", "stick2": "val2"}, + Duration: timesToDuration(st1, et1), + Lower: st1.Format(time.RFC3339Nano), + Upper: et1.Format(time.RFC3339Nano), + } + + c1, err := s1.ToCreate() + require.NoError(t, err) + + s2 := influxdb.StoredAnnotation{ + OrgID: orgID, + StreamTag: "stream2", + Summary: "summary2", + Message: "message2", + Stickers: map[string]string{"stick2": "val2", "stick3": "val3", "stick4": "val4"}, + Duration: timesToDuration(st2, et2), + Lower: st2.Format(time.RFC3339Nano), + Upper: et2.Format(time.RFC3339Nano), + } + + c2, err := s2.ToCreate() + require.NoError(t, err) + + s3 := influxdb.StoredAnnotation{ + OrgID: orgID, + StreamTag: "stream2", + Summary: "summary3", + Message: "message3", + Stickers: map[string]string{"stick1": "val2"}, + Duration: timesToDuration(st3, et3), + Lower: st3.Format(time.RFC3339Nano), + Upper: et3.Format(time.RFC3339Nano), + } + + c3, err := s3.ToCreate() + require.NoError(t, err) + + // s4 is an annotation without any stickers, with the same start/end time as s3 + s4 := influxdb.StoredAnnotation{ + OrgID: orgID, + StreamTag: "stream4", + Summary: "summary4", + Message: "message4", + Stickers: map[string]string{}, + Duration: timesToDuration(st4, et4), + Lower: st3.Format(time.RFC3339Nano), + Upper: et3.Format(time.RFC3339Nano), + } + + c4, err := s4.ToCreate() + require.NoError(t, err) + + // helper function for setting up the database with data that can be used for tests + // that involve querying the database. uses the annotations objects initialized above + // via the closure. 
+ populateAnnotationsData := func(t *testing.T, svc *Service) []influxdb.AnnotationEvent { + t.Helper() + + got, err := svc.CreateAnnotations(ctx, orgID, []influxdb.AnnotationCreate{*c1, *c2, *c3, *c4}) + require.NoError(t, err) + assertAnnotationEvents(t, got, []influxdb.AnnotationEvent{ + {AnnotationCreate: *c1}, + {AnnotationCreate: *c2}, + {AnnotationCreate: *c3}, + {AnnotationCreate: *c4}, + }) + + return got + } + + t.Run("create annotations", func(t *testing.T) { + svc, clean := newTestService(t) + defer clean(t) + + tests := []struct { + name string + creates []influxdb.AnnotationCreate + want []influxdb.AnnotationEvent + }{ + { + "empty creates list returns empty events list", + []influxdb.AnnotationCreate{}, + []influxdb.AnnotationEvent{}, + }, + { + "creates annotations successfully", + []influxdb.AnnotationCreate{*c1, *c2, *c3, *c4}, + []influxdb.AnnotationEvent{ + {AnnotationCreate: *c1}, + {AnnotationCreate: *c2}, + {AnnotationCreate: *c3}, + {AnnotationCreate: *c4}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := svc.CreateAnnotations(ctx, orgID, tt.creates) + require.NoError(t, err) + assertAnnotationEvents(t, got, tt.want) + }) + } + }) + + t.Run("select with filters", func(t *testing.T) { + + svc, clean := newTestService(t) + defer clean(t) + populateAnnotationsData(t, svc) + + tests := []struct { + name string + orgID platform.ID + f influxdb.AnnotationListFilter + want []influxdb.StoredAnnotation + skip string // link to issue and/or reason + }{ + { + name: "time filter is inclusive - gets all", + orgID: orgID, + f: influxdb.AnnotationListFilter{ + BasicFilter: influxdb.BasicFilter{ + StartTime: &st3, + EndTime: &et1, + }, + }, + want: []influxdb.StoredAnnotation{s1, s2, s3, s4}, + skip: "", + }, + { + name: "doesn't get results for other org", + orgID: otherOrgID, + f: influxdb.AnnotationListFilter{ + BasicFilter: influxdb.BasicFilter{ + StartTime: &st3, + EndTime: &et1, + }, + }, + want: []influxdb.StoredAnnotation{}, + skip: "", + }, + { + name: "end time will filter out annotations", + orgID: orgID, + f: influxdb.AnnotationListFilter{ + BasicFilter: influxdb.BasicFilter{ + StartTime: &st3, + EndTime: &earlierEt1, + }, + }, + want: []influxdb.StoredAnnotation{s2, s3, s4}, + skip: "", + }, + { + name: "start time will filter out annotations", + orgID: orgID, + f: influxdb.AnnotationListFilter{ + BasicFilter: influxdb.BasicFilter{ + StartTime: &laterSt3, + EndTime: &et1, + }, + }, + want: []influxdb.StoredAnnotation{s1, s2}, + skip: "https://github.com/influxdata/influxdb/issues/23272", + }, + { + name: "time can filter out all annotations if it's too soon", + orgID: orgID, + f: influxdb.AnnotationListFilter{ + BasicFilter: influxdb.BasicFilter{ + StartTime: &beforeAny, + EndTime: &beforeAny, + }, + }, + want: []influxdb.StoredAnnotation{}, + skip: "https://github.com/influxdata/influxdb/issues/23272", + }, + { + name: "time can filter out all annotations if it's too late", + orgID: orgID, + f: influxdb.AnnotationListFilter{ + BasicFilter: influxdb.BasicFilter{ + StartTime: &afterAny, + EndTime: &afterAny, + }, + }, + want: []influxdb.StoredAnnotation{}, + skip: "", + }, + { + name: "time can filter out all annotations if it's too narrow", + orgID: orgID, + f: influxdb.AnnotationListFilter{ + BasicFilter: influxdb.BasicFilter{ + StartTime: &laterSt3, + EndTime: &et3, + }, + }, + want: []influxdb.StoredAnnotation{}, + skip: "", + }, + { + name: "can filter by stickers - one sticker matches one", + orgID: 
orgID, + f: influxdb.AnnotationListFilter{ + StickerIncludes: map[string]string{"stick1": "val2"}, + }, + want: []influxdb.StoredAnnotation{s3}, + skip: "", + }, + { + name: "can filter by stickers - one sticker matches multiple", + orgID: orgID, + f: influxdb.AnnotationListFilter{ + StickerIncludes: map[string]string{"stick2": "val2"}, + }, + want: []influxdb.StoredAnnotation{s1, s2}, + skip: "", + }, + { + name: "can filter by stickers - matching key but wrong value", + orgID: orgID, + f: influxdb.AnnotationListFilter{ + StickerIncludes: map[string]string{"stick2": "val3"}, + }, + want: []influxdb.StoredAnnotation{}, + skip: "", + }, + { + name: "can filter by stream - matches one", + orgID: orgID, + f: influxdb.AnnotationListFilter{ + StreamIncludes: []string{"stream1"}, + }, + want: []influxdb.StoredAnnotation{s1}, + skip: "", + }, + { + name: "can filter by stream - matches multiple", + orgID: orgID, + f: influxdb.AnnotationListFilter{ + StreamIncludes: []string{"stream2"}, + }, + want: []influxdb.StoredAnnotation{s2, s3}, + skip: "", + }, + { + name: "can filter by stream - no match", + orgID: orgID, + f: influxdb.AnnotationListFilter{ + StreamIncludes: []string{"badStream"}, + }, + want: []influxdb.StoredAnnotation{}, + skip: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.skip != "" { + t.Skip(tt.skip) + } + tt.f.Validate(time.Now) + got, err := svc.ListAnnotations(ctx, tt.orgID, tt.f) + require.NoError(t, err) + assertStoredAnnotations(t, got, tt.want) + }) + } + }) + + t.Run("get by id", func(t *testing.T) { + svc, clean := newTestService(t) + defer clean(t) + anns := populateAnnotationsData(t, svc) + + tests := []struct { + name string + id platform.ID + want *influxdb.AnnotationEvent + wantErr error + }{ + { + "gets the first one by id", + anns[0].ID, + &anns[0], + nil, + }, + { + "gets the second one by id", + anns[1].ID, + &anns[1], + nil, + }, + { + "has the correct error if not found", + idGen.ID(), + nil, + errAnnotationNotFound, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := svc.GetAnnotation(ctx, tt.id) + require.Equal(t, tt.wantErr, err) + + if tt.want == nil { + require.Nil(t, got) + } else { + e, err := got.ToEvent() + require.NoError(t, err) + require.Equal(t, tt.want, e) + } + }) + } + }) + + t.Run("delete multiple with a filter", func(t *testing.T) { + t.Run("delete by stream id", func(t *testing.T) { + svc, clean := newTestService(t) + defer clean(t) + populateAnnotationsData(t, svc) + + ctx := context.Background() + + lf := influxdb.AnnotationListFilter{BasicFilter: influxdb.BasicFilter{}} + lf.Validate(time.Now) + ans, err := svc.ListAnnotations(ctx, orgID, lf) + require.NoError(t, err) + + annID1 := ans[0].ID + streamID1 := ans[0].StreamID + st1, err := time.Parse(time.RFC3339Nano, ans[0].Lower) + require.NoError(t, err) + et1, err := time.Parse(time.RFC3339Nano, ans[0].Upper) + require.NoError(t, err) + + streamID2 := ans[1].StreamID + st2, err := time.Parse(time.RFC3339Nano, ans[1].Lower) + require.NoError(t, err) + et2, err := time.Parse(time.RFC3339Nano, ans[1].Upper) + require.NoError(t, err) + + tests := []struct { + name string + deleteOrgID platform.ID + id platform.ID + filter influxdb.AnnotationDeleteFilter + shouldDelete bool + }{ + { + "matches stream id but not time range", + orgID, + annID1, + influxdb.AnnotationDeleteFilter{ + StreamID: streamID1, + StartTime: &st2, + EndTime: &et2, + }, + false, + }, + { + "matches time range but not stream id", + orgID, + 
annID1, + influxdb.AnnotationDeleteFilter{ + StreamID: streamID2, + StartTime: &st1, + EndTime: &et1, + }, + false, + }, + { + "doesn't delete for other org", + otherOrgID, + annID1, + influxdb.AnnotationDeleteFilter{ + StreamID: streamID1, + StartTime: &st1, + EndTime: &et1, + }, + false, + }, + { + "matches stream id and time range", + orgID, + annID1, + influxdb.AnnotationDeleteFilter{ + StreamID: streamID1, + StartTime: &st1, + EndTime: &et1, + }, + true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := svc.DeleteAnnotations(ctx, tt.deleteOrgID, tt.filter) + require.NoError(t, err) + + lf := influxdb.AnnotationListFilter{BasicFilter: influxdb.BasicFilter{}} + lf.Validate(time.Now) + list, err := svc.ListAnnotations(ctx, orgID, lf) + require.NoError(t, err) + get, getErr := svc.GetAnnotation(ctx, tt.id) + + if tt.shouldDelete { + require.Equal(t, 3, len(list)) + require.Nil(t, get) + require.Equal(t, errAnnotationNotFound, getErr) + } else { + require.Equal(t, 4, len(list)) + require.NoError(t, getErr) + require.Equal(t, *get, ans[0]) + } + }) + } + }) + + t.Run("delete with non-id filters", func(t *testing.T) { + svc, clean := newTestService(t) + defer clean(t) + populateAnnotationsData(t, svc) + + tests := []struct { + name string + deleteOrgID platform.ID + filter influxdb.AnnotationDeleteFilter + wantList []influxdb.StoredAnnotation + }{ + { + "matches stream tag but not time range", + orgID, + influxdb.AnnotationDeleteFilter{ + StreamTag: "stream1", + StartTime: &st1, + EndTime: &earlierEt1, + }, + []influxdb.StoredAnnotation{s1, s2, s3, s4}, + }, + { + "matches stream tag and time range", + orgID, + influxdb.AnnotationDeleteFilter{ + StreamTag: "stream1", + StartTime: &st1, + EndTime: &et1, + }, + []influxdb.StoredAnnotation{s2, s3, s4}, + }, + { + "matches stream tag and time range for item with no stickers", + orgID, + influxdb.AnnotationDeleteFilter{ + StreamTag: "stream4", + StartTime: &st4, + EndTime: &et4, + }, + []influxdb.StoredAnnotation{s1, s2, s3}, + }, + { + "matches stream tag for multiple", + orgID, + influxdb.AnnotationDeleteFilter{ + StreamTag: "stream2", + StartTime: &st3, + EndTime: &et1, + }, + []influxdb.StoredAnnotation{s1, s4}, + }, + { + "matches stream tag but wrong org", + otherOrgID, + influxdb.AnnotationDeleteFilter{ + StreamTag: "stream1", + StartTime: &st1, + EndTime: &et1, + }, + []influxdb.StoredAnnotation{s1, s2, s3, s4}, + }, + + { + "matches stickers but not time range", + orgID, + influxdb.AnnotationDeleteFilter{ + Stickers: map[string]string{"stick1": "val1"}, + StartTime: &st1, + EndTime: &earlierEt1, + }, + []influxdb.StoredAnnotation{s1, s2, s3, s4}, + }, + { + "matches stickers and time range", + orgID, + influxdb.AnnotationDeleteFilter{ + Stickers: map[string]string{"stick1": "val1"}, + StartTime: &st1, + EndTime: &et1, + }, + []influxdb.StoredAnnotation{s2, s3, s4}, + }, + { + "matches stickers for multiple", + orgID, + influxdb.AnnotationDeleteFilter{ + Stickers: map[string]string{"stick2": "val2"}, + StartTime: &st2, + EndTime: &et1, + }, + []influxdb.StoredAnnotation{s3, s4}, + }, + { + "matches stickers but wrong org", + otherOrgID, + influxdb.AnnotationDeleteFilter{ + Stickers: map[string]string{"stick1": "val1"}, + StartTime: &st1, + EndTime: &et1, + }, + []influxdb.StoredAnnotation{s1, s2, s3, s4}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + svc, clean := newTestService(t) + defer clean(t) + populateAnnotationsData(t, svc) + + err := svc.DeleteAnnotations(ctx, 
tt.deleteOrgID, tt.filter) + require.NoError(t, err) + + f := influxdb.AnnotationListFilter{} + f.Validate(time.Now) + list, err := svc.ListAnnotations(ctx, orgID, f) + require.NoError(t, err) + assertStoredAnnotations(t, list, tt.wantList) + }) + } + }) + }) + + t.Run("delete a single annotation by id", func(t *testing.T) { + svc, clean := newTestService(t) + defer clean(t) + ans := populateAnnotationsData(t, svc) + + tests := []struct { + name string + id platform.ID + shouldDelete bool + }{ + { + "has the correct error if not found", + idGen.ID(), + false, + }, + { + "deletes the first one by id", + ans[0].ID, + true, + }, + { + "deletes the second one by id", + ans[1].ID, + true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := svc.DeleteAnnotation(ctx, tt.id) + + if tt.shouldDelete { + require.NoError(t, err) + } else { + require.Equal(t, errAnnotationNotFound, err) + } + + got, err := svc.GetAnnotation(ctx, tt.id) + require.Equal(t, errAnnotationNotFound, err) + require.Nil(t, got) + }) + } + }) + + t.Run("update a single annotation by id", func(t *testing.T) { + svc, clean := newTestService(t) + defer clean(t) + ans := populateAnnotationsData(t, svc) + + updatedTime := time.Time{}.Add(time.Minute) + + tests := []struct { + name string + id platform.ID + update influxdb.AnnotationCreate + wantErr error + }{ + { + "has the correct error if not found", + idGen.ID(), + influxdb.AnnotationCreate{ + StreamTag: "updated tag", + Summary: "updated summary", + Message: "updated message", + Stickers: map[string]string{"updated": "sticker"}, + EndTime: &updatedTime, + StartTime: &updatedTime, + }, + errAnnotationNotFound, + }, + { + "updates the first one by id", + ans[0].ID, + influxdb.AnnotationCreate{ + StreamTag: "updated tag", + Summary: "updated summary", + Message: "updated message", + Stickers: map[string]string{"updated": "sticker"}, + EndTime: &updatedTime, + StartTime: &updatedTime, + }, + nil, + }, + { + "updates the second one by id", + ans[1].ID, + influxdb.AnnotationCreate{ + StreamTag: "updated tag2", + Summary: "updated summary2", + Message: "updated message2", + Stickers: map[string]string{"updated2": "sticker2"}, + EndTime: &updatedTime, + StartTime: &updatedTime, + }, + nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + want := &influxdb.AnnotationEvent{ID: tt.id, AnnotationCreate: tt.update} + if tt.wantErr != nil { + want = nil + } + + got, err := svc.UpdateAnnotation(ctx, tt.id, tt.update) + require.Equal(t, tt.wantErr, err) + require.Equal(t, want, got) + + if tt.wantErr == nil { + new, err := svc.GetAnnotation(ctx, tt.id) + require.NoError(t, err) + e, err := new.ToEvent() + require.NoError(t, err) + require.Equal(t, got, e) + } + }) + } + }) + + t.Run("deleted streams cascade to deleted annotations", func(t *testing.T) { + svc, clean := newTestService(t) + defer clean(t) + + ctx := context.Background() + ans := populateAnnotationsData(t, svc) + sort.Slice(ans, func(i, j int) bool { + return ans[i].StreamTag < ans[j].StreamTag + }) + + // annotations s2 and s3 have the stream tag of "stream2", so get the id of that stream + id := ans[1].ID + a, err := svc.GetAnnotation(ctx, id) + require.NoError(t, err) + streamID := a.StreamID + + // delete the stream + err = svc.DeleteStreamByID(ctx, streamID) + require.NoError(t, err) + + // s1 and s4 should still be there + s1, err := svc.GetAnnotation(ctx, ans[0].ID) + require.NoError(t, err) + s4, err := svc.GetAnnotation(ctx, ans[3].ID) + require.NoError(t, 
err)
+
+		// both s2 and s3 should now be deleted
+		f := influxdb.AnnotationListFilter{}
+		f.Validate(time.Now)
+		remaining, err := svc.ListAnnotations(ctx, orgID, f)
+		require.NoError(t, err)
+		require.Equal(t, []influxdb.StoredAnnotation{*s1, *s4}, remaining)
+	})
+
+	t.Run("renamed streams are reflected in subsequent annotation queries", func(t *testing.T) {
+		svc, clean := newTestService(t)
+		defer clean(t)
+
+		ctx := context.Background()
+		populateAnnotationsData(t, svc)
+
+		// get all the annotations with the tag "stream2"
+		f := influxdb.AnnotationListFilter{StreamIncludes: []string{"stream2"}}
+		f.Validate(time.Now)
+		originalList, err := svc.ListAnnotations(ctx, orgID, f)
+		require.NoError(t, err)
+		assertStoredAnnotations(t, []influxdb.StoredAnnotation{s2, s3}, originalList)
+
+		// check that the original list has the right stream tag for all annotations
+		for _, a := range originalList {
+			require.Equal(t, "stream2", a.StreamTag)
+		}
+
+		// update the name for stream2
+		streamID := originalList[0].StreamID
+		_, err = svc.UpdateStream(ctx, streamID, influxdb.Stream{Name: "new name", Description: "new desc"})
+		require.NoError(t, err)
+
+		// get all the annotations with the new tag
+		f = influxdb.AnnotationListFilter{StreamIncludes: []string{"new name"}}
+		f.Validate(time.Now)
+		newList, err := svc.ListAnnotations(ctx, orgID, f)
+		require.NoError(t, err)
+
+		// check that the new list has the right stream tag for all annotations
+		for _, a := range newList {
+			require.Equal(t, "new name", a.StreamTag)
+		}
+
+		// verify that the new list of annotations is the same as the original except for the stream name change
+		require.Equal(t, len(originalList), len(newList))
+
+		sort.Slice(originalList, func(i, j int) bool {
+			return originalList[i].ID < originalList[j].ID
+		})
+
+		// sort newList by its own elements, not originalList's
+		sort.Slice(newList, func(i, j int) bool {
+			return newList[i].ID < newList[j].ID
+		})
+
+		for i := range newList {
+			originalList[i].StreamTag = "new name"
+			require.Equal(t, originalList[i], newList[i])
+		}
+	})
+}
+
+func TestStreamsCRUDSingle(t *testing.T) {
+	t.Parallel()
+
+	svc, clean := newTestService(t)
+	defer clean(t)
+
+	ctx := context.Background()
+	orgID := *influxdbtesting.IDPtr(1)
+
+	stream := influxdb.Stream{
+		Name:        "testName",
+		Description: "original description",
+	}
+
+	var err error
+	var s1, s2, s3 *influxdb.ReadStream
+
+	t.Run("create a single stream", func(t *testing.T) {
+		s1, err = svc.CreateOrUpdateStream(ctx, orgID, stream)
+		require.NoError(t, err)
+		require.Equal(t, stream.Name, s1.Name)
+		require.Equal(t, stream.Description, s1.Description)
+	})
+
+	t.Run("stream updates", func(t *testing.T) {
+		u1 := influxdb.Stream{
+			Name:        "testName",
+			Description: "updated description",
+		}
+
+		u2 := influxdb.Stream{
+			Name:        "otherName",
+			Description: "other description",
+		}
+
+		t.Run("updating an existing stream with CreateOrUpdateStream does not change id but does change description", func(t *testing.T) {
+			s2, err = svc.CreateOrUpdateStream(ctx, orgID, u1)
+			require.NoError(t, err)
+			require.Equal(t, stream.Name, s2.Name)
+			require.Equal(t, u1.Description, s2.Description)
+			require.Equal(t, s1.ID, s2.ID)
+		})
+
+		t.Run("updating a non-existent stream with UpdateStream returns not found error", func(t *testing.T) {
+			readGot, err := svc.UpdateStream(ctx, idGen.ID(), u2)
+			require.Nil(t, readGot)
+			require.Equal(t, errStreamNotFound, err)
+		})
+
+		t.Run("updating an existing stream with UpdateStream changes both name & description", func(t *testing.T) {
+			s3, err =
svc.UpdateStream(ctx, s2.ID, u2)
+			require.NoError(t, err)
+			require.Equal(t, s2.ID, s3.ID)
+			require.Equal(t, u2.Name, s3.Name)
+			require.Equal(t, u2.Description, s3.Description)
+		})
+	})
+
+	t.Run("getting a stream", func(t *testing.T) {
+		t.Run("non-existent stream returns a not found error", func(t *testing.T) {
+			storedGot, err := svc.GetStream(ctx, idGen.ID())
+			require.Nil(t, storedGot)
+			require.Equal(t, errStreamNotFound, err)
+		})
+
+		t.Run("existing stream returns without error", func(t *testing.T) {
+			storedGot, err := svc.GetStream(ctx, s3.ID)
+			require.NoError(t, err)
+			require.Equal(t, s3.Name, storedGot.Name)
+			require.Equal(t, s3.Description, storedGot.Description)
+		})
+	})
+
+	t.Run("deleting a stream", func(t *testing.T) {
+		t.Run("non-existent stream returns a not found error", func(t *testing.T) {
+			err := svc.DeleteStreamByID(ctx, idGen.ID())
+			require.Equal(t, errStreamNotFound, err)
+		})
+
+		t.Run("deletes an existing stream without error", func(t *testing.T) {
+			err := svc.DeleteStreamByID(ctx, s1.ID)
+			require.NoError(t, err)
+
+			storedGot, err := svc.GetStream(ctx, s1.ID)
+			require.Nil(t, storedGot)
+			require.Equal(t, err, errStreamNotFound)
+		})
+	})
+}
+
+func TestStreamsCRUDMany(t *testing.T) {
+	t.Parallel()
+
+	svc, clean := newTestService(t)
+	defer clean(t)
+
+	ctx := context.Background()
+
+	orgID1 := influxdbtesting.IDPtr(1)
+	orgID2 := influxdbtesting.IDPtr(2)
+	orgID3 := influxdbtesting.IDPtr(3)
+
+	// populate the database with some streams for testing delete and select many
+	combos := map[platform.ID][]string{
+		*orgID1: {"org1_s1", "org1_s2", "org1_s3", "org1_s4"},
+		*orgID2: {"org2_s1"},
+		*orgID3: {"org3_s1", "org3_s2"},
+	}
+
+	for orgID, streams := range combos {
+		for _, s := range streams {
+			_, err := svc.CreateOrUpdateStream(ctx, orgID, influxdb.Stream{
+				Name: s,
+			})
+			require.NoError(t, err)
+		}
+	}
+
+	t.Run("all streams can be listed for each org if passing an empty list", func(t *testing.T) {
+		for orgID, streams := range combos {
+			got, err := svc.ListStreams(ctx, orgID, influxdb.StreamListFilter{
+				StreamIncludes: []string{},
+			})
+			require.NoError(t, err)
+			assertStreamNames(t, streams, got)
+		}
+	})
+
+	t.Run("can select specific streams and get only those for that org", func(t *testing.T) {
+		for orgID, streams := range combos {
+			got, err := svc.ListStreams(ctx, orgID, influxdb.StreamListFilter{
+				StreamIncludes: streams,
+			})
+			require.NoError(t, err)
+			assertStreamNames(t, streams, got)
+		}
+	})
+
+	t.Run("can delete a single stream with DeleteStreams, but does not delete streams for other org", func(t *testing.T) {
+		err := svc.DeleteStreams(ctx, *orgID1, influxdb.BasicStream{
+			Names: []string{"org1_s1", "org2_s1"},
+		})
+		require.NoError(t, err)
+
+		got, err := svc.ListStreams(ctx, *orgID1, influxdb.StreamListFilter{
+			StreamIncludes: []string{},
+		})
+		require.NoError(t, err)
+		assertStreamNames(t, []string{"org1_s2", "org1_s3", "org1_s4"}, got)
+
+		got, err = svc.ListStreams(ctx, *orgID2, influxdb.StreamListFilter{
+			StreamIncludes: []string{},
+		})
+		require.NoError(t, err)
+		assertStreamNames(t, []string{"org2_s1"}, got)
+	})
+
+	t.Run("can delete all streams for all orgs", func(t *testing.T) {
+		for orgID, streams := range combos {
+			err := svc.DeleteStreams(ctx, orgID, influxdb.BasicStream{
+				Names: streams,
+			})
+			require.NoError(t, err)
+
+			got, err := svc.ListStreams(ctx, orgID, influxdb.StreamListFilter{
+				StreamIncludes: []string{},
+			})
+			require.NoError(t, err)
+			require.Equal(t, []influxdb.StoredStream{},
got)
+		}
+	})
+}
+
+func assertAnnotationEvents(t *testing.T, got, want []influxdb.AnnotationEvent) {
+	t.Helper()
+
+	require.Equal(t, len(want), len(got))
+
+	sort.Slice(want, func(i, j int) bool {
+		return want[i].StreamTag < want[j].StreamTag
+	})
+
+	sort.Slice(got, func(i, j int) bool {
+		return got[i].StreamTag < got[j].StreamTag
+	})
+
+	for idx, w := range want {
+		w.ID = got[idx].ID
+		require.Equal(t, w, got[idx])
+	}
+}
+
+// assertStoredAnnotations checks that two lists of stored annotations are
+// equivalent, ignoring the generated ID and StreamID values.
+func assertStoredAnnotations(t *testing.T, got, want []influxdb.StoredAnnotation) {
+	t.Helper()
+
+	require.Equal(t, len(want), len(got))
+
+	sort.Slice(want, func(i, j int) bool {
+		return want[i].ID < want[j].ID
+	})
+
+	sort.Slice(got, func(i, j int) bool {
+		return got[i].ID < got[j].ID
+	})
+
+	for idx, w := range want {
+		w.ID = got[idx].ID
+		w.StreamID = got[idx].StreamID
+		require.Equal(t, w, got[idx])
+	}
+}
+
+func assertStreamNames(t *testing.T, want []string, got []influxdb.StoredStream) {
+	t.Helper()
+
+	storedNames := make([]string, len(got))
+	for i, s := range got {
+		storedNames[i] = s.Name
+	}
+
+	require.ElementsMatch(t, want, storedNames)
+}
+
+func newTestService(t *testing.T) (*Service, func(t *testing.T)) {
+	t.Helper()
+
+	store, clean := sqlite.NewTestStore(t)
+	ctx := context.Background()
+
+	sqliteMigrator := sqlite.NewMigrator(store, zap.NewNop())
+	err := sqliteMigrator.Up(ctx, migrations.AllUp)
+	require.NoError(t, err)
+
+	svc := NewService(store)
+
+	return svc, clean
+}
diff --git a/annotations/transport/annotations_router.go b/annotations/transport/annotations_router.go
new file mode 100644
index 00000000000..e2f6b096040
--- /dev/null
+++ b/annotations/transport/annotations_router.go
@@ -0,0 +1,289 @@
+package transport
+
+import (
+	"encoding/json"
+	"net/http"
+	"time"
+
+	"github.com/go-chi/chi"
+	"github.com/influxdata/influxdb/v2"
+	"github.com/influxdata/influxdb/v2/kit/platform"
+)
+
+func (h *AnnotationHandler) annotationsRouter() http.Handler {
+	r := chi.NewRouter()
+
+	r.Post("/", h.handleCreateAnnotations)
+	r.Get("/", h.handleGetAnnotations)
+	r.Delete("/", h.handleDeleteAnnotations)
+
+	r.Route("/{id}", func(r chi.Router) {
+		r.Get("/", h.handleGetAnnotation)
+		r.Delete("/", h.handleDeleteAnnotation)
+		r.Put("/", h.handleUpdateAnnotation)
+	})
+
+	return r
+}
+
+func (h *AnnotationHandler) handleCreateAnnotations(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
+
+	o, err := platform.IDFromString(r.URL.Query().Get("orgID"))
+	if err != nil {
+		h.api.Err(w, r, errBadOrg)
+		return
+	}
+
+	c, err := decodeCreateAnnotationsRequest(r)
+	if err != nil {
+		h.api.Err(w, r, err)
+		return
+	}
+
+	l, err := h.annotationService.CreateAnnotations(ctx, *o, c)
+	if err != nil {
+		h.api.Err(w, r, err)
+		return
+	}
+
+	h.api.Respond(w, r, http.StatusOK, l)
+}
+
+func (h *AnnotationHandler) handleGetAnnotations(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
+
+	o, err := platform.IDFromString(r.URL.Query().Get("orgID"))
+	if err != nil {
+		h.api.Err(w, r, errBadOrg)
+		return
+	}
+
+	f, err := decodeListAnnotationsRequest(r)
+	if err != nil {
+		h.api.Err(w, r, err)
+		return
+	}
+
+	s, err := h.annotationService.ListAnnotations(ctx, *o, *f)
+	if err != nil {
+		h.api.Err(w, r, err)
+		return
+	}
+
+	l, err := storedAnnotationsToReadAnnotations(s)
+	if err != nil {
+		h.api.Err(w, r, err)
+		return
+	}
+
+	h.api.Respond(w, r, http.StatusOK, l)
+}
+
+func (h *AnnotationHandler) handleDeleteAnnotations(w http.ResponseWriter, r *http.Request) {
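+	// Bulk delete: annotations are matched by org, time range, and any
+	// stream/sticker filters supplied in the query string.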
ctx := r.Context() + + o, err := platform.IDFromString(r.URL.Query().Get("orgID")) + if err != nil { + h.api.Err(w, r, errBadOrg) + return + } + + f, err := decodeDeleteAnnotationsRequest(r) + if err != nil { + h.api.Err(w, r, err) + return + } + + if err = h.annotationService.DeleteAnnotations(ctx, *o, *f); err != nil { + h.api.Err(w, r, err) + return + } + + h.api.Respond(w, r, http.StatusNoContent, nil) +} + +func (h *AnnotationHandler) handleGetAnnotation(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + id, err := platform.IDFromString(chi.URLParam(r, "id")) + if err != nil { + h.api.Err(w, r, errBadAnnotationId) + return + } + + s, err := h.annotationService.GetAnnotation(ctx, *id) + if err != nil { + h.api.Err(w, r, err) + return + } + + c, err := storedAnnotationToEvent(s) + if err != nil { + h.api.Err(w, r, err) + return + } + + h.api.Respond(w, r, http.StatusOK, c) +} + +func (h *AnnotationHandler) handleDeleteAnnotation(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + id, err := platform.IDFromString(chi.URLParam(r, "id")) + if err != nil { + h.api.Err(w, r, errBadAnnotationId) + return + } + + if err := h.annotationService.DeleteAnnotation(ctx, *id); err != nil { + h.api.Err(w, r, err) + return + } + + h.api.Respond(w, r, http.StatusNoContent, nil) +} + +func (h *AnnotationHandler) handleUpdateAnnotation(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + id, err := platform.IDFromString(chi.URLParam(r, "id")) + if err != nil { + h.api.Err(w, r, errBadAnnotationId) + return + } + + u, err := decodeUpdateAnnotationRequest(r) + if err != nil { + h.api.Err(w, r, err) + return + } + + a, err := h.annotationService.UpdateAnnotation(ctx, *id, *u) + if err != nil { + h.api.Err(w, r, err) + return + } + + h.api.Respond(w, r, http.StatusOK, a) +} + +func decodeCreateAnnotationsRequest(r *http.Request) ([]influxdb.AnnotationCreate, error) { + cs := []influxdb.AnnotationCreate{} + if err := json.NewDecoder(r.Body).Decode(&cs); err != nil { + return nil, err + } + + for _, c := range cs { + if err := c.Validate(time.Now); err != nil { + return nil, err + } + } + + return cs, nil +} + +func decodeListAnnotationsRequest(r *http.Request) (*influxdb.AnnotationListFilter, error) { + startTime, endTime, err := tFromReq(r) + if err != nil { + return nil, err + } + + f := &influxdb.AnnotationListFilter{ + StreamIncludes: r.URL.Query()["streamIncludes"], + BasicFilter: influxdb.BasicFilter{ + EndTime: endTime, + StartTime: startTime, + }, + } + f.SetStickerIncludes(r.URL.Query()) + if err := f.Validate(time.Now); err != nil { + return nil, err + } + + return f, nil +} + +func decodeDeleteAnnotationsRequest(r *http.Request) (*influxdb.AnnotationDeleteFilter, error) { + // Try to get a stream ID from the query params. The stream ID is not required, + // so if one is not set we can leave streamID as the zero value. 
+ var streamID platform.ID + if qid := r.URL.Query().Get("streamID"); qid != "" { + id, err := platform.IDFromString(qid) + // if a streamID parameter was provided but is not valid, return an error + if err != nil { + return nil, errBadStreamId + } + streamID = *id + } + + startTime, endTime, err := tFromReq(r) + if err != nil { + return nil, err + } + + f := &influxdb.AnnotationDeleteFilter{ + StreamTag: r.URL.Query().Get("stream"), + StreamID: streamID, + EndTime: endTime, + StartTime: startTime, + } + f.SetStickers(r.URL.Query()) + if err := f.Validate(); err != nil { + return nil, err + } + + return f, nil +} + +func decodeUpdateAnnotationRequest(r *http.Request) (*influxdb.AnnotationCreate, error) { + u := &influxdb.AnnotationCreate{} + if err := json.NewDecoder(r.Body).Decode(u); err != nil { + return nil, err + } else if err := u.Validate(time.Now); err != nil { + return nil, err + } + + return u, nil +} + +func storedAnnotationsToReadAnnotations(s []influxdb.StoredAnnotation) (influxdb.ReadAnnotations, error) { + r := influxdb.ReadAnnotations{} + + for _, val := range s { + r[val.StreamTag] = append(r[val.StreamTag], influxdb.ReadAnnotation{ + ID: val.ID, + Summary: val.Summary, + Message: val.Message, + Stickers: val.Stickers, + StartTime: val.Lower, + EndTime: val.Upper, + }) + } + + return r, nil +} + +func storedAnnotationToEvent(s *influxdb.StoredAnnotation) (*influxdb.AnnotationEvent, error) { + st, err := tStringToPointer(s.Lower) + if err != nil { + return nil, err + } + + et, err := tStringToPointer(s.Upper) + if err != nil { + return nil, err + } + + return &influxdb.AnnotationEvent{ + ID: s.ID, + AnnotationCreate: influxdb.AnnotationCreate{ + StreamTag: s.StreamTag, + Summary: s.Summary, + Message: s.Message, + Stickers: s.Stickers, + EndTime: et, + StartTime: st, + }, + }, nil +} diff --git a/annotations/transport/annotations_router_test.go b/annotations/transport/annotations_router_test.go new file mode 100644 index 00000000000..0b0d89a0421 --- /dev/null +++ b/annotations/transport/annotations_router_test.go @@ -0,0 +1,267 @@ +package transport + +import ( + "encoding/json" + "net/http" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/influxdata/influxdb/v2" + influxdbtesting "github.com/influxdata/influxdb/v2/testing" + "github.com/stretchr/testify/require" +) + +var ( + testCreateAnnotation = influxdb.AnnotationCreate{ + StreamTag: "sometag", + Summary: "testing the api", + Message: "stored annotation message", + Stickers: map[string]string{"val1": "sticker1", "val2": "sticker2"}, + EndTime: &now, + StartTime: &now, + } + + testEvent = influxdb.AnnotationEvent{ + ID: *id, + AnnotationCreate: testCreateAnnotation, + } + + testReadAnnotation1 = influxdb.ReadAnnotation{ + ID: *influxdbtesting.IDPtr(1), + } + + testReadAnnotation2 = influxdb.ReadAnnotation{ + ID: *influxdbtesting.IDPtr(2), + } + + testStoredAnnotation = influxdb.StoredAnnotation{ + ID: *id, + OrgID: *orgID, + StreamID: *influxdbtesting.IDPtr(3), + StreamTag: "sometag", + Summary: "testing the api", + Message: "stored annotation message", + Stickers: map[string]string{"val1": "sticker1", "val2": "sticker2"}, + Lower: now.Format(time.RFC3339), + Upper: now.Format(time.RFC3339), + } + + testReadAnnotations = influxdb.ReadAnnotations{ + "sometag": []influxdb.ReadAnnotation{ + { + ID: testStoredAnnotation.ID, + Summary: testStoredAnnotation.Summary, + Message: testStoredAnnotation.Message, + Stickers: map[string]string{"val1": "sticker1", "val2":
"sticker2"}, + EndTime: testStoredAnnotation.Lower, + StartTime: testStoredAnnotation.Upper, + }, + }, + } +) + +func TestAnnotationRouter(t *testing.T) { + t.Parallel() + + t.Run("get annotations happy path", func(t *testing.T) { + ts, svc := newTestServer(t) + defer ts.Close() + + req := newTestRequest(t, "GET", ts.URL+"/annotations", nil) + + q := req.URL.Query() + q.Add("orgID", orgStr) + q.Add("endTime", now.Format(time.RFC3339)) + q.Add("stickerIncludes[product]", "oss") + q.Add("stickerIncludes[env]", "dev") + q.Add("streamIncludes", "stream1") + q.Add("streamIncludes", "stream2") + req.URL.RawQuery = q.Encode() + + want := []influxdb.AnnotationList{ + { + StreamTag: "stream1", + Annotations: []influxdb.ReadAnnotation{testReadAnnotation1}, + }, + { + StreamTag: "stream2", + Annotations: []influxdb.ReadAnnotation{testReadAnnotation2}, + }, + } + + svc.EXPECT(). + ListAnnotations(gomock.Any(), *orgID, influxdb.AnnotationListFilter{ + StickerIncludes: map[string]string{"product": "oss", "env": "dev"}, + StreamIncludes: []string{"stream1", "stream2"}, + BasicFilter: influxdb.BasicFilter{ + StartTime: &time.Time{}, + EndTime: &now, + }, + }). + Return([]influxdb.StoredAnnotation{ + { + ID: testReadAnnotation1.ID, + StreamTag: "stream1", + }, + { + ID: testReadAnnotation2.ID, + StreamTag: "stream2", + }, + }, nil) + + res := doTestRequest(t, req, http.StatusOK, true) + + got := []influxdb.AnnotationList{} + err := json.NewDecoder(res.Body).Decode(&got) + require.NoError(t, err) + require.ElementsMatch(t, want, got) + }) + + t.Run("create annotations happy path", func(t *testing.T) { + ts, svc := newTestServer(t) + defer ts.Close() + + createAnnotations := []influxdb.AnnotationCreate{testCreateAnnotation} + + req := newTestRequest(t, "POST", ts.URL+"/annotations", createAnnotations) + + q := req.URL.Query() + q.Add("orgID", orgStr) + req.URL.RawQuery = q.Encode() + + want := []influxdb.AnnotationEvent{testEvent} + + svc.EXPECT(). + CreateAnnotations(gomock.Any(), *orgID, createAnnotations). + Return(want, nil) + + res := doTestRequest(t, req, http.StatusOK, true) + + got := []influxdb.AnnotationEvent{} + err := json.NewDecoder(res.Body).Decode(&got) + require.NoError(t, err) + require.Equal(t, want, got) + }) + + t.Run("delete annotations happy path", func(t *testing.T) { + ts, svc := newTestServer(t) + defer ts.Close() + + req := newTestRequest(t, "DELETE", ts.URL+"/annotations", nil) + q := req.URL.Query() + q.Add("orgID", orgStr) + q.Add("stream", "someTag") + q.Add("startTime", now.Format(time.RFC3339)) + q.Add("endTime", later.Format(time.RFC3339)) + req.URL.RawQuery = q.Encode() + + svc.EXPECT(). + DeleteAnnotations(gomock.Any(), *orgID, influxdb.AnnotationDeleteFilter{ + StreamTag: "someTag", + StartTime: &now, + EndTime: &later, + Stickers: map[string]string{}, + }). + Return(nil) + + doTestRequest(t, req, http.StatusNoContent, false) + }) + + t.Run("get annotation happy path", func(t *testing.T) { + ts, svc := newTestServer(t) + defer ts.Close() + + req := newTestRequest(t, "GET", ts.URL+"/annotations/"+idStr, nil) + + svc.EXPECT(). + GetAnnotation(gomock.Any(), *id). 
+ Return(&testStoredAnnotation, nil) + + res := doTestRequest(t, req, http.StatusOK, true) + + got := &influxdb.AnnotationEvent{} + err := json.NewDecoder(res.Body).Decode(got) + require.NoError(t, err) + require.Equal(t, &testEvent, got) + }) + + t.Run("delete annotation happy path", func(t *testing.T) { + ts, svc := newTestServer(t) + defer ts.Close() + + req := newTestRequest(t, "DELETE", ts.URL+"/annotations/"+idStr, nil) + + svc.EXPECT(). + DeleteAnnotation(gomock.Any(), *id). + Return(nil) + + doTestRequest(t, req, http.StatusNoContent, false) + }) + + t.Run("update annotation happy path", func(t *testing.T) { + ts, svc := newTestServer(t) + defer ts.Close() + + req := newTestRequest(t, "PUT", ts.URL+"/annotations/"+idStr, testCreateAnnotation) + + svc.EXPECT(). + UpdateAnnotation(gomock.Any(), *id, testCreateAnnotation). + Return(&testEvent, nil) + + res := doTestRequest(t, req, http.StatusOK, true) + + got := &influxdb.AnnotationEvent{} + err := json.NewDecoder(res.Body).Decode(got) + require.NoError(t, err) + require.Equal(t, &testEvent, got) + }) + + t.Run("invalid org ids return 400 when required", func(t *testing.T) { + methods := []string{"POST", "GET", "DELETE"} + + for _, m := range methods { + t.Run(m, func(t *testing.T) { + ts, _ := newTestServer(t) + defer ts.Close() + + req := newTestRequest(t, m, ts.URL+"/annotations", nil) + q := req.URL.Query() + q.Add("orgID", "badid") + req.URL.RawQuery = q.Encode() + + doTestRequest(t, req, http.StatusBadRequest, false) + }) + } + }) + + t.Run("invalid annotation ids return 400 when required", func(t *testing.T) { + methods := []string{"GET", "DELETE", "PUT"} + + for _, m := range methods { + t.Run(m, func(t *testing.T) { + ts, _ := newTestServer(t) + defer ts.Close() + + req := newTestRequest(t, m, ts.URL+"/annotations/badID", nil) + doTestRequest(t, req, http.StatusBadRequest, false) + }) + } + }) +} + +func TestStoredAnnotationsToReadAnnotations(t *testing.T) { + t.Parallel() + + got, err := storedAnnotationsToReadAnnotations([]influxdb.StoredAnnotation{testStoredAnnotation}) + require.NoError(t, err) + require.Equal(t, got, testReadAnnotations) +} + +func TestStoredAnnotationToEvent(t *testing.T) { + t.Parallel() + + got, err := storedAnnotationToEvent(&testStoredAnnotation) + require.NoError(t, err) + require.Equal(t, got, &testEvent) +} diff --git a/annotations/transport/helpers_test.go b/annotations/transport/helpers_test.go new file mode 100644 index 00000000000..6b0c501b95b --- /dev/null +++ b/annotations/transport/helpers_test.go @@ -0,0 +1,54 @@ +package transport + +import ( + "bytes" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/influxdata/influxdb/v2/kit/platform" + "github.com/influxdata/influxdb/v2/mock" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zaptest" +) + +var ( + orgStr = "1234123412341234" + orgID, _ = platform.IDFromString(orgStr) + idStr = "4321432143214321" + id, _ = platform.IDFromString(idStr) + now = time.Now().UTC().Truncate(time.Second) + later = now.Add(5 * time.Minute) +) + +func newTestServer(t *testing.T) (*httptest.Server, *mock.MockAnnotationService) { + ctrlr := gomock.NewController(t) + svc := mock.NewMockAnnotationService(ctrlr) + server := NewAnnotationHandler(zaptest.NewLogger(t), svc) + return httptest.NewServer(server), svc +} + +func newTestRequest(t *testing.T, method, path string, body interface{}) *http.Request { + dat, err := json.Marshal(body) + 
require.NoError(t, err) + + req, err := http.NewRequest(method, path, bytes.NewBuffer(dat)) + require.NoError(t, err) + + req.Header.Add("Content-Type", "application/json") + + return req +} + +func doTestRequest(t *testing.T, req *http.Request, wantCode int, needJSON bool) *http.Response { + res, err := http.DefaultClient.Do(req) + require.NoError(t, err) + require.Equal(t, wantCode, res.StatusCode) + if needJSON { + require.Equal(t, "application/json; charset=utf-8", res.Header.Get("Content-Type")) + } + return res +} diff --git a/annotations/transport/http.go b/annotations/transport/http.go new file mode 100644 index 00000000000..f603e9b7b42 --- /dev/null +++ b/annotations/transport/http.go @@ -0,0 +1,104 @@ +package transport + +import ( + "net/http" + "time" + + "github.com/go-chi/chi" + "github.com/go-chi/chi/middleware" + "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/kit/platform/errors" + kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" + "go.uber.org/zap" +) + +const ( + // this is the base api prefix, since the annotations system mounts handlers at + // both the ../annotations and ../streams paths. + prefixAnnotations = "/api/v2private" +) + +var ( + errBadOrg = &errors.Error{ + Code: errors.EInvalid, + Msg: "invalid or missing org id", + } + + errBadAnnotationId = &errors.Error{ + Code: errors.EInvalid, + Msg: "annotation id is invalid", + } + + errBadStreamId = &errors.Error{ + Code: errors.EInvalid, + Msg: "stream id is invalid", + } + + errBadStreamName = &errors.Error{ + Code: errors.EInvalid, + Msg: "invalid stream name", + } +) + +// AnnotationHandler is the handler for the annotation service +type AnnotationHandler struct { + chi.Router + + log *zap.Logger + api *kithttp.API + + annotationService influxdb.AnnotationService +} + +func NewAnnotationHandler(log *zap.Logger, annotationService influxdb.AnnotationService) *AnnotationHandler { + h := &AnnotationHandler{ + log: log, + api: kithttp.NewAPI(kithttp.WithLog(log)), + annotationService: annotationService, + } + + r := chi.NewRouter() + r.Use( + middleware.Recoverer, + middleware.RequestID, + middleware.RealIP, + ) + + r.Mount("/annotations", h.annotationsRouter()) + r.Mount("/streams", h.streamsRouter()) + h.Router = r + + return h +} + +func (h *AnnotationHandler) Prefix() string { + return prefixAnnotations +} + +// tFromReq and tStringToPointer are used in handlers to extract time values from query parameters. +// pointers to time.Time structs are used, since the JSON responses may omit empty (nil pointer) times.
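A self-contained illustration of that convention, using only the standard library. parseTimePtr here is a hypothetical stand-in mirroring the behavior the comment describes for the tStringToPointer helper defined next; it is not the production code:

```go
// An empty query value yields a nil *time.Time, which (with omitempty)
// JSON-marshals to an omitted field rather than a zero timestamp.
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

func parseTimePtr(s string) (*time.Time, error) {
	if s == "" {
		return nil, nil // absent parameter: no time bound on the filter
	}
	t, err := time.Parse(time.RFC3339, s)
	if err != nil {
		return nil, err
	}
	return &t, nil
}

type filter struct {
	StartTime *time.Time `json:"startTime,omitempty"`
	EndTime   *time.Time `json:"endTime,omitempty"`
}

func main() {
	st, _ := parseTimePtr("")                     // nil pointer
	et, _ := parseTimePtr("2021-01-02T15:04:05Z") // parsed timestamp
	b, _ := json.Marshal(filter{StartTime: st, EndTime: et})
	fmt.Println(string(b)) // {"endTime":"2021-01-02T15:04:05Z"}
}
```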
+func tFromReq(r *http.Request) (*time.Time, *time.Time, error) { + st, err := tStringToPointer(r.URL.Query().Get("startTime")) + if err != nil { + return nil, nil, err + } + + et, err := tStringToPointer(r.URL.Query().Get("endTime")) + if err != nil { + return nil, nil, err + } + + return st, et, nil +} + +func tStringToPointer(s string) (*time.Time, error) { + if s == "" { + return nil, nil + } + + t, err := time.Parse(time.RFC3339, s) + if err != nil { + return nil, err + } + return &t, nil +} diff --git a/annotations/transport/streams_router.go b/annotations/transport/streams_router.go new file mode 100644 index 00000000000..2ff90a533f2 --- /dev/null +++ b/annotations/transport/streams_router.go @@ -0,0 +1,206 @@ +package transport + +import ( + "encoding/json" + "net/http" + "time" + + "github.com/go-chi/chi" + "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/kit/platform" +) + +func (h *AnnotationHandler) streamsRouter() http.Handler { + r := chi.NewRouter() + + r.Put("/", h.handleCreateOrUpdateStream) + r.Get("/", h.handleGetStreams) + r.Delete("/", h.handleDeleteStreams) + + r.Route("/{id}", func(r chi.Router) { + r.Delete("/", h.handleDeleteStream) + r.Put("/", h.handleUpdateStreamByID) + }) + + return r +} + +func (h *AnnotationHandler) handleCreateOrUpdateStream(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + o, err := platform.IDFromString(r.URL.Query().Get("orgID")) + if err != nil { + h.api.Err(w, r, errBadOrg) + return + } + + u, err := decodeCreateOrUpdateStreamRequest(r) + if err != nil { + h.api.Err(w, r, err) + return + } + + s, err := h.annotationService.CreateOrUpdateStream(ctx, *o, *u) + if err != nil { + h.api.Err(w, r, err) + return + } + + h.api.Respond(w, r, http.StatusOK, s) +} + +func (h *AnnotationHandler) handleGetStreams(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + o, err := platform.IDFromString(r.URL.Query().Get("orgID")) + if err != nil { + h.api.Err(w, r, errBadOrg) + return + } + + f, err := decodeListStreamsRequest(r) + if err != nil { + h.api.Err(w, r, err) + return + } + + s, err := h.annotationService.ListStreams(ctx, *o, *f) + if err != nil { + h.api.Err(w, r, err) + return + } + + h.api.Respond(w, r, http.StatusOK, storedStreamsToReadStreams(s)) +} + +// Delete stream(s) by name, capable of handling a list of names +func (h *AnnotationHandler) handleDeleteStreams(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + o, err := platform.IDFromString(r.URL.Query().Get("orgID")) + if err != nil { + h.api.Err(w, r, errBadOrg) + return + } + + f, err := decodeDeleteStreamsRequest(r) + if err != nil { + h.api.Err(w, r, err) + return + } + + // delete all of the streams according to the filter. annotations associated with the stream + // will be deleted by the ON DELETE CASCADE relationship between streams and annotations. 
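A sketch of the schema relationship that comment depends on. The table and column names below are assumptions made for illustration only; the real DDL lives in the sqlite migrations applied by newTestService earlier in this diff:

```go
// The point is the foreign key: deleting a stream row cascades to its
// annotations, which is why the delete handlers issue no separate cleanup.
package main

import "fmt"

const sketchSchema = `
CREATE TABLE streams (
	id     TEXT PRIMARY KEY,
	org_id TEXT NOT NULL,
	name   TEXT NOT NULL
);

CREATE TABLE annotations (
	id        TEXT PRIMARY KEY,
	stream_id TEXT NOT NULL REFERENCES streams (id) ON DELETE CASCADE,
	summary   TEXT,
	lower     TEXT,
	upper     TEXT
);
`

func main() { fmt.Print(sketchSchema) }
```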
+ if err = h.annotationService.DeleteStreams(ctx, *o, *f); err != nil { + h.api.Err(w, r, err) + return + } + + h.api.Respond(w, r, http.StatusNoContent, nil) +} + +// Delete a single stream by ID +func (h *AnnotationHandler) handleDeleteStream(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + id, err := platform.IDFromString(chi.URLParam(r, "id")) + if err != nil { + h.api.Err(w, r, errBadStreamId) + return + } + + // as in the handleDeleteStreams method above, deleting a stream will delete annotations + // associated with it due to the ON DELETE CASCADE relationship between the two + if err := h.annotationService.DeleteStreamByID(ctx, *id); err != nil { + h.api.Err(w, r, err) + return + } + + h.api.Respond(w, r, http.StatusNoContent, nil) +} + +func (h *AnnotationHandler) handleUpdateStreamByID(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + id, err := platform.IDFromString(chi.URLParam(r, "id")) + if err != nil { + h.api.Err(w, r, errBadStreamId) + return + } + + u, err := decodeCreateOrUpdateStreamRequest(r) + if err != nil { + h.api.Err(w, r, err) + return + } + + s, err := h.annotationService.UpdateStream(ctx, *id, *u) + if err != nil { + h.api.Err(w, r, err) + return + } + + h.api.Respond(w, r, http.StatusOK, s) +} + +func decodeCreateOrUpdateStreamRequest(r *http.Request) (*influxdb.Stream, error) { + s := influxdb.Stream{} + + if err := json.NewDecoder(r.Body).Decode(&s); err != nil { + return nil, err + } + + if err := s.Validate(false); err != nil { + return nil, err + } + + return &s, nil +} + +func decodeListStreamsRequest(r *http.Request) (*influxdb.StreamListFilter, error) { + startTime, endTime, err := tFromReq(r) + if err != nil { + return nil, err + } + + f := &influxdb.StreamListFilter{ + StreamIncludes: r.URL.Query()["streamIncludes"], + BasicFilter: influxdb.BasicFilter{ + EndTime: endTime, + StartTime: startTime, + }, + } + + if err := f.Validate(time.Now); err != nil { + return nil, err + } + return f, nil +} + +func decodeDeleteStreamsRequest(r *http.Request) (*influxdb.BasicStream, error) { + f := &influxdb.BasicStream{ + Names: r.URL.Query()["stream"], + } + + if !f.IsValid() { + return nil, errBadStreamName + } + + return f, nil +} + +func storedStreamsToReadStreams(stored []influxdb.StoredStream) []influxdb.ReadStream { + r := make([]influxdb.ReadStream, 0, len(stored)) + + for _, s := range stored { + r = append(r, influxdb.ReadStream{ + ID: s.ID, + Name: s.Name, + Description: s.Description, + CreatedAt: s.CreatedAt, + UpdatedAt: s.UpdatedAt, + }) + } + + return r +} diff --git a/annotations/transport/streams_router_test.go b/annotations/transport/streams_router_test.go new file mode 100644 index 00000000000..ece4806d483 --- /dev/null +++ b/annotations/transport/streams_router_test.go @@ -0,0 +1,198 @@ +package transport + +import ( + "encoding/json" + "net/http" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/influxdata/influxdb/v2" + influxdbtesting "github.com/influxdata/influxdb/v2/testing" + "github.com/stretchr/testify/require" +) + +var ( + testCreateStream = influxdb.Stream{ + Name: "test stream", + } + + testReadStream1 = &influxdb.ReadStream{ + ID: *influxdbtesting.IDPtr(1), + Name: "test stream 1", + CreatedAt: now, + UpdatedAt: now, + } + + testReadStream2 = &influxdb.ReadStream{ + ID: *influxdbtesting.IDPtr(2), + Name: "test stream 2", + CreatedAt: now, + UpdatedAt: now, + } + + testStoredStream1 = influxdb.StoredStream{ + ID: testReadStream1.ID, +
OrgID: *orgID, + Name: testReadStream1.Name, + Description: testReadStream1.Description, + CreatedAt: testReadStream1.CreatedAt, + UpdatedAt: testReadStream1.UpdatedAt, + } + + testStoredStream2 = influxdb.StoredStream{ + ID: testReadStream2.ID, + OrgID: *orgID, + Name: testReadStream2.Name, + Description: testReadStream2.Description, + CreatedAt: testReadStream2.CreatedAt, + UpdatedAt: testReadStream2.UpdatedAt, + } +) + +func TestStreamsRouter(t *testing.T) { + t.Parallel() + + t.Run("create or update stream happy path", func(t *testing.T) { + ts, svc := newTestServer(t) + defer ts.Close() + + req := newTestRequest(t, "PUT", ts.URL+"/streams", testCreateStream) + + q := req.URL.Query() + q.Add("orgID", orgStr) + req.URL.RawQuery = q.Encode() + + svc.EXPECT(). + CreateOrUpdateStream(gomock.Any(), *orgID, testCreateStream). + Return(testReadStream1, nil) + + res := doTestRequest(t, req, http.StatusOK, true) + + got := &influxdb.ReadStream{} + err := json.NewDecoder(res.Body).Decode(got) + require.NoError(t, err) + require.Equal(t, testReadStream1, got) + }) + + t.Run("get streams happy path", func(t *testing.T) { + ts, svc := newTestServer(t) + defer ts.Close() + + req := newTestRequest(t, "GET", ts.URL+"/streams", nil) + + q := req.URL.Query() + q.Add("orgID", orgStr) + q.Add("endTime", now.Format(time.RFC3339)) + q.Add("streamIncludes", "stream1") + q.Add("streamIncludes", "stream2") + req.URL.RawQuery = q.Encode() + + svc.EXPECT(). + ListStreams(gomock.Any(), *orgID, influxdb.StreamListFilter{ + StreamIncludes: []string{"stream1", "stream2"}, + BasicFilter: influxdb.BasicFilter{ + StartTime: &time.Time{}, + EndTime: &now, + }, + }). + Return([]influxdb.StoredStream{testStoredStream1, testStoredStream2}, nil) + + res := doTestRequest(t, req, http.StatusOK, true) + + got := []influxdb.ReadStream{} + err := json.NewDecoder(res.Body).Decode(&got) + require.NoError(t, err) + require.ElementsMatch(t, []influxdb.ReadStream{*testReadStream1, *testReadStream2}, got) + }) + + t.Run("delete streams (by name) happy path", func(t *testing.T) { + ts, svc := newTestServer(t) + defer ts.Close() + + req := newTestRequest(t, "DELETE", ts.URL+"/streams", nil) + q := req.URL.Query() + q.Add("orgID", orgStr) + q.Add("stream", "stream1") + q.Add("stream", "stream2") + req.URL.RawQuery = q.Encode() + + svc.EXPECT(). + DeleteStreams(gomock.Any(), *orgID, influxdb.BasicStream{ + Names: []string{"stream1", "stream2"}, + }). + Return(nil) + + doTestRequest(t, req, http.StatusNoContent, false) + }) + + t.Run("delete stream happy path", func(t *testing.T) { + ts, svc := newTestServer(t) + defer ts.Close() + + req := newTestRequest(t, "DELETE", ts.URL+"/streams/"+idStr, nil) + + svc.EXPECT(). + DeleteStreamByID(gomock.Any(), *id). + Return(nil) + + doTestRequest(t, req, http.StatusNoContent, false) + }) + + t.Run("update stream by id happy path", func(t *testing.T) { + ts, svc := newTestServer(t) + defer ts.Close() + + req := newTestRequest(t, "PUT", ts.URL+"/streams/"+idStr, testCreateStream) + + svc.EXPECT(). + UpdateStream(gomock.Any(), *id, testCreateStream). 
+ Return(testReadStream1, nil) + + res := doTestRequest(t, req, http.StatusOK, true) + + got := &influxdb.ReadStream{} + err := json.NewDecoder(res.Body).Decode(got) + require.NoError(t, err) + require.Equal(t, testReadStream1, got) + }) + + t.Run("invalid org ids return 400 when required", func(t *testing.T) { + methods := []string{"GET", "PUT", "DELETE"} + + for _, m := range methods { + t.Run(m, func(t *testing.T) { + ts, _ := newTestServer(t) + defer ts.Close() + + req := newTestRequest(t, m, ts.URL+"/streams", nil) + q := req.URL.Query() + q.Add("orgID", "badid") + req.URL.RawQuery = q.Encode() + + doTestRequest(t, req, http.StatusBadRequest, false) + }) + } + }) + + t.Run("invalid stream ids return 400 when required", func(t *testing.T) { + methods := []string{"DELETE", "PUT"} + + for _, m := range methods { + t.Run(m, func(t *testing.T) { + ts, _ := newTestServer(t) + defer ts.Close() + + req := newTestRequest(t, m, ts.URL+"/streams/badID", nil) + doTestRequest(t, req, http.StatusBadRequest, false) + }) + } + }) +} + +func TestStoredStreamsToReadStreams(t *testing.T) { + t.Parallel() + + got := storedStreamsToReadStreams([]influxdb.StoredStream{testStoredStream1, testStoredStream2}) + require.Equal(t, got, []influxdb.ReadStream{*testReadStream1, *testReadStream2}) +} diff --git a/auth.go b/auth.go index 5e030034b04..e5910b266f9 100644 --- a/auth.go +++ b/auth.go @@ -3,25 +3,28 @@ package influxdb import ( "context" "fmt" + + "github.com/influxdata/influxdb/v2/kit/platform" + "github.com/influxdata/influxdb/v2/kit/platform/errors" ) // AuthorizationKind is returned by (*Authorization).Kind(). const AuthorizationKind = "authorization" // ErrUnableToCreateToken sanitized error message for all errors when a user cannot create a token -var ErrUnableToCreateToken = &Error{ +var ErrUnableToCreateToken = &errors.Error{ Msg: "unable to create token", - Code: EInvalid, + Code: errors.EInvalid, } // Authorization is an authorization. 🎉 type Authorization struct { - ID ID `json:"id"` + ID platform.ID `json:"id"` Token string `json:"token"` Status Status `json:"status"` Description string `json:"description"` - OrgID ID `json:"orgID"` - UserID ID `json:"userID,omitempty"` + OrgID platform.ID `json:"orgID"` + UserID platform.ID `json:"userID,omitempty"` Permissions []Permission `json:"permissions"` CRUDLog } @@ -36,9 +39,9 @@ type AuthorizationUpdate struct { func (a *Authorization) Valid() error { for _, p := range a.Permissions { if p.Resource.OrgID != nil && *p.Resource.OrgID != a.OrgID { - return &Error{ + return &errors.Error{ Msg: fmt.Sprintf("permission %s is not for org id %s", p, a.OrgID), - Code: EInvalid, + Code: errors.EInvalid, } } } @@ -49,8 +52,8 @@ func (a *Authorization) Valid() error { // PermissionSet returns the set of permissions associated with the Authorization. func (a *Authorization) PermissionSet() (PermissionSet, error) { if !a.IsActive() { - return nil, &Error{ - Code: EUnauthorized, + return nil, &errors.Error{ + Code: errors.EUnauthorized, Msg: "token is inactive", } } @@ -69,7 +72,7 @@ func (a *Authorization) IsActive() bool { } // GetUserID returns the user id. -func (a *Authorization) GetUserID() ID { +func (a *Authorization) GetUserID() platform.ID { return a.UserID } @@ -77,7 +80,7 @@ func (a *Authorization) GetUserID() ID { func (a *Authorization) Kind() string { return AuthorizationKind } // Identifier returns the authorizations ID and is used for auditing. 
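From here on, the dominant change is mechanical: influxdb.ID and influxdb.Error give way to platform.ID and errors.Error from the kit packages. For orientation, a stand-in sketch of the ID round-trip these signatures rely on, assuming (as the 16-hex-digit literals in the tests suggest) a uint64 rendered as zero-padded hex — illustrative, not the real kit/platform implementation:

```go
// A local stand-in for the platform.ID round-trip: parse from a 16-digit
// hex string, validate as non-zero, and render back to hex.
package main

import (
	"fmt"
	"strconv"
)

type id uint64

func idFromString(s string) (id, error) {
	v, err := strconv.ParseUint(s, 16, 64)
	if err != nil {
		return 0, err
	}
	return id(v), nil
}

func (i id) Valid() bool    { return i != 0 }
func (i id) String() string { return fmt.Sprintf("%016x", uint64(i)) }

func main() {
	v, err := idFromString("1234123412341234")
	if err != nil {
		panic(err)
	}
	fmt.Println(v.Valid(), v.String()) // true 1234123412341234
}
```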
-func (a *Authorization) Identifier() ID { return a.ID } +func (a *Authorization) Identifier() platform.ID { return a.ID } // auth service op const ( @@ -92,7 +95,7 @@ const ( // AuthorizationService represents a service for managing authorization data. type AuthorizationService interface { // Returns a single authorization by ID. - FindAuthorizationByID(ctx context.Context, id ID) (*Authorization, error) + FindAuthorizationByID(ctx context.Context, id platform.ID) (*Authorization, error) // Returns a single authorization by Token. FindAuthorizationByToken(ctx context.Context, t string) (*Authorization, error) @@ -105,20 +108,20 @@ type AuthorizationService interface { CreateAuthorization(ctx context.Context, a *Authorization) error // UpdateAuthorization updates the status and description if available. - UpdateAuthorization(ctx context.Context, id ID, upd *AuthorizationUpdate) (*Authorization, error) + UpdateAuthorization(ctx context.Context, id platform.ID, upd *AuthorizationUpdate) (*Authorization, error) // Removes an authorization by ID. - DeleteAuthorization(ctx context.Context, id ID) error + DeleteAuthorization(ctx context.Context, id platform.ID) error } // AuthorizationFilter represents a set of filters that restrict the returned results. type AuthorizationFilter struct { Token *string - ID *ID + ID *platform.ID - UserID *ID + UserID *platform.ID User *string - OrgID *ID + OrgID *platform.ID Org *string } diff --git a/authorization/error.go b/authorization/error.go index 2a538375faf..8247169e03b 100644 --- a/authorization/error.go +++ b/authorization/error.go @@ -3,64 +3,64 @@ package authorization import ( "fmt" - "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/kit/platform/errors" ) var ( // ErrInvalidAuthID is used when the Authorization's ID cannot be encoded - ErrInvalidAuthID = &influxdb.Error{ - Code: influxdb.EInvalid, + ErrInvalidAuthID = &errors.Error{ + Code: errors.EInvalid, Msg: "authorization ID is invalid", } // ErrAuthNotFound is used when the specified auth cannot be found - ErrAuthNotFound = &influxdb.Error{ - Code: influxdb.ENotFound, + ErrAuthNotFound = &errors.Error{ + Code: errors.ENotFound, Msg: "authorization not found", } // NotUniqueIDError occurs when attempting to create an Authorization with an ID that already belongs to another one - NotUniqueIDError = &influxdb.Error{ - Code: influxdb.EConflict, + NotUniqueIDError = &errors.Error{ + Code: errors.EConflict, Msg: "ID already exists", } // ErrFailureGeneratingID occurs only when the random number generator // cannot generate an ID in MaxIDGenerationN times. - ErrFailureGeneratingID = &influxdb.Error{ - Code: influxdb.EInternal, + ErrFailureGeneratingID = &errors.Error{ + Code: errors.EInternal, Msg: "unable to generate valid id", } // ErrTokenAlreadyExistsError is used when attempting to create an authorization // with a token that already exists - ErrTokenAlreadyExistsError = &influxdb.Error{ - Code: influxdb.EConflict, + ErrTokenAlreadyExistsError = &errors.Error{ + Code: errors.EConflict, Msg: "token already exists", } ) // ErrInvalidAuthIDError is used when a service was provided an invalid ID. -func ErrInvalidAuthIDError(err error) *influxdb.Error { - return &influxdb.Error{ - Code: influxdb.EInvalid, +func ErrInvalidAuthIDError(err error) *errors.Error { + return &errors.Error{ + Code: errors.EInvalid, Msg: "auth id provided is invalid", Err: err, } } // ErrInternalServiceError is used when the error comes from an internal system.
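The error values above all follow one convention: a typed error carrying a machine-readable Code that callers inspect instead of matching message strings (as newPermissionsResponse does later with errors.ErrorCode(err) == errors.ENotFound). A compact local mirror of that pattern, for illustration only — not the real kit/platform/errors type:

```go
// A typed error with a Code field, plus a helper that extracts the code
// and falls back to a generic value for unknown errors.
package main

import "fmt"

type codedError struct {
	Code string
	Msg  string
	Err  error
}

func (e *codedError) Error() string {
	if e.Err != nil {
		return fmt.Sprintf("%s: %v", e.Msg, e.Err)
	}
	return e.Msg
}

// errorCode mirrors the ErrorCode-style helper: typed errors expose their
// code, anything else is treated as internal.
func errorCode(err error) string {
	if e, ok := err.(*codedError); ok {
		return e.Code
	}
	return "internal error"
}

func main() {
	var err error = &codedError{Code: "not found", Msg: "authorization not found"}
	if errorCode(err) == "not found" {
		fmt.Println("skip the missing resource instead of failing the request")
	}
}
```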
-func ErrInternalServiceError(err error) *influxdb.Error { - return &influxdb.Error{ - Code: influxdb.EInternal, +func ErrInternalServiceError(err error) *errors.Error { + return &errors.Error{ + Code: errors.EInternal, Err: err, } } // UnexpectedAuthIndexError is used when an unexpected error is returned from the auth index. -func UnexpectedAuthIndexError(err error) *influxdb.Error { - return &influxdb.Error{ - Code: influxdb.EInternal, +func UnexpectedAuthIndexError(err error) *errors.Error { + return &errors.Error{ + Code: errors.EInternal, Msg: fmt.Sprintf("unexpected error retrieving auth index; Err: %v", err), } } diff --git a/authorization/http_client.go b/authorization/http_client.go index 48e0b77e105..d51d8366ef2 100644 --- a/authorization/http_client.go +++ b/authorization/http_client.go @@ -5,6 +5,7 @@ import ( "errors" "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/kit/platform" "github.com/influxdata/influxdb/v2/pkg/httpc" ) @@ -72,7 +73,7 @@ func (s *AuthorizationClientService) FindAuthorizationByToken(ctx context.Contex } // FindAuthorizationByID finds a single Authorization by its ID against a remote influx server. -func (s *AuthorizationClientService) FindAuthorizationByID(ctx context.Context, id influxdb.ID) (*influxdb.Authorization, error) { +func (s *AuthorizationClientService) FindAuthorizationByID(ctx context.Context, id platform.ID) (*influxdb.Authorization, error) { var b influxdb.Authorization err := s.Client. Get(prefixAuthorization, id.String()). @@ -85,7 +86,7 @@ func (s *AuthorizationClientService) FindAuthorizationByID(ctx context.Context, } // UpdateAuthorization updates the status and description if available. -func (s *AuthorizationClientService) UpdateAuthorization(ctx context.Context, id influxdb.ID, upd *influxdb.AuthorizationUpdate) (*influxdb.Authorization, error) { +func (s *AuthorizationClientService) UpdateAuthorization(ctx context.Context, id platform.ID, upd *influxdb.AuthorizationUpdate) (*influxdb.Authorization, error) { var res authResponse err := s.Client. PatchJSON(upd, prefixAuthorization, id.String()). @@ -99,7 +100,7 @@ func (s *AuthorizationClientService) UpdateAuthorization(ctx context.Context, id } // DeleteAuthorization removes an authorization by ID. -func (s *AuthorizationClientService) DeleteAuthorization(ctx context.Context, id influxdb.ID) error { +func (s *AuthorizationClientService) DeleteAuthorization(ctx context.Context, id platform.ID) error { return s.Client. Delete(prefixAuthorization, id.String()).
Do(ctx) diff --git a/authorization/http_server.go b/authorization/http_server.go index ab1e8f4a092..b27b68f355c 100644 --- a/authorization/http_server.go +++ b/authorization/http_server.go @@ -11,17 +11,19 @@ import ( "github.com/go-chi/chi/middleware" "github.com/influxdata/influxdb/v2" icontext "github.com/influxdata/influxdb/v2/context" + "github.com/influxdata/influxdb/v2/kit/platform" + "github.com/influxdata/influxdb/v2/kit/platform/errors" kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" "go.uber.org/zap" ) // TenantService is used to look up the Organization and User for an Authorization type TenantService interface { - FindOrganizationByID(ctx context.Context, id influxdb.ID) (*influxdb.Organization, error) + FindOrganizationByID(ctx context.Context, id platform.ID) (*influxdb.Organization, error) FindOrganization(ctx context.Context, filter influxdb.OrganizationFilter) (*influxdb.Organization, error) - FindUserByID(ctx context.Context, id influxdb.ID) (*influxdb.User, error) + FindUserByID(ctx context.Context, id platform.ID) (*influxdb.User, error) FindUser(ctx context.Context, filter influxdb.UserFilter) (*influxdb.User, error) - FindBucketByID(ctx context.Context, id influxdb.ID) (*influxdb.Bucket, error) + FindBucketByID(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) } type AuthHandler struct { @@ -126,20 +128,20 @@ func getAuthorizedUser(r *http.Request, ts TenantService) (*influxdb.User, error type postAuthorizationRequest struct { Status influxdb.Status `json:"status"` - OrgID influxdb.ID `json:"orgID"` - UserID *influxdb.ID `json:"userID,omitempty"` + OrgID platform.ID `json:"orgID"` + UserID *platform.ID `json:"userID,omitempty"` Description string `json:"description"` Permissions []influxdb.Permission `json:"permissions"` } type authResponse struct { - ID influxdb.ID `json:"id"` + ID platform.ID `json:"id"` Token string `json:"token"` Status influxdb.Status `json:"status"` Description string `json:"description"` - OrgID influxdb.ID `json:"orgID"` + OrgID platform.ID `json:"orgID"` Org string `json:"org"` - UserID influxdb.ID `json:"userID"` + UserID platform.ID `json:"userID"` User string `json:"user"` Permissions []permissionResponse `json:"permissions"` Links map[string]string `json:"links"` @@ -181,7 +183,7 @@ func (h *AuthHandler) newAuthResponse(ctx context.Context, a *influxdb.Authoriza return res, nil } -func (p *postAuthorizationRequest) toInfluxdb(userID influxdb.ID) *influxdb.Authorization { +func (p *postAuthorizationRequest) toInfluxdb(userID platform.ID) *influxdb.Authorization { return &influxdb.Authorization{ OrgID: p.OrgID, Status: p.Status, @@ -250,24 +252,24 @@ func (p *postAuthorizationRequest) SetDefaults() { func (p *postAuthorizationRequest) Validate() error { if len(p.Permissions) == 0 { - return &influxdb.Error{ - Code: influxdb.EInvalid, + return &errors.Error{ + Code: errors.EInvalid, Msg: "authorization must include permissions", } } for _, perm := range p.Permissions { if err := perm.Valid(); err != nil { - return &influxdb.Error{ + return &errors.Error{ Err: err, } } } if !p.OrgID.Valid() { - return &influxdb.Error{ - Err: influxdb.ErrInvalidID, - Code: influxdb.EInvalid, + return &errors.Error{ + Err: platform.ErrInvalidID, + Code: errors.EInvalid, Msg: "org id required", } } @@ -307,7 +309,7 @@ func (h *AuthHandler) newPermissionsResponse(ctx context.Context, ps []influxdb. 
if p.Resource.ID != nil { name, err := h.getNameForResource(ctx, p.Resource.Type, *p.Resource.ID) - if influxdb.ErrorCode(err) == influxdb.ENotFound { + if errors.ErrorCode(err) == errors.ENotFound { continue } if err != nil { @@ -318,7 +320,7 @@ func (h *AuthHandler) newPermissionsResponse(ctx context.Context, ps []influxdb. if p.Resource.OrgID != nil { name, err := h.getNameForResource(ctx, influxdb.OrgsResourceType, *p.Resource.OrgID) - if influxdb.ErrorCode(err) == influxdb.ENotFound { + if errors.ErrorCode(err) == errors.ENotFound { continue } if err != nil { @@ -330,13 +332,13 @@ func (h *AuthHandler) newPermissionsResponse(ctx context.Context, ps []influxdb. return res, nil } -func (h *AuthHandler) getNameForResource(ctx context.Context, resource influxdb.ResourceType, id influxdb.ID) (string, error) { +func (h *AuthHandler) getNameForResource(ctx context.Context, resource influxdb.ResourceType, id platform.ID) (string, error) { if err := resource.Valid(); err != nil { return "", err } if ok := id.Valid(); !ok { - return "", influxdb.ErrInvalidID + return "", platform.ErrInvalidID } switch resource { @@ -366,8 +368,8 @@ func (h *AuthHandler) getNameForResource(ctx context.Context, resource influxdb. func decodePostAuthorizationRequest(ctx context.Context, r *http.Request) (*postAuthorizationRequest, error) { a := &postAuthorizationRequest{} if err := json.NewDecoder(r.Body).Decode(a); err != nil { - return nil, &influxdb.Error{ - Code: influxdb.EInvalid, + return nil, &errors.Error{ + Code: errors.EInvalid, Msg: "invalid json structure", Err: err, } @@ -388,17 +390,9 @@ func (h *AuthHandler) handleGetAuthorizations(w http.ResponseWriter, r *http.Req return } - opts := influxdb.FindOptions{} - as, _, err := h.authSvc.FindAuthorizations(ctx, req.filter, opts) - - if err != nil { - h.api.Err(w, r, err) - return - } - f := req.filter - // If the user or org name was provided, look up the ID first - if f.User != nil { + // Look up user ID and org ID if they were not provided, but names were + if f.UserID == nil && f.User != nil { u, err := h.tenantService.FindUser(ctx, influxdb.UserFilter{Name: f.User}) if err != nil { h.api.Err(w, r, err) @@ -407,7 +401,7 @@ func (h *AuthHandler) handleGetAuthorizations(w http.ResponseWriter, r *http.Req f.UserID = &u.ID } - if f.Org != nil { + if f.OrgID == nil && f.Org != nil { o, err := h.tenantService.FindOrganization(ctx, influxdb.OrganizationFilter{Name: f.Org}) if err != nil { h.api.Err(w, r, err) @@ -416,6 +410,14 @@ func (h *AuthHandler) handleGetAuthorizations(w http.ResponseWriter, r *http.Req f.OrgID = &o.ID } + opts := influxdb.FindOptions{} + as, _, err := h.authSvc.FindAuthorizations(ctx, f, opts) + + if err != nil { + h.api.Err(w, r, err) + return + } + auths := make([]*authResponse, 0, len(as)) for _, a := range as { ps, err := h.newPermissionsResponse(ctx, a.Permissions) @@ -448,7 +450,7 @@ func decodeGetAuthorizationsRequest(ctx context.Context, r *http.Request) (*getA userID := qp.Get("userID") if userID != "" { - id, err := influxdb.IDFromString(userID) + id, err := platform.IDFromString(userID) if err != nil { return nil, err } @@ -462,7 +464,7 @@ func decodeGetAuthorizationsRequest(ctx context.Context, r *http.Request) (*getA orgID := qp.Get("orgID") if orgID != "" { - id, err := influxdb.IDFromString(orgID) + id, err := platform.IDFromString(orgID) if err != nil { return nil, err } @@ -476,7 +478,7 @@ func decodeGetAuthorizationsRequest(ctx context.Context, r *http.Request) (*getA authID := qp.Get("id") if authID != "" { - id, 
err := influxdb.IDFromString(authID) + id, err := platform.IDFromString(authID) if err != nil { return nil, err } @@ -489,7 +491,7 @@ func decodeGetAuthorizationsRequest(ctx context.Context, r *http.Request) (*getA func (h *AuthHandler) handleGetAuthorization(w http.ResponseWriter, r *http.Request) { ctx := r.Context() - id, err := influxdb.IDFromString(chi.URLParam(r, "id")) + id, err := platform.IDFromString(chi.URLParam(r, "id")) if err != nil { h.log.Info("Failed to decode request", zap.String("handler", "getAuthorization"), zap.Error(err)) h.api.Err(w, r, err) @@ -559,12 +561,12 @@ func (h *AuthHandler) handleUpdateAuthorization(w http.ResponseWriter, r *http.R } type updateAuthorizationRequest struct { - ID influxdb.ID + ID platform.ID *influxdb.AuthorizationUpdate } func decodeUpdateAuthorizationRequest(ctx context.Context, r *http.Request) (*updateAuthorizationRequest, error) { - id, err := influxdb.IDFromString(chi.URLParam(r, "id")) + id, err := platform.IDFromString(chi.URLParam(r, "id")) if err != nil { return nil, err } @@ -582,7 +584,7 @@ func decodeUpdateAuthorizationRequest(ctx context.Context, r *http.Request) (*up // handleDeleteAuthorization is the HTTP handler for the DELETE /api/v2/authorizations/:id route. func (h *AuthHandler) handleDeleteAuthorization(w http.ResponseWriter, r *http.Request) { - id, err := influxdb.IDFromString(chi.URLParam(r, "id")) + id, err := platform.IDFromString(chi.URLParam(r, "id")) if err != nil { h.log.Info("Failed to decode request", zap.String("handler", "deleteAuthorization"), zap.Error(err)) h.api.Err(w, r, err) diff --git a/authorization/http_server_test.go b/authorization/http_server_test.go index 14aba3028db..c2cf01fe741 100644 --- a/authorization/http_server_test.go +++ b/authorization/http_server_test.go @@ -5,7 +5,7 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "sort" @@ -17,26 +17,14 @@ import ( "github.com/influxdata/httprouter" "github.com/influxdata/influxdb/v2" icontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/inmem" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/kv/migration/all" + "github.com/influxdata/influxdb/v2/kit/platform" + "github.com/influxdata/influxdb/v2/kit/platform/errors" "github.com/influxdata/influxdb/v2/mock" itesting "github.com/influxdata/influxdb/v2/testing" + "github.com/stretchr/testify/require" "go.uber.org/zap/zaptest" ) -func NewTestInmemStore(t *testing.T) (kv.Store, func(), error) { - t.Helper() - - store := inmem.NewKVStore() - - if err := all.Up(context.Background(), zaptest.NewLogger(t), store); err != nil { - t.Fatal(err) - } - - return store, func() {}, nil -} - func TestService_handlePostAuthorization(t *testing.T) { type fields struct { AuthorizationService influxdb.AuthorizationService @@ -68,19 +56,19 @@ func TestService_handlePostAuthorization(t *testing.T) { }, }, TenantService: &tenantService{ - FindUserByIDFn: func(ctx context.Context, id influxdb.ID) (*influxdb.User, error) { + FindUserByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.User, error) { return &influxdb.User{ ID: id, Name: "u1", }, nil }, - FindOrganizationByIDF: func(ctx context.Context, id influxdb.ID) (*influxdb.Organization, error) { + FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { return &influxdb.Organization{ ID: id, Name: "o1", }, nil }, - 
FindBucketByIDFn: func(ctx context.Context, id influxdb.ID) (*influxdb.Bucket, error) { + FindBucketByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) { return &influxdb.Bucket{ ID: id, Name: "b1", @@ -159,11 +147,7 @@ func TestService_handlePostAuthorization(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Helper() - s, _, err := NewTestInmemStore(t) - if err != nil { - t.Fatal(err) - } - + s := itesting.NewTestInmemStore(t) storage, err := NewStore(s) if err != nil { t.Fatal(err) @@ -204,7 +188,7 @@ func TestService_handlePostAuthorization(t *testing.T) { res := w.Result() content := res.Header.Get("Content-Type") - body, _ := ioutil.ReadAll(res.Body) + body, _ := io.ReadAll(res.Body) if res.StatusCode != tt.wants.statusCode { t.Logf("headers: %v body: %s", res.Header, body) @@ -246,7 +230,7 @@ func TestService_handleGetAuthorization(t *testing.T) { name: "get a authorization by id", fields: fields{ AuthorizationService: &mock.AuthorizationService{ - FindAuthorizationByIDFn: func(ctx context.Context, id influxdb.ID) (*influxdb.Authorization, error) { + FindAuthorizationByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Authorization, error) { if id == itesting.MustIDBase16("020f755c3c082000") { return &influxdb.Authorization{ ID: itesting.MustIDBase16("020f755c3c082000"), @@ -258,7 +242,7 @@ func TestService_handleGetAuthorization(t *testing.T) { Resource: influxdb.Resource{ Type: influxdb.BucketsResourceType, OrgID: itesting.IDPtr(itesting.MustIDBase16("020f755c3c083000")), - ID: func() *influxdb.ID { + ID: func() *platform.ID { id := itesting.MustIDBase16("020f755c3c084000") return &id }(), @@ -273,19 +257,19 @@ func TestService_handleGetAuthorization(t *testing.T) { }, }, TenantService: &tenantService{ - FindUserByIDFn: func(ctx context.Context, id influxdb.ID) (*influxdb.User, error) { + FindUserByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.User, error) { return &influxdb.User{ ID: id, Name: "u1", }, nil }, - FindOrganizationByIDF: func(ctx context.Context, id influxdb.ID) (*influxdb.Organization, error) { + FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { return &influxdb.Organization{ ID: id, Name: "o1", }, nil }, - FindBucketByIDFn: func(ctx context.Context, id influxdb.ID) (*influxdb.Bucket, error) { + FindBucketByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) { return &influxdb.Bucket{ ID: id, Name: "b1", @@ -335,9 +319,9 @@ func TestService_handleGetAuthorization(t *testing.T) { name: "not found", fields: fields{ AuthorizationService: &mock.AuthorizationService{ - FindAuthorizationByIDFn: func(ctx context.Context, id influxdb.ID) (*influxdb.Authorization, error) { - return nil, &influxdb.Error{ - Code: influxdb.ENotFound, + FindAuthorizationByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Authorization, error) { + return nil, &errors.Error{ + Code: errors.ENotFound, Msg: "authorization not found", } }, @@ -373,7 +357,7 @@ func TestService_handleGetAuthorization(t *testing.T) { res := w.Result() content := res.Header.Get("Content-Type") - body, _ := ioutil.ReadAll(res.Body) + body, _ := io.ReadAll(res.Body) if res.StatusCode != tt.wants.statusCode { t.Logf("headers: %v body: %s", res.Header, body) @@ -391,6 +375,55 @@ func TestService_handleGetAuthorization(t *testing.T) { } } +func TestGetAuthorizationsWithNames(t *testing.T) { + t.Parallel() + + testUserName := "user" + testUserID := itesting.MustIDBase16("6c7574652c206f6e") + 
testOrgName := "org" + testOrgID := itesting.MustIDBase16("9d70616e656d2076") + + ts := &tenantService{ + FindUserFn: func(ctx context.Context, f influxdb.UserFilter) (*influxdb.User, error) { + require.Equal(t, &testUserName, f.Name) + + return &influxdb.User{ + ID: testUserID, + Name: testUserName, + }, nil + }, + + FindOrganizationF: func(ctx context.Context, f influxdb.OrganizationFilter) (*influxdb.Organization, error) { + require.Equal(t, &testOrgName, f.Name) + + return &influxdb.Organization{ + ID: testOrgID, + Name: testOrgName, + }, nil + }, + } + + as := &mock.AuthorizationService{ + FindAuthorizationsFn: func(ctx context.Context, f influxdb.AuthorizationFilter, opts ...influxdb.FindOptions) ([]*influxdb.Authorization, int, error) { + require.Equal(t, &testOrgID, f.OrgID) + require.Equal(t, &testUserID, f.UserID) + + return []*influxdb.Authorization{}, 0, nil + }, + } + + h := NewHTTPAuthHandler(zaptest.NewLogger(t), as, ts) + + w := httptest.NewRecorder() + r := httptest.NewRequest("get", "http://any.url", nil) + qp := r.URL.Query() + qp.Add("user", testUserName) + qp.Add("org", testOrgName) + r.URL.RawQuery = qp.Encode() + + h.handleGetAuthorizations(w, r) +} + func TestService_handleGetAuthorizations(t *testing.T) { type fields struct { AuthorizationService influxdb.AuthorizationService @@ -439,14 +472,14 @@ func TestService_handleGetAuthorizations(t *testing.T) { }, }, &tenantService{ - FindUserByIDFn: func(ctx context.Context, id influxdb.ID) (*influxdb.User, error) { + FindUserByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.User, error) { return &influxdb.User{ ID: id, Name: id.String(), }, nil }, - FindOrganizationByIDF: func(ctx context.Context, id influxdb.ID) (*influxdb.Organization, error) { + FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { return &influxdb.Organization{ ID: id, Name: id.String(), @@ -531,16 +564,16 @@ func TestService_handleGetAuthorizations(t *testing.T) { }, }, &tenantService{ - FindUserByIDFn: func(ctx context.Context, id influxdb.ID) (*influxdb.User, error) { + FindUserByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.User, error) { if id.String() == "2070616e656d2076" { return &influxdb.User{ ID: id, Name: id.String(), }, nil } - return nil, &influxdb.Error{} + return nil, &errors.Error{} }, - FindOrganizationByIDF: func(ctx context.Context, id influxdb.ID) (*influxdb.Organization, error) { + FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { return &influxdb.Organization{ ID: id, Name: id.String(), @@ -607,20 +640,20 @@ func TestService_handleGetAuthorizations(t *testing.T) { }, }, &tenantService{ - FindUserByIDFn: func(ctx context.Context, id influxdb.ID) (*influxdb.User, error) { + FindUserByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.User, error) { return &influxdb.User{ ID: id, Name: id.String(), }, nil }, - FindOrganizationByIDF: func(ctx context.Context, id influxdb.ID) (*influxdb.Organization, error) { + FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { if id.String() == "3070616e656d2076" { return &influxdb.Organization{ ID: id, Name: id.String(), }, nil } - return nil, &influxdb.Error{} + return nil, &errors.Error{} }, }, }, @@ -685,11 +718,7 @@ func TestService_handleGetAuthorizations(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Helper() - s, _, err := NewTestInmemStore(t) - if err != nil { - t.Fatal(err) - } - + s := 
itesting.NewTestInmemStore(t) storage, err := NewStore(s) if err != nil { t.Fatal(err) @@ -717,7 +746,7 @@ func TestService_handleGetAuthorizations(t *testing.T) { res := w.Result() content := res.Header.Get("Content-Type") - body, _ := ioutil.ReadAll(res.Body) + body, _ := io.ReadAll(res.Body) if res.StatusCode != tt.wants.statusCode { t.Errorf("%q. handleGetAuthorizations() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) @@ -759,7 +788,7 @@ func TestService_handleDeleteAuthorization(t *testing.T) { name: "remove a authorization by id", fields: fields{ &mock.AuthorizationService{ - DeleteAuthorizationFn: func(ctx context.Context, id influxdb.ID) error { + DeleteAuthorizationFn: func(ctx context.Context, id platform.ID) error { if id == itesting.MustIDBase16("020f755c3c082000") { return nil } @@ -780,9 +809,9 @@ func TestService_handleDeleteAuthorization(t *testing.T) { name: "authorization not found", fields: fields{ &mock.AuthorizationService{ - DeleteAuthorizationFn: func(ctx context.Context, id influxdb.ID) error { - return &influxdb.Error{ - Code: influxdb.ENotFound, + DeleteAuthorizationFn: func(ctx context.Context, id platform.ID) error { + return &errors.Error{ + Code: errors.ENotFound, Msg: "authorization not found", } }, @@ -818,7 +847,7 @@ func TestService_handleDeleteAuthorization(t *testing.T) { res := w.Result() content := res.Header.Get("Content-Type") - body, _ := ioutil.ReadAll(res.Body) + body, _ := io.ReadAll(res.Body) if res.StatusCode != tt.wants.statusCode { t.Errorf("%q. handleDeleteAuthorization() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) diff --git a/authorization/middleware_auth.go b/authorization/middleware_auth.go index a6b59ddc2ba..91a87e6081e 100644 --- a/authorization/middleware_auth.go +++ b/authorization/middleware_auth.go @@ -6,6 +6,8 @@ import ( "github.com/influxdata/influxdb/v2" "github.com/influxdata/influxdb/v2/authorizer" + "github.com/influxdata/influxdb/v2/kit/platform" + "github.com/influxdata/influxdb/v2/kit/platform/errors" ) type AuthedAuthorizationService struct { @@ -32,6 +34,11 @@ func (s *AuthedAuthorizationService) CreateAuthorization(ctx context.Context, a if err := authorizer.VerifyPermissions(ctx, a.Permissions); err != nil { return err } + for _, v := range a.Permissions { + if v.Resource.Type == influxdb.InstanceResourceType { + return fmt.Errorf("authorizations cannot be created with the instance type, it is only used during setup") + } + } return s.s.CreateAuthorization(ctx, a) } @@ -50,7 +57,7 @@ func (s *AuthedAuthorizationService) FindAuthorizationByToken(ctx context.Contex return a, nil } -func (s *AuthedAuthorizationService) FindAuthorizationByID(ctx context.Context, id influxdb.ID) (*influxdb.Authorization, error) { +func (s *AuthedAuthorizationService) FindAuthorizationByID(ctx context.Context, id platform.ID) (*influxdb.Authorization, error) { a, err := s.s.FindAuthorizationByID(ctx, id) if err != nil { return nil, err @@ -74,7 +81,7 @@ func (s *AuthedAuthorizationService) FindAuthorizations(ctx context.Context, fil return authorizer.AuthorizeFindAuthorizations(ctx, as) } -func (s *AuthedAuthorizationService) UpdateAuthorization(ctx context.Context, id influxdb.ID, upd *influxdb.AuthorizationUpdate) (*influxdb.Authorization, error) { +func (s *AuthedAuthorizationService) UpdateAuthorization(ctx context.Context, id platform.ID, upd *influxdb.AuthorizationUpdate) (*influxdb.Authorization, error) { a, err := s.s.FindAuthorizationByID(ctx, id) if err != nil { 
return nil, err @@ -88,7 +95,7 @@ func (s *AuthedAuthorizationService) UpdateAuthorization(ctx context.Context, id return s.s.UpdateAuthorization(ctx, id, upd) } -func (s *AuthedAuthorizationService) DeleteAuthorization(ctx context.Context, id influxdb.ID) error { +func (s *AuthedAuthorizationService) DeleteAuthorization(ctx context.Context, id platform.ID) error { a, err := s.s.FindAuthorizationByID(ctx, id) if err != nil { return err @@ -106,10 +113,10 @@ func (s *AuthedAuthorizationService) DeleteAuthorization(ctx context.Context, id func VerifyPermissions(ctx context.Context, ps []influxdb.Permission) error { for _, p := range ps { if err := authorizer.IsAllowed(ctx, p); err != nil { - return &influxdb.Error{ + return &errors.Error{ Err: err, Msg: fmt.Sprintf("permission %s is not allowed", p), - Code: influxdb.EForbidden, + Code: errors.EForbidden, } } } diff --git a/authorization/middleware_auth_test.go b/authorization/middleware_auth_test.go index 840b96e5141..7ed49182a99 100644 --- a/authorization/middleware_auth_test.go +++ b/authorization/middleware_auth_test.go @@ -11,6 +11,8 @@ import ( "github.com/influxdata/influxdb/v2/authorization" influxdbcontext "github.com/influxdata/influxdb/v2/context" "github.com/influxdata/influxdb/v2/inmem" + "github.com/influxdata/influxdb/v2/kit/platform" + "github.com/influxdata/influxdb/v2/kit/platform/errors" "github.com/influxdata/influxdb/v2/kv/migration/all" "github.com/influxdata/influxdb/v2/mock" "github.com/influxdata/influxdb/v2/tenant" @@ -97,9 +99,9 @@ func TestAuthorizationService_ReadAuthorization(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "read:orgs/0000000000000001/authorizations/000000000000000a is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, authorizations: []*influxdb.Authorization{}, }, @@ -125,9 +127,9 @@ func TestAuthorizationService_ReadAuthorization(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "read:users/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, authorizations: []*influxdb.Authorization{}, }, @@ -137,7 +139,7 @@ func TestAuthorizationService_ReadAuthorization(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { m := &mock.AuthorizationService{} - m.FindAuthorizationByIDFn = func(ctx context.Context, id influxdb.ID) (*influxdb.Authorization, error) { + m.FindAuthorizationByIDFn = func(ctx context.Context, id platform.ID) (*influxdb.Authorization, error) { return &influxdb.Authorization{ ID: id, UserID: 1, @@ -252,9 +254,9 @@ func TestAuthorizationService_WriteAuthorization(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/0000000000000001/authorizations/000000000000000a is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -279,9 +281,9 @@ func TestAuthorizationService_WriteAuthorization(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:users/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -290,7 +292,7 @@ func TestAuthorizationService_WriteAuthorization(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { m := &mock.AuthorizationService{} - m.FindAuthorizationByIDFn = func(ctx context.Context, id influxdb.ID) (*influxdb.Authorization, error) { + 
m.FindAuthorizationByIDFn = func(ctx context.Context, id platform.ID) (*influxdb.Authorization, error) { return &influxdb.Authorization{ ID: id, UserID: 1, @@ -300,10 +302,10 @@ func TestAuthorizationService_WriteAuthorization(t *testing.T) { m.CreateAuthorizationFn = func(ctx context.Context, a *influxdb.Authorization) error { return nil } - m.DeleteAuthorizationFn = func(ctx context.Context, id influxdb.ID) error { + m.DeleteAuthorizationFn = func(ctx context.Context, id platform.ID) error { return nil } - m.UpdateAuthorizationFn = func(ctx context.Context, id influxdb.ID, upd *influxdb.AuthorizationUpdate) (*influxdb.Authorization, error) { + m.UpdateAuthorizationFn = func(ctx context.Context, id platform.ID, upd *influxdb.AuthorizationUpdate) (*influxdb.Authorization, error) { return nil, nil } // set up tenant service @@ -391,9 +393,9 @@ func TestAuthorizationService_CreateAuthorization(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/0000000000000001/authorizations is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -418,9 +420,9 @@ func TestAuthorizationService_CreateAuthorization(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:users/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -429,7 +431,7 @@ func TestAuthorizationService_CreateAuthorization(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { m := &mock.AuthorizationService{} - m.FindAuthorizationByIDFn = func(ctx context.Context, id influxdb.ID) (*influxdb.Authorization, error) { + m.FindAuthorizationByIDFn = func(ctx context.Context, id platform.ID) (*influxdb.Authorization, error) { return &influxdb.Authorization{ ID: id, UserID: 1, @@ -439,10 +441,10 @@ func TestAuthorizationService_CreateAuthorization(t *testing.T) { m.CreateAuthorizationFn = func(ctx context.Context, a *influxdb.Authorization) error { return nil } - m.DeleteAuthorizationFn = func(ctx context.Context, id influxdb.ID) error { + m.DeleteAuthorizationFn = func(ctx context.Context, id platform.ID) error { return nil } - m.UpdateAuthorizationFn = func(ctx context.Context, id influxdb.ID, upd *influxdb.AuthorizationUpdate) (*influxdb.Authorization, error) { + m.UpdateAuthorizationFn = func(ctx context.Context, id platform.ID, upd *influxdb.AuthorizationUpdate) (*influxdb.Authorization, error) { return nil, nil } // set up tenant service diff --git a/authorization/middleware_logging.go b/authorization/middleware_logging.go index 411a8ef164e..42a4ac24988 100644 --- a/authorization/middleware_logging.go +++ b/authorization/middleware_logging.go @@ -6,6 +6,7 @@ import ( "time" "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/kit/platform" "go.uber.org/zap" ) @@ -36,7 +37,7 @@ func (l *AuthLogger) CreateAuthorization(ctx context.Context, a *influxdb.Author return l.authService.CreateAuthorization(ctx, a) } -func (l *AuthLogger) FindAuthorizationByID(ctx context.Context, id influxdb.ID) (a *influxdb.Authorization, err error) { +func (l *AuthLogger) FindAuthorizationByID(ctx context.Context, id platform.ID) (a *influxdb.Authorization, err error) { defer func(start time.Time) { dur := zap.Duration("took", time.Since(start)) if err != nil { @@ -74,7 +75,7 @@ func (l *AuthLogger) FindAuthorizations(ctx context.Context, filter influxdb.Aut return l.authService.FindAuthorizations(ctx, filter) } -func (l 
*AuthLogger) UpdateAuthorization(ctx context.Context, id influxdb.ID, upd *influxdb.AuthorizationUpdate) (a *influxdb.Authorization, err error) { +func (l *AuthLogger) UpdateAuthorization(ctx context.Context, id platform.ID, upd *influxdb.AuthorizationUpdate) (a *influxdb.Authorization, err error) { defer func(start time.Time) { dur := zap.Duration("took", time.Since(start)) if err != nil { @@ -86,7 +87,7 @@ func (l *AuthLogger) UpdateAuthorization(ctx context.Context, id influxdb.ID, up return l.authService.UpdateAuthorization(ctx, id, upd) } -func (l *AuthLogger) DeleteAuthorization(ctx context.Context, id influxdb.ID) (err error) { +func (l *AuthLogger) DeleteAuthorization(ctx context.Context, id platform.ID) (err error) { defer func(start time.Time) { dur := zap.Duration("took", time.Since(start)) if err != nil { diff --git a/authorization/middleware_metrics.go b/authorization/middleware_metrics.go index 55f3846acac..b541f672e0f 100644 --- a/authorization/middleware_metrics.go +++ b/authorization/middleware_metrics.go @@ -5,6 +5,7 @@ import ( "github.com/influxdata/influxdb/v2" "github.com/influxdata/influxdb/v2/kit/metric" + "github.com/influxdata/influxdb/v2/kit/platform" "github.com/prometheus/client_golang/prometheus" ) @@ -31,7 +32,7 @@ func (m *AuthMetrics) CreateAuthorization(ctx context.Context, a *influxdb.Autho return rec(err) } -func (m *AuthMetrics) FindAuthorizationByID(ctx context.Context, id influxdb.ID) (*influxdb.Authorization, error) { +func (m *AuthMetrics) FindAuthorizationByID(ctx context.Context, id platform.ID) (*influxdb.Authorization, error) { rec := m.rec.Record("find_authorization_by_id") a, err := m.authService.FindAuthorizationByID(ctx, id) return a, rec(err) @@ -47,13 +48,13 @@ func (m *AuthMetrics) FindAuthorizations(ctx context.Context, filter influxdb.Au return a, n, rec(err) } -func (m *AuthMetrics) UpdateAuthorization(ctx context.Context, id influxdb.ID, upd *influxdb.AuthorizationUpdate) (*influxdb.Authorization, error) { +func (m *AuthMetrics) UpdateAuthorization(ctx context.Context, id platform.ID, upd *influxdb.AuthorizationUpdate) (*influxdb.Authorization, error) { rec := m.rec.Record("update_authorization") a, err := m.authService.UpdateAuthorization(ctx, id, upd) return a, rec(err) } -func (m *AuthMetrics) DeleteAuthorization(ctx context.Context, id influxdb.ID) error { +func (m *AuthMetrics) DeleteAuthorization(ctx context.Context, id platform.ID) error { rec := m.rec.Record("delete_authorization") err := m.authService.DeleteAuthorization(ctx, id) return rec(err) diff --git a/authorization/mock_tenant.go b/authorization/mock_tenant.go index 576d62ecf46..42c084b25ab 100644 --- a/authorization/mock_tenant.go +++ b/authorization/mock_tenant.go @@ -4,19 +4,20 @@ import ( "context" "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/kit/platform" ) // tenantService is a mock implementation of an authorization.tenantService type tenantService struct { - FindUserByIDFn func(context.Context, influxdb.ID) (*influxdb.User, error) + FindUserByIDFn func(context.Context, platform.ID) (*influxdb.User, error) FindUserFn func(context.Context, influxdb.UserFilter) (*influxdb.User, error) - FindOrganizationByIDF func(ctx context.Context, id influxdb.ID) (*influxdb.Organization, error) + FindOrganizationByIDF func(ctx context.Context, id platform.ID) (*influxdb.Organization, error) FindOrganizationF func(ctx context.Context, filter influxdb.OrganizationFilter) (*influxdb.Organization, error) - 
FindBucketByIDFn func(context.Context, influxdb.ID) (*influxdb.Bucket, error) + FindBucketByIDFn func(context.Context, platform.ID) (*influxdb.Bucket, error) } // FindUserByID returns a single User by ID. -func (s *tenantService) FindUserByID(ctx context.Context, id influxdb.ID) (*influxdb.User, error) { +func (s *tenantService) FindUserByID(ctx context.Context, id platform.ID) (*influxdb.User, error) { return s.FindUserByIDFn(ctx, id) } @@ -26,7 +27,7 @@ func (s *tenantService) FindUser(ctx context.Context, filter influxdb.UserFilter } //FindOrganizationByID calls FindOrganizationByIDF. -func (s *tenantService) FindOrganizationByID(ctx context.Context, id influxdb.ID) (*influxdb.Organization, error) { +func (s *tenantService) FindOrganizationByID(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { return s.FindOrganizationByIDF(ctx, id) } @@ -35,6 +36,6 @@ func (s *tenantService) FindOrganization(ctx context.Context, filter influxdb.Or return s.FindOrganizationF(ctx, filter) } -func (s *tenantService) FindBucketByID(ctx context.Context, id influxdb.ID) (*influxdb.Bucket, error) { +func (s *tenantService) FindBucketByID(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) { return s.FindBucketByIDFn(ctx, id) } diff --git a/authorization/service.go b/authorization/service.go index 5558a7a5ef5..196c4a7b9e4 100644 --- a/authorization/service.go +++ b/authorization/service.go @@ -5,6 +5,8 @@ import ( "time" "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/kit/platform" + "github.com/influxdata/influxdb/v2/kit/platform/errors" "github.com/influxdata/influxdb/v2/kv" "github.com/influxdata/influxdb/v2/rand" ) @@ -27,7 +29,7 @@ func NewService(st *Store, ts TenantService) influxdb.AuthorizationService { func (s *Service) CreateAuthorization(ctx context.Context, a *influxdb.Authorization) error { if err := a.Valid(); err != nil { - return &influxdb.Error{ + return &errors.Error{ Err: err, } } @@ -53,7 +55,7 @@ func (s *Service) CreateAuthorization(ctx context.Context, a *influxdb.Authoriza if a.Token == "" { token, err := s.tokenGenerator.Token() if err != nil { - return &influxdb.Error{ + return &errors.Error{ Err: err, } } @@ -69,7 +71,7 @@ func (s *Service) CreateAuthorization(ctx context.Context, a *influxdb.Authoriza }) } -func (s *Service) FindAuthorizationByID(ctx context.Context, id influxdb.ID) (*influxdb.Authorization, error) { +func (s *Service) FindAuthorizationByID(ctx context.Context, id platform.ID) (*influxdb.Authorization, error) { var a *influxdb.Authorization err := s.store.View(ctx, func(tx kv.Tx) error { auth, err := s.store.GetAuthorizationByID(ctx, tx, id) @@ -124,7 +126,7 @@ func (s *Service) FindAuthorizations(ctx context.Context, filter influxdb.Author return nil }) if err != nil { - return nil, 0, &influxdb.Error{ + return nil, 0, &errors.Error{ Err: err, } } @@ -143,7 +145,7 @@ func (s *Service) FindAuthorizations(ctx context.Context, filter influxdb.Author return nil }) if err != nil { - return nil, 0, &influxdb.Error{ + return nil, 0, &errors.Error{ Err: err, } } @@ -162,7 +164,7 @@ func (s *Service) FindAuthorizations(ctx context.Context, filter influxdb.Author }) if err != nil { - return nil, 0, &influxdb.Error{ + return nil, 0, &errors.Error{ Err: err, } } @@ -171,7 +173,7 @@ func (s *Service) FindAuthorizations(ctx context.Context, filter influxdb.Author } // UpdateAuthorization updates the status and description if available. 
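The recurring change in these hunks is influxdb.Error becoming errors.Error; the UpdateAuthorization hunk just below, for example, now returns errors.ENotFound. A minimal caller-side sketch of matching on the relocated error codes, assuming the ErrorCode helper and the E* constants moved into kit/platform/errors along with the Error type (that package's API is not itself shown in this diff):

package example

import (
	"context"
	"fmt"

	"github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/kit/platform"
	"github.com/influxdata/influxdb/v2/kit/platform/errors"
)

// lookup is a hypothetical caller showing the post-migration error check:
// errors.ErrorCode and errors.ENotFound replace the influxdb.* equivalents
// used before this diff.
func lookup(ctx context.Context, svc influxdb.AuthorizationService, id platform.ID) (*influxdb.Authorization, error) {
	a, err := svc.FindAuthorizationByID(ctx, id)
	if errors.ErrorCode(err) == errors.ENotFound {
		// Treat a missing authorization as a soft failure rather than
		// propagating the coded error.
		return nil, fmt.Errorf("authorization %v does not exist", id)
	}
	return a, err
}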
-func (s *Service) UpdateAuthorization(ctx context.Context, id influxdb.ID, upd *influxdb.AuthorizationUpdate) (*influxdb.Authorization, error) { +func (s *Service) UpdateAuthorization(ctx context.Context, id platform.ID, upd *influxdb.AuthorizationUpdate) (*influxdb.Authorization, error) { var auth *influxdb.Authorization err := s.store.View(ctx, func(tx kv.Tx) error { a, e := s.store.GetAuthorizationByID(ctx, tx, id) @@ -183,8 +185,8 @@ func (s *Service) UpdateAuthorization(ctx context.Context, id influxdb.ID, upd * }) if err != nil { - return nil, &influxdb.Error{ - Code: influxdb.ENotFound, + return nil, &errors.Error{ + Code: errors.ENotFound, Err: err, } } @@ -209,7 +211,7 @@ func (s *Service) UpdateAuthorization(ctx context.Context, id influxdb.ID, upd * return auth, err } -func (s *Service) DeleteAuthorization(ctx context.Context, id influxdb.ID) error { +func (s *Service) DeleteAuthorization(ctx context.Context, id platform.ID) error { return s.store.Update(ctx, func(tx kv.Tx) (err error) { return s.store.DeleteAuthorization(ctx, tx, id) }) diff --git a/authorization/service_test.go b/authorization/service_test.go index 0a35b963e7a..c729227dddd 100644 --- a/authorization/service_test.go +++ b/authorization/service_test.go @@ -2,27 +2,17 @@ package authorization_test import ( "context" - "errors" - "io/ioutil" - "os" "testing" "github.com/influxdata/influxdb/v2" "github.com/influxdata/influxdb/v2/authorization" - "github.com/influxdata/influxdb/v2/bolt" "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/kv/migration/all" "github.com/influxdata/influxdb/v2/tenant" influxdbtesting "github.com/influxdata/influxdb/v2/testing" - "go.uber.org/zap/zaptest" ) func initBoltAuthService(f influxdbtesting.AuthorizationFields, t *testing.T) (influxdb.AuthorizationService, string, func()) { - s, closeBolt, err := NewTestBoltStore(t) - if err != nil { - t.Fatalf("failed to create new kv store: %v", err) - } - + s, closeBolt := influxdbtesting.NewTestBoltStore(t) svc, closeSvc := initAuthService(s, f, t) return svc, "service_auth", func() { closeSvc() @@ -71,35 +61,6 @@ func initAuthService(s kv.Store, f influxdbtesting.AuthorizationFields, t *testi } } -func NewTestBoltStore(t *testing.T) (kv.Store, func(), error) { - f, err := ioutil.TempFile("", "influxdata-bolt-") - if err != nil { - return nil, nil, errors.New("unable to open temporary boltdb file") - } - f.Close() - - path := f.Name() - ctx := context.Background() - logger := zaptest.NewLogger(t) - - s := bolt.NewKVStore(logger, path, bolt.WithNoSync) - - if err := s.Open(ctx); err != nil { - return nil, nil, err - } - - if err := all.Up(ctx, logger, s); err != nil { - return nil, nil, err - } - - close := func() { - s.Close() - os.Remove(path) - } - - return s, close, nil -} - func TestBoltAuthService(t *testing.T) { t.Parallel() influxdbtesting.AuthorizationService(initBoltAuthService, t) diff --git a/authorization/storage.go b/authorization/storage.go index e3a06b6135c..4b8c8fdec3b 100644 --- a/authorization/storage.go +++ b/authorization/storage.go @@ -3,7 +3,8 @@ package authorization import ( "context" - "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/kit/platform" + "github.com/influxdata/influxdb/v2/kit/platform/errors" "github.com/influxdata/influxdb/v2/kit/tracing" "github.com/influxdata/influxdb/v2/kv" "github.com/influxdata/influxdb/v2/snowflake" @@ -19,7 +20,7 @@ var ( type 
Store struct { kvStore kv.Store - IDGen influxdb.IDGenerator + IDGen platform.IDGenerator } func NewStore(kvStore kv.Store) (*Store, error) { @@ -56,7 +57,7 @@ func (s *Store) setup() error { // generateSafeID attempts to create IDs for buckets // and orgs that contain no backslashes, commas, or spaces, and that do not already exist. -func (s *Store) generateSafeID(ctx context.Context, tx kv.Tx, bucket []byte) (influxdb.ID, error) { +func (s *Store) generateSafeID(ctx context.Context, tx kv.Tx, bucket []byte) (platform.ID, error) { for i := 0; i < MaxIDGenerationN; i++ { id := s.IDGen.ID() @@ -75,19 +76,19 @@ func (s *Store) generateSafeID(ctx context.Context, tx kv.Tx, bucket []byte) (in continue } - return influxdb.InvalidID(), err + return platform.InvalidID(), err } - return influxdb.InvalidID(), ErrFailureGeneratingID + return platform.InvalidID(), ErrFailureGeneratingID } -func (s *Store) uniqueID(ctx context.Context, tx kv.Tx, bucket []byte, id influxdb.ID) error { +func (s *Store) uniqueID(ctx context.Context, tx kv.Tx, bucket []byte, id platform.ID) error { span, _ := tracing.StartSpanFromContext(ctx) defer span.Finish() encodedID, err := id.Encode() if err != nil { - return &influxdb.Error{ - Code: influxdb.EInvalid, + return &errors.Error{ + Code: errors.EInvalid, Err: err, } } diff --git a/authorization/storage_authorization.go b/authorization/storage_authorization.go index e9bc4d535ab..3a25146655d 100644 --- a/authorization/storage_authorization.go +++ b/authorization/storage_authorization.go @@ -6,6 +6,8 @@ import ( "github.com/buger/jsonparser" "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/kit/platform" + "github.com/influxdata/influxdb/v2/kit/platform/errors" "github.com/influxdata/influxdb/v2/kv" jsonp "github.com/influxdata/influxdb/v2/pkg/jsonparser" ) @@ -29,8 +31,8 @@ func encodeAuthorization(a *influxdb.Authorization) ([]byte, error) { case "": a.Status = influxdb.Active default: - return nil, &influxdb.Error{ - Code: influxdb.EInvalid, + return nil, &errors.Error{ + Code: errors.EInvalid, Msg: "unknown authorization status", } } @@ -72,8 +74,8 @@ func (s *Store) CreateAuthorization(ctx context.Context, tx kv.Tx, a *influxdb.A v, err := encodeAuthorization(a) if err != nil { - return &influxdb.Error{ - Code: influxdb.EInvalid, + return &errors.Error{ + Code: errors.EInvalid, Err: err, } } @@ -89,8 +91,8 @@ func (s *Store) CreateAuthorization(ctx context.Context, tx kv.Tx, a *influxdb.A } if err := idx.Put(authIndexKey(a.Token), encodedID); err != nil { - return &influxdb.Error{ - Code: influxdb.EInternal, + return &errors.Error{ + Code: errors.EInternal, Err: err, } } @@ -101,7 +103,7 @@ func (s *Store) CreateAuthorization(ctx context.Context, tx kv.Tx, a *influxdb.A } if err := b.Put(encodedID, v); err != nil { - return &influxdb.Error{ + return &errors.Error{ Err: err, } } @@ -110,7 +112,7 @@ func (s *Store) CreateAuthorization(ctx context.Context, tx kv.Tx, a *influxdb.A } // GetAuthorizationByID gets an authorization by its ID from the auth bucket in kv -func (s *Store) GetAuthorizationByID(ctx context.Context, tx kv.Tx, id influxdb.ID) (*influxdb.Authorization, error) { +func (s *Store) GetAuthorizationByID(ctx context.Context, tx kv.Tx, id platform.ID) (*influxdb.Authorization, error) { encodedID, err := id.Encode() if err != nil { return nil, ErrInvalidAuthID @@ -132,8 +134,8 @@ func (s *Store) GetAuthorizationByID(ctx context.Context, tx kv.Tx, id influxdb.
a := &influxdb.Authorization{} if err := decodeAuthorization(v, a); err != nil { - return nil, &influxdb.Error{ - Code: influxdb.EInvalid, + return nil, &errors.Error{ + Code: errors.EInvalid, Err: err, } } @@ -150,16 +152,16 @@ func (s *Store) GetAuthorizationByToken(ctx context.Context, tx kv.Tx, token str // use the token to look up the authorization's ID idKey, err := idx.Get(authIndexKey(token)) if kv.IsNotFound(err) { - return nil, &influxdb.Error{ - Code: influxdb.ENotFound, + return nil, &errors.Error{ + Code: errors.ENotFound, Msg: "authorization not found", } } - var id influxdb.ID + var id platform.ID if err := id.Decode(idKey); err != nil { - return nil, &influxdb.Error{ - Code: influxdb.EInvalid, + return nil, &errors.Error{ + Code: errors.EInvalid, Err: err, } } @@ -221,19 +223,19 @@ func (s *Store) forEachAuthorization(ctx context.Context, tx kv.Tx, pred kv.Curs } // UpdateAuthorization updates the status and description only of an authorization -func (s *Store) UpdateAuthorization(ctx context.Context, tx kv.Tx, id influxdb.ID, a *influxdb.Authorization) (*influxdb.Authorization, error) { +func (s *Store) UpdateAuthorization(ctx context.Context, tx kv.Tx, id platform.ID, a *influxdb.Authorization) (*influxdb.Authorization, error) { v, err := encodeAuthorization(a) if err != nil { - return nil, &influxdb.Error{ - Code: influxdb.EInvalid, + return nil, &errors.Error{ + Code: errors.EInvalid, Err: err, } } encodedID, err := a.ID.Encode() if err != nil { - return nil, &influxdb.Error{ - Code: influxdb.ENotFound, + return nil, &errors.Error{ + Code: errors.ENotFound, Err: err, } } @@ -244,8 +246,8 @@ func (s *Store) UpdateAuthorization(ctx context.Context, tx kv.Tx, id influxdb.I } if err := idx.Put(authIndexKey(a.Token), encodedID); err != nil { - return nil, &influxdb.Error{ - Code: influxdb.EInternal, + return nil, &errors.Error{ + Code: errors.EInternal, Err: err, } } @@ -256,7 +258,7 @@ func (s *Store) UpdateAuthorization(ctx context.Context, tx kv.Tx, id influxdb.I } if err := b.Put(encodedID, v); err != nil { - return nil, &influxdb.Error{ + return nil, &errors.Error{ Err: err, } } @@ -266,7 +268,7 @@ func (s *Store) UpdateAuthorization(ctx context.Context, tx kv.Tx, id influxdb.I } // DeleteAuthorization removes an authorization from storage -func (s *Store) DeleteAuthorization(ctx context.Context, tx kv.Tx, id influxdb.ID) error { +func (s *Store) DeleteAuthorization(ctx context.Context, tx kv.Tx, id platform.ID) error { a, err := s.GetAuthorizationByID(ctx, tx, id) if err != nil { return err @@ -332,7 +334,7 @@ func unique(ctx context.Context, tx kv.Tx, indexBucket, indexKey []byte) error { } // uniqueID returns nil if the ID provided is unique, returns an error otherwise -func uniqueID(ctx context.Context, tx kv.Tx, id influxdb.ID) error { +func uniqueID(ctx context.Context, tx kv.Tx, id platform.ID) error { encodedID, err := id.Encode() if err != nil { return ErrInvalidAuthID diff --git a/authorization/storage_authorization_test.go b/authorization/storage_authorization_test.go index f03a53cfcf8..79406a2d249 100644 --- a/authorization/storage_authorization_test.go +++ b/authorization/storage_authorization_test.go @@ -9,6 +9,7 @@ import ( "github.com/influxdata/influxdb/v2" "github.com/influxdata/influxdb/v2/authorization" "github.com/influxdata/influxdb/v2/inmem" + "github.com/influxdata/influxdb/v2/kit/platform" "github.com/influxdata/influxdb/v2/kv" "github.com/influxdata/influxdb/v2/kv/migration/all" 
"go.uber.org/zap/zaptest" @@ -18,10 +19,10 @@ func TestAuth(t *testing.T) { setup := func(t *testing.T, store *authorization.Store, tx kv.Tx) { for i := 1; i <= 10; i++ { err := store.CreateAuthorization(context.Background(), tx, &influxdb.Authorization{ - ID: influxdb.ID(i), + ID: platform.ID(i), Token: fmt.Sprintf("randomtoken%d", i), - OrgID: influxdb.ID(i), - UserID: influxdb.ID(i), + OrgID: platform.ID(i), + UserID: platform.ID(i), Status: influxdb.Active, }) @@ -53,10 +54,10 @@ func TestAuth(t *testing.T) { expected := []*influxdb.Authorization{} for i := 1; i <= 10; i++ { expected = append(expected, &influxdb.Authorization{ - ID: influxdb.ID(i), + ID: platform.ID(i), Token: fmt.Sprintf("randomtoken%d", i), - OrgID: influxdb.ID(i), - UserID: influxdb.ID(i), + OrgID: platform.ID(i), + UserID: platform.ID(i), Status: "active", }) } @@ -66,10 +67,10 @@ func TestAuth(t *testing.T) { // should not be able to create two authorizations with identical tokens err = store.CreateAuthorization(context.Background(), tx, &influxdb.Authorization{ - ID: influxdb.ID(1), + ID: platform.ID(1), Token: fmt.Sprintf("randomtoken%d", 1), - OrgID: influxdb.ID(1), - UserID: influxdb.ID(1), + OrgID: platform.ID(1), + UserID: platform.ID(1), }) if err == nil { t.Fatalf("expected to be unable to create authorizations with identical tokens") @@ -82,14 +83,14 @@ func TestAuth(t *testing.T) { results: func(t *testing.T, store *authorization.Store, tx kv.Tx) { for i := 1; i <= 10; i++ { expectedAuth := &influxdb.Authorization{ - ID: influxdb.ID(i), + ID: platform.ID(i), Token: fmt.Sprintf("randomtoken%d", i), - OrgID: influxdb.ID(i), - UserID: influxdb.ID(i), + OrgID: platform.ID(i), + UserID: platform.ID(i), Status: influxdb.Active, } - authByID, err := store.GetAuthorizationByID(context.Background(), tx, influxdb.ID(i)) + authByID, err := store.GetAuthorizationByID(context.Background(), tx, platform.ID(i)) if err != nil { t.Fatalf("Unexpectedly could not acquire Authorization by ID [Error]: %v", err) } @@ -115,14 +116,14 @@ func TestAuth(t *testing.T) { setup: setup, update: func(t *testing.T, store *authorization.Store, tx kv.Tx) { for i := 1; i <= 10; i++ { - auth, err := store.GetAuthorizationByID(context.Background(), tx, influxdb.ID(i)) + auth, err := store.GetAuthorizationByID(context.Background(), tx, platform.ID(i)) if err != nil { t.Fatalf("Could not get authorization [Error]: %v", err) } auth.Status = influxdb.Inactive - _, err = store.UpdateAuthorization(context.Background(), tx, influxdb.ID(i), auth) + _, err = store.UpdateAuthorization(context.Background(), tx, platform.ID(i), auth) if err != nil { t.Fatalf("Could not get updated authorization [Error]: %v", err) } @@ -131,16 +132,16 @@ func TestAuth(t *testing.T) { results: func(t *testing.T, store *authorization.Store, tx kv.Tx) { for i := 1; i <= 10; i++ { - auth, err := store.GetAuthorizationByID(context.Background(), tx, influxdb.ID(i)) + auth, err := store.GetAuthorizationByID(context.Background(), tx, platform.ID(i)) if err != nil { t.Fatalf("Could not get authorization [Error]: %v", err) } expectedAuth := &influxdb.Authorization{ - ID: influxdb.ID(i), + ID: platform.ID(i), Token: fmt.Sprintf("randomtoken%d", i), - OrgID: influxdb.ID(i), - UserID: influxdb.ID(i), + OrgID: platform.ID(i), + UserID: platform.ID(i), Status: influxdb.Inactive, } @@ -155,7 +156,7 @@ func TestAuth(t *testing.T) { setup: setup, update: func(t *testing.T, store *authorization.Store, tx kv.Tx) { for i := 1; i <= 10; i++ { - err := 
store.DeleteAuthorization(context.Background(), tx, influxdb.ID(i)) + err := store.DeleteAuthorization(context.Background(), tx, platform.ID(i)) if err != nil { t.Fatalf("Could not delete authorization [Error]: %v", err) } @@ -163,7 +164,7 @@ func TestAuth(t *testing.T) { }, results: func(t *testing.T, store *authorization.Store, tx kv.Tx) { for i := 1; i <= 10; i++ { - _, err := store.GetAuthorizationByID(context.Background(), tx, influxdb.ID(i)) + _, err := store.GetAuthorizationByID(context.Background(), tx, platform.ID(i)) if err == nil { t.Fatal("Authorization was not deleted correctly") } diff --git a/authorizer/agent.go b/authorizer/agent.go index 8c40eda8bb6..62cf1b0c94f 100644 --- a/authorizer/agent.go +++ b/authorizer/agent.go @@ -4,6 +4,8 @@ import ( "context" "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/kit/platform" + "github.com/influxdata/influxdb/v2/kit/platform/errors" ) // AuthAgent provides a means to authenticate users with resource and their associate actions. It @@ -11,7 +13,7 @@ import ( type AuthAgent struct{} // OrgPermissions identifies if a user has access to the org by the specified action. -func (a *AuthAgent) OrgPermissions(ctx context.Context, orgID influxdb.ID, action influxdb.Action, rest ...influxdb.Action) error { +func (a *AuthAgent) OrgPermissions(ctx context.Context, orgID platform.ID, action influxdb.Action, rest ...influxdb.Action) error { for _, action := range append(rest, action) { var err error switch action { @@ -20,7 +22,7 @@ func (a *AuthAgent) OrgPermissions(ctx context.Context, orgID influxdb.ID, actio case influxdb.WriteAction: _, _, err = AuthorizeWriteOrg(ctx, orgID) default: - err = &influxdb.Error{Code: influxdb.EInvalid, Msg: "invalid action provided: " + string(action)} + err = &errors.Error{Code: errors.EInvalid, Msg: "invalid action provided: " + string(action)} } if err != nil { return err @@ -29,13 +31,13 @@ func (a *AuthAgent) OrgPermissions(ctx context.Context, orgID influxdb.ID, actio return nil } -func (a *AuthAgent) IsWritable(ctx context.Context, orgID influxdb.ID, resType influxdb.ResourceType) error { +func (a *AuthAgent) IsWritable(ctx context.Context, orgID platform.ID, resType influxdb.ResourceType) error { _, _, resTypeErr := AuthorizeOrgWriteResource(ctx, resType, orgID) _, _, orgErr := AuthorizeWriteOrg(ctx, orgID) if resTypeErr != nil && orgErr != nil { - return &influxdb.Error{ - Code: influxdb.EUnauthorized, + return &errors.Error{ + Code: errors.EUnauthorized, Msg: "not authorized to create " + string(resType), } } diff --git a/authorizer/agent_test.go b/authorizer/agent_test.go index 0859260d5fd..12fd914f73f 100644 --- a/authorizer/agent_test.go +++ b/authorizer/agent_test.go @@ -7,6 +7,7 @@ import ( "github.com/influxdata/influxdb/v2" "github.com/influxdata/influxdb/v2/authorizer" icontext "github.com/influxdata/influxdb/v2/context" + "github.com/influxdata/influxdb/v2/kit/platform" "github.com/influxdata/influxdb/v2/mock" influxdbtesting "github.com/influxdata/influxdb/v2/testing" "github.com/stretchr/testify/require" @@ -17,7 +18,7 @@ func Test_Agent(t *testing.T) { tests := []struct { name string action influxdb.Action - orgID influxdb.ID + orgID platform.ID permissions []influxdb.Permission shouldErr bool }{ @@ -175,7 +176,7 @@ func Test_Agent(t *testing.T) { tests := []struct { name string resourceType influxdb.ResourceType - orgID influxdb.ID + orgID platform.ID permissions []influxdb.Permission 
shouldErr bool }{ diff --git a/authorizer/annotation.go b/authorizer/annotation.go new file mode 100644 index 00000000000..ef4545a99d3 --- /dev/null +++ b/authorizer/annotation.go @@ -0,0 +1,189 @@ +package authorizer + +import ( + "context" + "fmt" + + "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/kit/platform" + "github.com/influxdata/influxdb/v2/kit/platform/errors" +) + +var _ influxdb.AnnotationService = (*AnnotationService)(nil) + +// AnnotationService wraps an influxdb.AnnotationService and authorizes actions +// against it appropriately. +type AnnotationService struct { + s influxdb.AnnotationService +} + +// NewAnnotationService constructs an instance of an authorizing annotation service +func NewAnnotationService(s influxdb.AnnotationService) *AnnotationService { + return &AnnotationService{ + s: s, + } +} + +// CreateAnnotations checks to see if the authorizer on context has write access for annotations for the provided orgID +func (s *AnnotationService) CreateAnnotations(ctx context.Context, orgID platform.ID, create []influxdb.AnnotationCreate) ([]influxdb.AnnotationEvent, error) { + if _, _, err := AuthorizeCreate(ctx, influxdb.AnnotationsResourceType, orgID); err != nil { + return nil, err + } + + return s.s.CreateAnnotations(ctx, orgID, create) +} + +// ListAnnotations checks to see if the authorizer on context has read access for annotations for the provided orgID +// and then filters the list down to only the resources that are authorized +func (s *AnnotationService) ListAnnotations(ctx context.Context, orgID platform.ID, filter influxdb.AnnotationListFilter) ([]influxdb.StoredAnnotation, error) { + if _, _, err := AuthorizeOrgReadResource(ctx, influxdb.AnnotationsResourceType, orgID); err != nil { + return nil, err + } + + as, err := s.s.ListAnnotations(ctx, orgID, filter) + if err != nil { + return nil, err + } + + as, _, err = AuthorizeFindAnnotations(ctx, as) + return as, err +} + +// GetAnnotation checks to see if the authorizer on context has read access to the requested annotation +func (s *AnnotationService) GetAnnotation(ctx context.Context, id platform.ID) (*influxdb.StoredAnnotation, error) { + a, err := s.s.GetAnnotation(ctx, id) + if err != nil { + return nil, err + } + if _, _, err := AuthorizeRead(ctx, influxdb.AnnotationsResourceType, id, a.OrgID); err != nil { + return nil, err + } + return a, nil +} + +// DeleteAnnotations checks to see if the authorizer on context has write access to the provided orgID +func (s *AnnotationService) DeleteAnnotations(ctx context.Context, orgID platform.ID, delete influxdb.AnnotationDeleteFilter) error { + if _, _, err := AuthorizeOrgWriteResource(ctx, influxdb.AnnotationsResourceType, orgID); err != nil { + return err + } + return s.s.DeleteAnnotations(ctx, orgID, delete) +} + +// DeleteAnnotation checks to see if the authorizer on context has write access to the requested annotation +func (s *AnnotationService) DeleteAnnotation(ctx context.Context, id platform.ID) error { + a, err := s.s.GetAnnotation(ctx, id) + if err != nil { + return err + } + if _, _, err := AuthorizeWrite(ctx, influxdb.AnnotationsResourceType, id, a.OrgID); err != nil { + return err + } + return s.s.DeleteAnnotation(ctx, id) +} + +// UpdateAnnotation checks to see if the authorizer on context has write access to the requested annotation +func (s *AnnotationService) UpdateAnnotation(ctx context.Context, id platform.ID, update influxdb.AnnotationCreate) (*influxdb.AnnotationEvent, error) { + a, err := 
s.s.GetAnnotation(ctx, id) + if err != nil { + return nil, err + } + if _, _, err := AuthorizeWrite(ctx, influxdb.AnnotationsResourceType, id, a.OrgID); err != nil { + return nil, err + } + return s.s.UpdateAnnotation(ctx, id, update) +} + +// ListStreams checks to see if the authorizer on context has read access for streams for the provided orgID +// and then filters the list down to only the resources that are authorized +func (s *AnnotationService) ListStreams(ctx context.Context, orgID platform.ID, filter influxdb.StreamListFilter) ([]influxdb.StoredStream, error) { + if _, _, err := AuthorizeOrgReadResource(ctx, influxdb.AnnotationsResourceType, orgID); err != nil { + return nil, err + } + + ss, err := s.s.ListStreams(ctx, orgID, filter) + if err != nil { + return nil, err + } + + ss, _, err = AuthorizeFindStreams(ctx, ss) + return ss, err +} + +// GetStream checks to see if the authorizer on context has read access to the requested stream +func (s *AnnotationService) GetStream(ctx context.Context, id platform.ID) (*influxdb.StoredStream, error) { + st, err := s.s.GetStream(ctx, id) + if err != nil { + return nil, err + } + if _, _, err := AuthorizeRead(ctx, influxdb.AnnotationsResourceType, id, st.OrgID); err != nil { + return nil, err + } + return st, nil +} + +func (s *AnnotationService) CreateOrUpdateStream(ctx context.Context, orgID platform.ID, stream influxdb.Stream) (*influxdb.ReadStream, error) { + // We need to know if the request is creating a new stream, or updating an existing stream to check + // permissions appropriately + + // Get the stream by name. An empty slice will be returned if the stream doesn't exist + // note: a given org can only have one stream by the same name. this constraint is enforced in the database schema + streams, err := s.s.ListStreams(ctx, orgID, influxdb.StreamListFilter{ + StreamIncludes: []string{stream.Name}, + }) + if err != nil { + return nil, err + } + + // update an already existing stream + if len(streams) == 1 { + return s.UpdateStream(ctx, streams[0].ID, stream) + } + + // create a new stream if one doesn't already exist + if len(streams) == 0 { + if _, _, err := AuthorizeCreate(ctx, influxdb.AnnotationsResourceType, orgID); err != nil { + return nil, err + } + + return s.s.CreateOrUpdateStream(ctx, orgID, stream) + } + + // if multiple streams were returned somehow, return an error + // this should never happen, so return a server error + return nil, &errors.Error{ + Code: errors.EInternal, + Msg: fmt.Sprintf("more than one stream named %q for org %q", streams[0].Name, orgID), + } +} + +// UpdateStream checks to see if the authorizer on context has write access to the requested stream +func (s *AnnotationService) UpdateStream(ctx context.Context, id platform.ID, stream influxdb.Stream) (*influxdb.ReadStream, error) { + st, err := s.s.GetStream(ctx, id) + if err != nil { + return nil, err + } + if _, _, err := AuthorizeWrite(ctx, influxdb.AnnotationsResourceType, id, st.OrgID); err != nil { + return nil, err + } + return s.s.UpdateStream(ctx, id, stream) +} + +// DeleteStreams checks to see if the authorizer on context has write access to the provided orgID +func (s *AnnotationService) DeleteStreams(ctx context.Context, orgID platform.ID, delete influxdb.BasicStream) error { + if _, _, err := AuthorizeOrgWriteResource(ctx, influxdb.AnnotationsResourceType, orgID); err != nil { + return err + } + return s.s.DeleteStreams(ctx, orgID, delete) +} + +// DeleteStreamByID checks to see if the authorizer on context has write access to the 
requested stream +func (s *AnnotationService) DeleteStreamByID(ctx context.Context, id platform.ID) error { + st, err := s.s.GetStream(ctx, id) + if err != nil { + return err + } + if _, _, err := AuthorizeWrite(ctx, influxdb.AnnotationsResourceType, id, st.OrgID); err != nil { + return err + } + return s.s.DeleteStreamByID(ctx, id) +} diff --git a/authorizer/annotation_test.go b/authorizer/annotation_test.go new file mode 100644 index 00000000000..803d5b54c6b --- /dev/null +++ b/authorizer/annotation_test.go @@ -0,0 +1,725 @@ +package authorizer_test + +import ( + "context" + "fmt" + "testing" + + "github.com/golang/mock/gomock" + "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/authorizer" + influxdbcontext "github.com/influxdata/influxdb/v2/context" + "github.com/influxdata/influxdb/v2/kit/platform" + "github.com/influxdata/influxdb/v2/kit/platform/errors" + "github.com/influxdata/influxdb/v2/mock" + influxdbtesting "github.com/influxdata/influxdb/v2/testing" + "github.com/stretchr/testify/require" +) + +var ( + annOrgID1 = influxdbtesting.IDPtr(1) + annOrgID2 = influxdbtesting.IDPtr(10) + rID = influxdbtesting.IDPtr(2) +) + +func Test_CreateAnnotations(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + wantRet []influxdb.AnnotationEvent + wantErr error + }{ + { + "authorized to create annotation(s) with the specified org", + []influxdb.AnnotationEvent{{ID: *rID}}, + nil, + }, + { + "not authorized to create annotation(s) with the specified org", + nil, + &errors.Error{ + Msg: fmt.Sprintf("write:orgs/%s/annotations is unauthorized", annOrgID1), + Code: errors.EUnauthorized, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrlr := gomock.NewController(t) + svc := mock.NewMockAnnotationService(ctrlr) + s := authorizer.NewAnnotationService(svc) + + var perm influxdb.Permission + if tt.wantErr == nil { + perm = newTestAnnotationsPermission(influxdb.WriteAction, annOrgID1) + svc.EXPECT(). + CreateAnnotations(gomock.Any(), *annOrgID1, []influxdb.AnnotationCreate{{}}). + Return(tt.wantRet, nil) + } else { + perm = newTestAnnotationsPermission(influxdb.ReadAction, annOrgID1) + } + + ctx := influxdbcontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(false, []influxdb.Permission{perm})) + got, err := s.CreateAnnotations(ctx, *annOrgID1, []influxdb.AnnotationCreate{{}}) + require.Equal(t, tt.wantErr, err) + require.Equal(t, tt.wantRet, got) + }) + } +} + +func Test_ListAnnotations(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + wantRet []influxdb.StoredAnnotation + wantErr error + }{ + { + "authorized to list annotations for the specified org", + []influxdb.StoredAnnotation{}, + nil, + }, + { + "not authorized to list annotations for the specified org", + nil, + &errors.Error{ + Msg: fmt.Sprintf("read:orgs/%s/annotations is unauthorized", annOrgID1), + Code: errors.EUnauthorized, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrlr := gomock.NewController(t) + svc := mock.NewMockAnnotationService(ctrlr) + s := authorizer.NewAnnotationService(svc) + + var perm influxdb.Permission + if tt.wantErr == nil { + perm = newTestAnnotationsPermission(influxdb.ReadAction, annOrgID1) + svc.EXPECT(). + ListAnnotations(gomock.Any(), *annOrgID1, influxdb.AnnotationListFilter{}). 
+ Return(tt.wantRet, nil) + } + + ctx := influxdbcontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(false, []influxdb.Permission{perm})) + got, err := s.ListAnnotations(ctx, *annOrgID1, influxdb.AnnotationListFilter{}) + require.Equal(t, tt.wantErr, err) + require.Equal(t, tt.wantRet, got) + }) + } +} + +func Test_GetAnnotation(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + permissionOrg *platform.ID + wantRet *influxdb.StoredAnnotation + wantErr error + }{ + { + "authorized to access annotation by id", + annOrgID1, + &influxdb.StoredAnnotation{ + ID: *rID, + OrgID: *annOrgID1, + }, + nil, + }, + { + "not authorized to access annotation by id", + annOrgID2, + nil, + &errors.Error{ + Msg: fmt.Sprintf("read:orgs/%s/annotations/%s is unauthorized", annOrgID1, rID), + Code: errors.EUnauthorized, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrlr := gomock.NewController(t) + svc := mock.NewMockAnnotationService(ctrlr) + s := authorizer.NewAnnotationService(svc) + + svc.EXPECT(). + GetAnnotation(gomock.Any(), *rID). + Return(&influxdb.StoredAnnotation{ + ID: *rID, + OrgID: *annOrgID1, + }, nil) + + perm := newTestAnnotationsPermission(influxdb.ReadAction, tt.permissionOrg) + + ctx := context.Background() + ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{perm})) + got, err := s.GetAnnotation(ctx, *rID) + require.Equal(t, tt.wantErr, err) + require.Equal(t, tt.wantRet, got) + }) + } +} + +func Test_DeleteAnnotations(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + wantErr error + }{ + { + "authorized to delete annotations with the specified org", + nil, + }, + { + "not authorized to delete annotations with the specified org", + &errors.Error{ + Msg: fmt.Sprintf("write:orgs/%s/annotations is unauthorized", annOrgID1), + Code: errors.EUnauthorized, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrlr := gomock.NewController(t) + svc := mock.NewMockAnnotationService(ctrlr) + s := authorizer.NewAnnotationService(svc) + + var perm influxdb.Permission + if tt.wantErr == nil { + perm = newTestAnnotationsPermission(influxdb.WriteAction, annOrgID1) + svc.EXPECT(). + DeleteAnnotations(gomock.Any(), *annOrgID1, influxdb.AnnotationDeleteFilter{}). + Return(nil) + } else { + perm = newTestAnnotationsPermission(influxdb.ReadAction, annOrgID1) + } + + ctx := influxdbcontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(false, []influxdb.Permission{perm})) + err := s.DeleteAnnotations(ctx, *annOrgID1, influxdb.AnnotationDeleteFilter{}) + require.Equal(t, tt.wantErr, err) + }) + } +} + +func Test_DeleteAnnotation(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + permissionOrg *platform.ID + wantErr error + }{ + { + "authorized to delete annotation by id", + annOrgID1, + nil, + }, + { + "not authorized to delete annotation by id", + annOrgID2, + &errors.Error{ + Msg: fmt.Sprintf("write:orgs/%s/annotations/%s is unauthorized", annOrgID1, rID), + Code: errors.EUnauthorized, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrlr := gomock.NewController(t) + svc := mock.NewMockAnnotationService(ctrlr) + s := authorizer.NewAnnotationService(svc) + + svc.EXPECT(). + GetAnnotation(gomock.Any(), *rID). 
+ Return(&influxdb.StoredAnnotation{ + ID: *rID, + OrgID: *annOrgID1, + }, nil) + + perm := newTestAnnotationsPermission(influxdb.WriteAction, tt.permissionOrg) + + if tt.wantErr == nil { + svc.EXPECT(). + DeleteAnnotation(gomock.Any(), *rID). + Return(nil) + } + + ctx := context.Background() + ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{perm})) + err := s.DeleteAnnotation(ctx, *rID) + require.Equal(t, tt.wantErr, err) + }) + } +} + +func Test_UpdateAnnotation(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + permissionOrg *platform.ID + wantRet *influxdb.AnnotationEvent + wantErr error + }{ + { + "authorized to update annotation by id", + annOrgID1, + &influxdb.AnnotationEvent{}, + nil, + }, + { + "not authorized to update annotation by id", + annOrgID2, + nil, + &errors.Error{ + Msg: fmt.Sprintf("write:orgs/%s/annotations/%s is unauthorized", annOrgID1, rID), + Code: errors.EUnauthorized, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrlr := gomock.NewController(t) + svc := mock.NewMockAnnotationService(ctrlr) + s := authorizer.NewAnnotationService(svc) + + svc.EXPECT(). + GetAnnotation(gomock.Any(), *rID). + Return(&influxdb.StoredAnnotation{ + ID: *rID, + OrgID: *annOrgID1, + }, nil) + + perm := newTestAnnotationsPermission(influxdb.WriteAction, tt.permissionOrg) + + if tt.wantErr == nil { + svc.EXPECT(). + UpdateAnnotation(gomock.Any(), *rID, influxdb.AnnotationCreate{}). + Return(tt.wantRet, nil) + } + + ctx := context.Background() + ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{perm})) + got, err := s.UpdateAnnotation(ctx, *rID, influxdb.AnnotationCreate{}) + require.Equal(t, tt.wantErr, err) + require.Equal(t, tt.wantRet, got) + }) + } +} + +func Test_ListStreams(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + wantRet []influxdb.StoredStream + wantErr error + }{ + { + "authorized to list streams for the specified org", + []influxdb.StoredStream{}, + nil, + }, + { + "not authorized to list streams for the specified org", + nil, + &errors.Error{ + Msg: fmt.Sprintf("read:orgs/%s/annotations is unauthorized", annOrgID1), + Code: errors.EUnauthorized, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrlr := gomock.NewController(t) + svc := mock.NewMockAnnotationService(ctrlr) + s := authorizer.NewAnnotationService(svc) + + var perm influxdb.Permission + if tt.wantErr == nil { + perm = newTestAnnotationsPermission(influxdb.ReadAction, annOrgID1) + svc.EXPECT(). + ListStreams(gomock.Any(), *annOrgID1, influxdb.StreamListFilter{}). 
+ Return(tt.wantRet, nil) + } + + ctx := influxdbcontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(false, []influxdb.Permission{perm})) + got, err := s.ListStreams(ctx, *annOrgID1, influxdb.StreamListFilter{}) + require.Equal(t, tt.wantErr, err) + require.Equal(t, tt.wantRet, got) + }) + } +} + +func Test_GetStream(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + permissionOrg *platform.ID + wantRet *influxdb.StoredStream + wantErr error + }{ + { + "authorized to access stream by id", + annOrgID1, + &influxdb.StoredStream{ + ID: *rID, + OrgID: *annOrgID1, + }, + nil, + }, + { + "not authorized to access stream by id", + annOrgID2, + nil, + &errors.Error{ + Msg: fmt.Sprintf("read:orgs/%s/annotations/%s is unauthorized", annOrgID1, rID), + Code: errors.EUnauthorized, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrlr := gomock.NewController(t) + svc := mock.NewMockAnnotationService(ctrlr) + s := authorizer.NewAnnotationService(svc) + + svc.EXPECT(). + GetStream(gomock.Any(), *rID). + Return(&influxdb.StoredStream{ + ID: *rID, + OrgID: *annOrgID1, + }, nil) + + perm := newTestAnnotationsPermission(influxdb.ReadAction, tt.permissionOrg) + + ctx := context.Background() + ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{perm})) + got, err := s.GetStream(ctx, *rID) + require.Equal(t, tt.wantErr, err) + require.Equal(t, tt.wantRet, got) + }) + } +} + +func Test_CreateOrUpdateStream(t *testing.T) { + t.Parallel() + + var ( + testStreamName = "test stream" + testStream = influxdb.Stream{ + Name: testStreamName, + } + ) + + t.Run("updating a stream", func(t *testing.T) { + tests := []struct { + name string + permissionOrg *platform.ID + existingStreams []influxdb.StoredStream + getStreamRet *influxdb.StoredStream + wantRet *influxdb.ReadStream + wantErr error + }{ + { + "authorized to update an existing stream", + annOrgID1, + []influxdb.StoredStream{{ID: *rID, OrgID: *annOrgID1}}, + &influxdb.StoredStream{ID: *rID, OrgID: *annOrgID1}, + &influxdb.ReadStream{ID: *rID}, + nil, + }, + { + "not authorized to update an existing stream", + annOrgID2, + []influxdb.StoredStream{{ID: *rID, OrgID: *annOrgID1}}, + &influxdb.StoredStream{ID: *rID, OrgID: *annOrgID1}, + nil, + &errors.Error{ + Msg: fmt.Sprintf("write:orgs/%s/annotations/%s is unauthorized", annOrgID1, rID), + Code: errors.EUnauthorized, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrlr := gomock.NewController(t) + svc := mock.NewMockAnnotationService(ctrlr) + s := authorizer.NewAnnotationService(svc) + + svc.EXPECT(). + ListStreams(gomock.Any(), *tt.permissionOrg, influxdb.StreamListFilter{ + StreamIncludes: []string{testStreamName}, + }). + Return(tt.existingStreams, nil) + + svc.EXPECT(). + GetStream(gomock.Any(), tt.existingStreams[0].ID). + Return(tt.getStreamRet, nil) + + if tt.wantErr == nil { + svc.EXPECT(). + UpdateStream(gomock.Any(), tt.existingStreams[0].ID, testStream). 
+ Return(tt.wantRet, tt.wantErr) + } + + perm := newTestAnnotationsPermission(influxdb.WriteAction, tt.permissionOrg) + ctx := influxdbcontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(false, []influxdb.Permission{perm})) + got, err := s.CreateOrUpdateStream(ctx, *tt.permissionOrg, testStream) + require.Equal(t, tt.wantErr, err) + require.Equal(t, tt.wantRet, got) + }) + } + }) + + t.Run("creating a stream", func(t *testing.T) { + tests := []struct { + name string + existingStreams []influxdb.StoredStream + wantRet *influxdb.ReadStream + wantErr error + }{ + { + "authorized to create a stream with the specified org", + []influxdb.StoredStream{}, + &influxdb.ReadStream{}, + nil, + }, + { + "not authorized to create a stream with the specified org", + []influxdb.StoredStream{}, + nil, + &errors.Error{ + Msg: fmt.Sprintf("write:orgs/%s/annotations is unauthorized", annOrgID1), + Code: errors.EUnauthorized, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrlr := gomock.NewController(t) + svc := mock.NewMockAnnotationService(ctrlr) + s := authorizer.NewAnnotationService(svc) + + svc.EXPECT(). + ListStreams(gomock.Any(), *annOrgID1, influxdb.StreamListFilter{ + StreamIncludes: []string{testStreamName}, + }). + Return(tt.existingStreams, nil) + + var perm influxdb.Permission + if tt.wantErr == nil { + perm = newTestAnnotationsPermission(influxdb.WriteAction, annOrgID1) + svc.EXPECT(). + CreateOrUpdateStream(gomock.Any(), *annOrgID1, testStream). + Return(tt.wantRet, nil) + } else { + perm = newTestAnnotationsPermission(influxdb.ReadAction, annOrgID1) + } + + ctx := influxdbcontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(false, []influxdb.Permission{perm})) + got, err := s.CreateOrUpdateStream(ctx, *annOrgID1, testStream) + require.Equal(t, tt.wantErr, err) + require.Equal(t, tt.wantRet, got) + }) + } + }) + + t.Run("stream list longer than 1 returns a server error", func(t *testing.T) { + ctrlr := gomock.NewController(t) + svc := mock.NewMockAnnotationService(ctrlr) + s := authorizer.NewAnnotationService(svc) + + svc.EXPECT(). + ListStreams(gomock.Any(), *annOrgID1, influxdb.StreamListFilter{ + StreamIncludes: []string{testStreamName}, + }). + Return([]influxdb.StoredStream{{Name: testStreamName}, {Name: testStreamName}}, nil) + + wantErr := &errors.Error{ + Code: errors.EInternal, + Msg: fmt.Sprintf("more than one stream named %q for org %q", testStreamName, annOrgID1), + } + + got, err := s.CreateOrUpdateStream(context.Background(), *annOrgID1, testStream) + require.Nil(t, got) + require.Equal(t, err, wantErr) + }) +} + +func Test_UpdateStream(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + permissionOrg *platform.ID + wantRet *influxdb.ReadStream + wantErr error + }{ + { + "authorized to update stream by id", + annOrgID1, + &influxdb.ReadStream{}, + nil, + }, + { + "not authorized to update stream by id", + annOrgID2, + nil, + &errors.Error{ + Msg: fmt.Sprintf("write:orgs/%s/annotations/%s is unauthorized", annOrgID1, rID), + Code: errors.EUnauthorized, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrlr := gomock.NewController(t) + svc := mock.NewMockAnnotationService(ctrlr) + s := authorizer.NewAnnotationService(svc) + + svc.EXPECT(). + GetStream(gomock.Any(), *rID). 
+ Return(&influxdb.StoredStream{ + ID: *rID, + OrgID: *annOrgID1, + }, nil) + + perm := newTestAnnotationsPermission(influxdb.WriteAction, tt.permissionOrg) + + if tt.wantErr == nil { + svc.EXPECT(). + UpdateStream(gomock.Any(), *rID, influxdb.Stream{}). + Return(tt.wantRet, nil) + } + + ctx := context.Background() + ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{perm})) + got, err := s.UpdateStream(ctx, *rID, influxdb.Stream{}) + require.Equal(t, tt.wantErr, err) + require.Equal(t, tt.wantRet, got) + }) + } +} + +func Test_DeleteStreams(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + wantErr error + }{ + { + "authorized to delete streams with the specified org", + nil, + }, + { + "not authorized to delete streams with the specified org", + &errors.Error{ + Msg: fmt.Sprintf("write:orgs/%s/annotations is unauthorized", annOrgID1), + Code: errors.EUnauthorized, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrlr := gomock.NewController(t) + svc := mock.NewMockAnnotationService(ctrlr) + s := authorizer.NewAnnotationService(svc) + + var perm influxdb.Permission + if tt.wantErr == nil { + perm = newTestAnnotationsPermission(influxdb.WriteAction, annOrgID1) + svc.EXPECT(). + DeleteStreams(gomock.Any(), *annOrgID1, influxdb.BasicStream{}). + Return(nil) + } else { + perm = newTestAnnotationsPermission(influxdb.ReadAction, annOrgID1) + } + + ctx := influxdbcontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(false, []influxdb.Permission{perm})) + err := s.DeleteStreams(ctx, *annOrgID1, influxdb.BasicStream{}) + require.Equal(t, tt.wantErr, err) + }) + } +} + +func Test_DeleteStreamByID(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + permissionOrg *platform.ID + wantErr error + }{ + { + "authorized to delete stream by id", + annOrgID1, + nil, + }, + { + "not authorized to delete stream by id", + annOrgID2, + &errors.Error{ + Msg: fmt.Sprintf("write:orgs/%s/annotations/%s is unauthorized", annOrgID1, rID), + Code: errors.EUnauthorized, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrlr := gomock.NewController(t) + svc := mock.NewMockAnnotationService(ctrlr) + s := authorizer.NewAnnotationService(svc) + + svc.EXPECT(). + GetStream(gomock.Any(), *rID). + Return(&influxdb.StoredStream{ + ID: *rID, + OrgID: *annOrgID1, + }, nil) + + perm := newTestAnnotationsPermission(influxdb.WriteAction, tt.permissionOrg) + + if tt.wantErr == nil { + svc.EXPECT(). + DeleteStreamByID(gomock.Any(), *rID). 
+ Return(nil) + } + + ctx := context.Background() + ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{perm})) + err := s.DeleteStreamByID(ctx, *rID) + require.Equal(t, tt.wantErr, err) + }) + } +} + +func newTestAnnotationsPermission(action influxdb.Action, orgID *platform.ID) influxdb.Permission { + return influxdb.Permission{ + Action: action, + Resource: influxdb.Resource{ + Type: influxdb.AnnotationsResourceType, + OrgID: orgID, + }, + } +} diff --git a/authorizer/auth.go b/authorizer/auth.go index 32663effabc..3e70d0fbe5d 100644 --- a/authorizer/auth.go +++ b/authorizer/auth.go @@ -5,6 +5,8 @@ import ( "fmt" "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/kit/platform" + "github.com/influxdata/influxdb/v2/kit/platform/errors" ) var _ influxdb.AuthorizationService = (*AuthorizationService)(nil) @@ -23,7 +25,7 @@ func NewAuthorizationService(s influxdb.AuthorizationService) *AuthorizationServ } // FindAuthorizationByID checks to see if the authorizer on context has read access to the id provided. -func (s *AuthorizationService) FindAuthorizationByID(ctx context.Context, id influxdb.ID) (*influxdb.Authorization, error) { +func (s *AuthorizationService) FindAuthorizationByID(ctx context.Context, id platform.ID) (*influxdb.Authorization, error) { a, err := s.s.FindAuthorizationByID(ctx, id) if err != nil { return nil, err @@ -78,7 +80,7 @@ func (s *AuthorizationService) CreateAuthorization(ctx context.Context, a *influ } // UpdateAuthorization checks to see if the authorizer on context has write access to the authorization provided. -func (s *AuthorizationService) UpdateAuthorization(ctx context.Context, id influxdb.ID, upd *influxdb.AuthorizationUpdate) (*influxdb.Authorization, error) { +func (s *AuthorizationService) UpdateAuthorization(ctx context.Context, id platform.ID, upd *influxdb.AuthorizationUpdate) (*influxdb.Authorization, error) { a, err := s.s.FindAuthorizationByID(ctx, id) if err != nil { return nil, err @@ -93,7 +95,7 @@ func (s *AuthorizationService) UpdateAuthorization(ctx context.Context, id influ } // DeleteAuthorization checks to see if the authorizer on context has write access to the authorization provided. 
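The files in this diff stack three decorators over the same influxdb.AuthorizationService interface: logging (middleware_logging.go), metrics (middleware_metrics.go), and the authorizer shown here. A self-contained sketch of that decorator shape, using a hypothetical one-method interface rather than the repo's actual constructors:

package main

import (
	"context"
	"fmt"
)

// Pinger stands in for influxdb.AuthorizationService: each middleware layer
// implements the same interface as the service it wraps.
type Pinger interface {
	Ping(ctx context.Context) error
}

type base struct{}

func (base) Ping(context.Context) error { return nil }

// loggingPinger mirrors the AuthLogger shape: delegate to the inner
// implementation, then observe the result in a deferred hook.
type loggingPinger struct{ inner Pinger }

func (l loggingPinger) Ping(ctx context.Context) (err error) {
	defer func() { fmt.Println("ping returned:", err) }()
	return l.inner.Ping(ctx)
}

func main() {
	var svc Pinger = base{}
	svc = loggingPinger{inner: svc} // the outermost layer runs first
	_ = svc.Ping(context.Background())
}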
-func (s *AuthorizationService) DeleteAuthorization(ctx context.Context, id influxdb.ID) error { +func (s *AuthorizationService) DeleteAuthorization(ctx context.Context, id platform.ID) error { a, err := s.s.FindAuthorizationByID(ctx, id) if err != nil { return err @@ -111,10 +113,10 @@ func (s *AuthorizationService) DeleteAuthorization(ctx context.Context, id influ func VerifyPermissions(ctx context.Context, ps []influxdb.Permission) error { for _, p := range ps { if err := IsAllowed(ctx, p); err != nil { - return &influxdb.Error{ + return &errors.Error{ Err: err, Msg: fmt.Sprintf("permission %s is not allowed", p), - Code: influxdb.EForbidden, + Code: errors.EForbidden, } } } diff --git a/authorizer/auth_test.go b/authorizer/auth_test.go index c75c37c93dc..3f1538db230 100644 --- a/authorizer/auth_test.go +++ b/authorizer/auth_test.go @@ -10,6 +10,8 @@ import ( "github.com/influxdata/influxdb/v2" "github.com/influxdata/influxdb/v2/authorizer" influxdbcontext "github.com/influxdata/influxdb/v2/context" + "github.com/influxdata/influxdb/v2/kit/platform" + "github.com/influxdata/influxdb/v2/kit/platform/errors" "github.com/influxdata/influxdb/v2/mock" influxdbtesting "github.com/influxdata/influxdb/v2/testing" ) @@ -93,9 +95,9 @@ func TestAuthorizationService_ReadAuthorization(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "read:orgs/0000000000000001/authorizations/000000000000000a is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, authorizations: []*influxdb.Authorization{}, }, @@ -121,9 +123,9 @@ func TestAuthorizationService_ReadAuthorization(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "read:users/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, authorizations: []*influxdb.Authorization{}, }, @@ -133,7 +135,7 @@ func TestAuthorizationService_ReadAuthorization(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { m := &mock.AuthorizationService{} - m.FindAuthorizationByIDFn = func(ctx context.Context, id influxdb.ID) (*influxdb.Authorization, error) { + m.FindAuthorizationByIDFn = func(ctx context.Context, id platform.ID) (*influxdb.Authorization, error) { return &influxdb.Authorization{ ID: id, UserID: 1, @@ -241,9 +243,9 @@ func TestAuthorizationService_WriteAuthorization(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/0000000000000001/authorizations/000000000000000a is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -268,9 +270,9 @@ func TestAuthorizationService_WriteAuthorization(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:users/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -279,7 +281,7 @@ func TestAuthorizationService_WriteAuthorization(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { m := &mock.AuthorizationService{} - m.FindAuthorizationByIDFn = func(ctx context.Context, id influxdb.ID) (*influxdb.Authorization, error) { + m.FindAuthorizationByIDFn = func(ctx context.Context, id platform.ID) (*influxdb.Authorization, error) { return &influxdb.Authorization{ ID: id, UserID: 1, @@ -289,10 +291,10 @@ func TestAuthorizationService_WriteAuthorization(t *testing.T) { m.CreateAuthorizationFn = func(ctx 
context.Context, a *influxdb.Authorization) error { return nil } - m.DeleteAuthorizationFn = func(ctx context.Context, id influxdb.ID) error { + m.DeleteAuthorizationFn = func(ctx context.Context, id platform.ID) error { return nil } - m.UpdateAuthorizationFn = func(ctx context.Context, id influxdb.ID, upd *influxdb.AuthorizationUpdate) (*influxdb.Authorization, error) { + m.UpdateAuthorizationFn = func(ctx context.Context, id platform.ID, upd *influxdb.AuthorizationUpdate) (*influxdb.Authorization, error) { return nil, nil } s := authorizer.NewAuthorizationService(m) @@ -372,9 +374,9 @@ func TestAuthorizationService_CreateAuthorization(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/0000000000000001/authorizations is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -399,9 +401,9 @@ func TestAuthorizationService_CreateAuthorization(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:users/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -410,7 +412,7 @@ func TestAuthorizationService_CreateAuthorization(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { m := &mock.AuthorizationService{} - m.FindAuthorizationByIDFn = func(ctx context.Context, id influxdb.ID) (*influxdb.Authorization, error) { + m.FindAuthorizationByIDFn = func(ctx context.Context, id platform.ID) (*influxdb.Authorization, error) { return &influxdb.Authorization{ ID: id, UserID: 1, @@ -420,10 +422,10 @@ func TestAuthorizationService_CreateAuthorization(t *testing.T) { m.CreateAuthorizationFn = func(ctx context.Context, a *influxdb.Authorization) error { return nil } - m.DeleteAuthorizationFn = func(ctx context.Context, id influxdb.ID) error { + m.DeleteAuthorizationFn = func(ctx context.Context, id platform.ID) error { return nil } - m.UpdateAuthorizationFn = func(ctx context.Context, id influxdb.ID, upd *influxdb.AuthorizationUpdate) (*influxdb.Authorization, error) { + m.UpdateAuthorizationFn = func(ctx context.Context, id platform.ID, upd *influxdb.AuthorizationUpdate) (*influxdb.Authorization, error) { return nil, nil } s := authorizer.NewAuthorizationService(m) diff --git a/authorizer/authorize.go b/authorizer/authorize.go index ab968101091..537f45f9f6b 100644 --- a/authorizer/authorize.go +++ b/authorizer/authorize.go @@ -6,6 +6,8 @@ import ( "github.com/influxdata/influxdb/v2" icontext "github.com/influxdata/influxdb/v2/context" + "github.com/influxdata/influxdb/v2/kit/platform" + "github.com/influxdata/influxdb/v2/kit/platform/errors" ) func isAllowedAll(a influxdb.Authorizer, permissions []influxdb.Permission) error { @@ -16,8 +18,8 @@ func isAllowedAll(a influxdb.Authorizer, permissions []influxdb.Permission) erro for _, p := range permissions { if !pset.Allowed(p) { - return &influxdb.Error{ - Code: influxdb.EUnauthorized, + return &errors.Error{ + Code: errors.EUnauthorized, Msg: fmt.Sprintf("%s is unauthorized", p), } } @@ -61,13 +63,13 @@ func IsAllowedAny(ctx context.Context, permissions []influxdb.Permission) error return nil } } - return &influxdb.Error{ - Code: influxdb.EUnauthorized, + return &errors.Error{ + Code: errors.EUnauthorized, Msg: fmt.Sprintf("none of %v is authorized", permissions), } } -func authorize(ctx context.Context, a influxdb.Action, rt influxdb.ResourceType, rid, oid *influxdb.ID) (influxdb.Authorizer, influxdb.Permission, error) { +func 
authorize(ctx context.Context, a influxdb.Action, rt influxdb.ResourceType, rid, oid *platform.ID) (influxdb.Authorizer, influxdb.Permission, error) { var p *influxdb.Permission var err error if rid != nil && oid != nil { @@ -89,7 +91,7 @@ func authorize(ctx context.Context, a influxdb.Action, rt influxdb.ResourceType, return auth, *p, isAllowed(auth, *p) } -func authorizeReadSystemBucket(ctx context.Context, bid, oid influxdb.ID) (influxdb.Authorizer, influxdb.Permission, error) { +func authorizeReadSystemBucket(ctx context.Context, bid, oid platform.ID) (influxdb.Authorizer, influxdb.Permission, error) { return AuthorizeReadOrg(ctx, oid) } @@ -98,7 +100,7 @@ func authorizeReadSystemBucket(ctx context.Context, bid, oid influxdb.ID) (influ // AuthorizeRead(ctx, influxdb.BucketsResourceType, b.ID, b.OrgID) // use: // AuthorizeReadBucket(ctx, b.Type, b.ID, b.OrgID) -func AuthorizeReadBucket(ctx context.Context, bt influxdb.BucketType, bid, oid influxdb.ID) (influxdb.Authorizer, influxdb.Permission, error) { +func AuthorizeReadBucket(ctx context.Context, bt influxdb.BucketType, bid, oid platform.ID) (influxdb.Authorizer, influxdb.Permission, error) { switch bt { case influxdb.BucketTypeSystem: return authorizeReadSystemBucket(ctx, bid, oid) @@ -109,54 +111,54 @@ func AuthorizeReadBucket(ctx context.Context, bt influxdb.BucketType, bid, oid i // AuthorizeRead authorizes the user in the context to read the specified resource (identified by its type, ID, and orgID). // NOTE: authorization will pass even if the user only has permissions for the resource type and organization ID only. -func AuthorizeRead(ctx context.Context, rt influxdb.ResourceType, rid, oid influxdb.ID) (influxdb.Authorizer, influxdb.Permission, error) { +func AuthorizeRead(ctx context.Context, rt influxdb.ResourceType, rid, oid platform.ID) (influxdb.Authorizer, influxdb.Permission, error) { return authorize(ctx, influxdb.ReadAction, rt, &rid, &oid) } // AuthorizeWrite authorizes the user in the context to write the specified resource (identified by its type, ID, and orgID). // NOTE: authorization will pass even if the user only has permissions for the resource type and organization ID only. -func AuthorizeWrite(ctx context.Context, rt influxdb.ResourceType, rid, oid influxdb.ID) (influxdb.Authorizer, influxdb.Permission, error) { +func AuthorizeWrite(ctx context.Context, rt influxdb.ResourceType, rid, oid platform.ID) (influxdb.Authorizer, influxdb.Permission, error) { return authorize(ctx, influxdb.WriteAction, rt, &rid, &oid) } // AuthorizeRead authorizes the user in the context to read the specified resource (identified by its type, ID). // NOTE: authorization will pass only if the user has a specific permission for the given resource. -func AuthorizeReadResource(ctx context.Context, rt influxdb.ResourceType, rid influxdb.ID) (influxdb.Authorizer, influxdb.Permission, error) { +func AuthorizeReadResource(ctx context.Context, rt influxdb.ResourceType, rid platform.ID) (influxdb.Authorizer, influxdb.Permission, error) { return authorize(ctx, influxdb.ReadAction, rt, &rid, nil) } // AuthorizeWrite authorizes the user in the context to write the specified resource (identified by its type, ID). // NOTE: authorization will pass only if the user has a specific permission for the given resource. 
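// --- Illustrative sketch (not part of the diff): the relocated error type in use.
// Everything below is assumed only from what the hunks above show: errors.Error
// pairs a machine-readable Code with a human-readable Msg, and errors.ErrorCode
// extracts the code from any error so callers can branch on EUnauthorized
// without string-matching the message.
package main

import (
    "fmt"

    "github.com/influxdata/influxdb/v2/kit/platform/errors"
)

func main() {
    err := &errors.Error{
        Code: errors.EUnauthorized,
        Msg:  "write:orgs/0000000000000001/authorizations is unauthorized",
    }
    // Branch on the code, not the message, mirroring the checks in the hunks.
    if errors.ErrorCode(err) == errors.EUnauthorized {
        fmt.Println("caller lacks permission:", err.Msg)
    }
}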
-func AuthorizeWriteResource(ctx context.Context, rt influxdb.ResourceType, rid influxdb.ID) (influxdb.Authorizer, influxdb.Permission, error) { +func AuthorizeWriteResource(ctx context.Context, rt influxdb.ResourceType, rid platform.ID) (influxdb.Authorizer, influxdb.Permission, error) { return authorize(ctx, influxdb.WriteAction, rt, &rid, nil) } // AuthorizeOrgReadResource authorizes the given org to read the resources of the given type. // NOTE: this is pretty much the same as AuthorizeRead, in the case that the resource ID is ignored. // Use it in the case that you do not know which resource in particular you want to give access to. -func AuthorizeOrgReadResource(ctx context.Context, rt influxdb.ResourceType, oid influxdb.ID) (influxdb.Authorizer, influxdb.Permission, error) { +func AuthorizeOrgReadResource(ctx context.Context, rt influxdb.ResourceType, oid platform.ID) (influxdb.Authorizer, influxdb.Permission, error) { return authorize(ctx, influxdb.ReadAction, rt, nil, &oid) } // AuthorizeOrgWriteResource authorizes the given org to write the resources of the given type. // NOTE: this is pretty much the same as AuthorizeWrite, in the case that the resource ID is ignored. // Use it in the case that you do not know which resource in particular you want to give access to. -func AuthorizeOrgWriteResource(ctx context.Context, rt influxdb.ResourceType, oid influxdb.ID) (influxdb.Authorizer, influxdb.Permission, error) { +func AuthorizeOrgWriteResource(ctx context.Context, rt influxdb.ResourceType, oid platform.ID) (influxdb.Authorizer, influxdb.Permission, error) { return authorize(ctx, influxdb.WriteAction, rt, nil, &oid) } // AuthorizeCreate authorizes a user to create a resource of the given type for the given org. -func AuthorizeCreate(ctx context.Context, rt influxdb.ResourceType, oid influxdb.ID) (influxdb.Authorizer, influxdb.Permission, error) { +func AuthorizeCreate(ctx context.Context, rt influxdb.ResourceType, oid platform.ID) (influxdb.Authorizer, influxdb.Permission, error) { return AuthorizeOrgWriteResource(ctx, rt, oid) } // AuthorizeReadOrg authorizes the user to read the given org. -func AuthorizeReadOrg(ctx context.Context, oid influxdb.ID) (influxdb.Authorizer, influxdb.Permission, error) { +func AuthorizeReadOrg(ctx context.Context, oid platform.ID) (influxdb.Authorizer, influxdb.Permission, error) { return authorize(ctx, influxdb.ReadAction, influxdb.OrgsResourceType, &oid, nil) } // AuthorizeWriteOrg authorizes the user to write the given org. -func AuthorizeWriteOrg(ctx context.Context, oid influxdb.ID) (influxdb.Authorizer, influxdb.Permission, error) { +func AuthorizeWriteOrg(ctx context.Context, oid platform.ID) (influxdb.Authorizer, influxdb.Permission, error) { return authorize(ctx, influxdb.WriteAction, influxdb.OrgsResourceType, &oid, nil) } diff --git a/authorizer/authorize_find.go b/authorizer/authorize_find.go index 9fa9fe23cc0..aa6c89fadd9 100644 --- a/authorizer/authorize_find.go +++ b/authorizer/authorize_find.go @@ -4,24 +4,21 @@ import ( "context" "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/kit/platform/errors" + "github.com/influxdata/influxdb/v2/task/taskmodel" ) // AuthorizeFindDBRPs takes the given items and returns only the ones that the user is authorized to access. 
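// --- Illustrative sketch (not part of the diff): "filtering without
// allocating", the slice idiom every AuthorizeFind* helper below relies on.
// Re-slicing the input to zero length keeps its backing array, so kept items
// are compacted in place and no second slice is allocated; the trade-off is
// that the input slice's contents are overwritten.
package main

import "fmt"

func keepEven(xs []int) []int {
    out := xs[:0] // shares xs's backing array
    for _, x := range xs {
        if x%2 == 0 {
            out = append(out, x)
        }
    }
    return out
}

func main() {
    fmt.Println(keepEven([]int{1, 2, 3, 4})) // prints [2 4]
}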
-func AuthorizeFindDBRPs(ctx context.Context, rs []*influxdb.DBRPMappingV2) ([]*influxdb.DBRPMappingV2, int, error) { +func AuthorizeFindDBRPs(ctx context.Context, rs []*influxdb.DBRPMapping) ([]*influxdb.DBRPMapping, int, error) { // This filters without allocating // https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating rrs := rs[:0] for _, r := range rs { - // N.B. we have to check both read and write permissions here to support the legacy write-path, - // which calls AuthorizeFindDBRPs when locating the bucket underlying a DBRP target. _, _, err := AuthorizeRead(ctx, influxdb.BucketsResourceType, r.BucketID, r.OrganizationID) - if err != nil { - _, _, err = AuthorizeWrite(ctx, influxdb.BucketsResourceType, r.BucketID, r.OrganizationID) - } - if err != nil && influxdb.ErrorCode(err) != influxdb.EUnauthorized { + if err != nil && errors.ErrorCode(err) != errors.EUnauthorized { return nil, 0, err } - if influxdb.ErrorCode(err) == influxdb.EUnauthorized { + if errors.ErrorCode(err) == errors.EUnauthorized { continue } rrs = append(rrs, r) @@ -36,17 +33,17 @@ func AuthorizeFindAuthorizations(ctx context.Context, rs []*influxdb.Authorizati rrs := rs[:0] for _, r := range rs { _, _, err := AuthorizeRead(ctx, influxdb.AuthorizationsResourceType, r.ID, r.OrgID) - if err != nil && influxdb.ErrorCode(err) != influxdb.EUnauthorized { + if err != nil && errors.ErrorCode(err) != errors.EUnauthorized { return nil, 0, err } - if influxdb.ErrorCode(err) == influxdb.EUnauthorized { + if errors.ErrorCode(err) == errors.EUnauthorized { continue } _, _, err = AuthorizeReadResource(ctx, influxdb.UsersResourceType, r.UserID) - if err != nil && influxdb.ErrorCode(err) != influxdb.EUnauthorized { + if err != nil && errors.ErrorCode(err) != errors.EUnauthorized { return nil, 0, err } - if influxdb.ErrorCode(err) == influxdb.EUnauthorized { + if errors.ErrorCode(err) == errors.EUnauthorized { continue } rrs = append(rrs, r) @@ -61,10 +58,10 @@ func AuthorizeFindBuckets(ctx context.Context, rs []*influxdb.Bucket) ([]*influx rrs := rs[:0] for _, r := range rs { _, _, err := AuthorizeReadBucket(ctx, r.Type, r.ID, r.OrgID) - if err != nil && influxdb.ErrorCode(err) != influxdb.EUnauthorized { + if err != nil && errors.ErrorCode(err) != errors.EUnauthorized { return nil, 0, err } - if influxdb.ErrorCode(err) == influxdb.EUnauthorized { + if errors.ErrorCode(err) == errors.EUnauthorized { continue } rrs = append(rrs, r) @@ -79,10 +76,64 @@ func AuthorizeFindDashboards(ctx context.Context, rs []*influxdb.Dashboard) ([]* rrs := rs[:0] for _, r := range rs { _, _, err := AuthorizeRead(ctx, influxdb.DashboardsResourceType, r.ID, r.OrganizationID) - if err != nil && influxdb.ErrorCode(err) != influxdb.EUnauthorized { + if err != nil && errors.ErrorCode(err) != errors.EUnauthorized { + return nil, 0, err + } + if errors.ErrorCode(err) == errors.EUnauthorized { + continue + } + rrs = append(rrs, r) + } + return rrs, len(rrs), nil +} + +// AuthorizeFindAnnotations takes the given items and returns only the ones that the user is authorized to read. 
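// --- Illustrative sketch (not part of the diff): the error contract shared by
// all the AuthorizeFind* filters. The generic helper below is hypothetical; it
// only restates the two-branch handling visible in every hunk: EUnauthorized
// silently drops the item, while any other error aborts the whole listing.
// (Generics require Go 1.18+, which matches the cross-builder image above.)
package sketch

import (
    "github.com/influxdata/influxdb/v2/kit/platform/errors"
)

func filterAuthorized[T any](items []T, authorize func(T) error) ([]T, int, error) {
    kept := items[:0]
    for _, it := range items {
        err := authorize(it)
        if err != nil && errors.ErrorCode(err) != errors.EUnauthorized {
            return nil, 0, err // unexpected failure: surface it to the caller
        }
        if errors.ErrorCode(err) == errors.EUnauthorized {
            continue // the caller simply cannot see this item
        }
        kept = append(kept, it)
    }
    return kept, len(kept), nil
}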
+func AuthorizeFindAnnotations(ctx context.Context, rs []influxdb.StoredAnnotation) ([]influxdb.StoredAnnotation, int, error) { + // This filters without allocating + // https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating + rrs := rs[:0] + for _, r := range rs { + _, _, err := AuthorizeRead(ctx, influxdb.AnnotationsResourceType, r.ID, r.OrgID) + if err != nil && errors.ErrorCode(err) != errors.EUnauthorized { + return nil, 0, err + } + if errors.ErrorCode(err) == errors.EUnauthorized { + continue + } + rrs = append(rrs, r) + } + return rrs, len(rrs), nil +} + +// AuthorizeFindStreams takes the given items and returns only the ones that the user is authorized to read. +func AuthorizeFindStreams(ctx context.Context, rs []influxdb.StoredStream) ([]influxdb.StoredStream, int, error) { + // This filters without allocating + // https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating + rrs := rs[:0] + for _, r := range rs { + _, _, err := AuthorizeRead(ctx, influxdb.AnnotationsResourceType, r.ID, r.OrgID) + if err != nil && errors.ErrorCode(err) != errors.EUnauthorized { + return nil, 0, err + } + if errors.ErrorCode(err) == errors.EUnauthorized { + continue + } + rrs = append(rrs, r) + } + return rrs, len(rrs), nil +} + +// AuthorizeFindNotebooks takes the given items and returns only the ones that the user is authorized to read. +func AuthorizeFindNotebooks(ctx context.Context, rs []*influxdb.Notebook) ([]*influxdb.Notebook, int, error) { + // This filters without allocating + // https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating + rrs := rs[:0] + for _, r := range rs { + _, _, err := AuthorizeRead(ctx, influxdb.NotebooksResourceType, r.ID, r.OrgID) + if err != nil && errors.ErrorCode(err) != errors.EUnauthorized { return nil, 0, err } - if influxdb.ErrorCode(err) == influxdb.EUnauthorized { + if errors.ErrorCode(err) == errors.EUnauthorized { continue } rrs = append(rrs, r) @@ -97,10 +148,10 @@ func AuthorizeFindOrganizations(ctx context.Context, rs []*influxdb.Organization rrs := rs[:0] for _, r := range rs { _, _, err := AuthorizeReadOrg(ctx, r.ID) - if err != nil && influxdb.ErrorCode(err) != influxdb.EUnauthorized { + if err != nil && errors.ErrorCode(err) != errors.EUnauthorized { return nil, 0, err } - if influxdb.ErrorCode(err) == influxdb.EUnauthorized { + if errors.ErrorCode(err) == errors.EUnauthorized { continue } rrs = append(rrs, r) @@ -115,10 +166,10 @@ func AuthorizeFindSources(ctx context.Context, rs []*influxdb.Source) ([]*influx rrs := rs[:0] for _, r := range rs { _, _, err := AuthorizeRead(ctx, influxdb.SourcesResourceType, r.ID, r.OrganizationID) - if err != nil && influxdb.ErrorCode(err) != influxdb.EUnauthorized { + if err != nil && errors.ErrorCode(err) != errors.EUnauthorized { return nil, 0, err } - if influxdb.ErrorCode(err) == influxdb.EUnauthorized { + if errors.ErrorCode(err) == errors.EUnauthorized { continue } rrs = append(rrs, r) @@ -127,16 +178,16 @@ func AuthorizeFindSources(ctx context.Context, rs []*influxdb.Source) ([]*influx } // AuthorizeFindTasks takes the given items and returns only the ones that the user is authorized to read. 
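// --- Illustrative sketch (not part of the diff): the relocated ID type.
// Assumed from the test expectations in these hunks (org 10 rendering as
// "000000000000000a"): platform.ID is a 64-bit integer that encodes to a
// fixed 16-character hex string, and IDFromString reverses that encoding.
package main

import (
    "fmt"

    "github.com/influxdata/influxdb/v2/kit/platform"
)

func main() {
    var orgID platform.ID = 10
    fmt.Println(orgID.String()) // 000000000000000a

    parsed, err := platform.IDFromString("000000000000000a")
    if err != nil {
        panic(err)
    }
    fmt.Println(*parsed == orgID) // true
}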
-func AuthorizeFindTasks(ctx context.Context, rs []*influxdb.Task) ([]*influxdb.Task, int, error) { +func AuthorizeFindTasks(ctx context.Context, rs []*taskmodel.Task) ([]*taskmodel.Task, int, error) { // This filters without allocating // https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating rrs := rs[:0] for _, r := range rs { _, _, err := AuthorizeRead(ctx, influxdb.TasksResourceType, r.ID, r.OrganizationID) - if err != nil && influxdb.ErrorCode(err) != influxdb.EUnauthorized { + if err != nil && errors.ErrorCode(err) != errors.EUnauthorized { return nil, 0, err } - if influxdb.ErrorCode(err) == influxdb.EUnauthorized { + if errors.ErrorCode(err) == errors.EUnauthorized { continue } rrs = append(rrs, r) @@ -151,10 +202,10 @@ func AuthorizeFindTelegrafs(ctx context.Context, rs []*influxdb.TelegrafConfig) rrs := rs[:0] for _, r := range rs { _, _, err := AuthorizeRead(ctx, influxdb.TelegrafsResourceType, r.ID, r.OrgID) - if err != nil && influxdb.ErrorCode(err) != influxdb.EUnauthorized { + if err != nil && errors.ErrorCode(err) != errors.EUnauthorized { return nil, 0, err } - if influxdb.ErrorCode(err) == influxdb.EUnauthorized { + if errors.ErrorCode(err) == errors.EUnauthorized { continue } rrs = append(rrs, r) @@ -169,10 +220,10 @@ func AuthorizeFindUsers(ctx context.Context, rs []*influxdb.User) ([]*influxdb.U rrs := rs[:0] for _, r := range rs { _, _, err := AuthorizeReadResource(ctx, influxdb.UsersResourceType, r.ID) - if err != nil && influxdb.ErrorCode(err) != influxdb.EUnauthorized { + if err != nil && errors.ErrorCode(err) != errors.EUnauthorized { return nil, 0, err } - if influxdb.ErrorCode(err) == influxdb.EUnauthorized { + if errors.ErrorCode(err) == errors.EUnauthorized { continue } rrs = append(rrs, r) @@ -187,10 +238,10 @@ func AuthorizeFindVariables(ctx context.Context, rs []*influxdb.Variable) ([]*in rrs := rs[:0] for _, r := range rs { _, _, err := AuthorizeRead(ctx, influxdb.VariablesResourceType, r.ID, r.OrganizationID) - if err != nil && influxdb.ErrorCode(err) != influxdb.EUnauthorized { + if err != nil && errors.ErrorCode(err) != errors.EUnauthorized { return nil, 0, err } - if influxdb.ErrorCode(err) == influxdb.EUnauthorized { + if errors.ErrorCode(err) == errors.EUnauthorized { continue } rrs = append(rrs, r) @@ -205,10 +256,10 @@ func AuthorizeFindScrapers(ctx context.Context, rs []influxdb.ScraperTarget) ([] rrs := rs[:0] for _, r := range rs { _, _, err := AuthorizeRead(ctx, influxdb.ScraperResourceType, r.ID, r.OrgID) - if err != nil && influxdb.ErrorCode(err) != influxdb.EUnauthorized { + if err != nil && errors.ErrorCode(err) != errors.EUnauthorized { return nil, 0, err } - if influxdb.ErrorCode(err) == influxdb.EUnauthorized { + if errors.ErrorCode(err) == errors.EUnauthorized { continue } rrs = append(rrs, r) @@ -223,10 +274,10 @@ func AuthorizeFindLabels(ctx context.Context, rs []*influxdb.Label) ([]*influxdb rrs := rs[:0] for _, r := range rs { _, _, err := AuthorizeRead(ctx, influxdb.LabelsResourceType, r.ID, r.OrgID) - if err != nil && influxdb.ErrorCode(err) != influxdb.EUnauthorized { + if err != nil && errors.ErrorCode(err) != errors.EUnauthorized { return nil, 0, err } - if influxdb.ErrorCode(err) == influxdb.EUnauthorized { + if errors.ErrorCode(err) == errors.EUnauthorized { continue } rrs = append(rrs, r) @@ -241,10 +292,10 @@ func AuthorizeFindNotificationRules(ctx context.Context, rs []influxdb.Notificat rrs := rs[:0] for _, r := range rs { _, _, err := AuthorizeRead(ctx, influxdb.NotificationRuleResourceType, 
r.GetID(), r.GetOrgID()) - if err != nil && influxdb.ErrorCode(err) != influxdb.EUnauthorized { + if err != nil && errors.ErrorCode(err) != errors.EUnauthorized { return nil, 0, err } - if influxdb.ErrorCode(err) == influxdb.EUnauthorized { + if errors.ErrorCode(err) == errors.EUnauthorized { continue } rrs = append(rrs, r) @@ -259,10 +310,10 @@ func AuthorizeFindNotificationEndpoints(ctx context.Context, rs []influxdb.Notif rrs := rs[:0] for _, r := range rs { _, _, err := AuthorizeRead(ctx, influxdb.NotificationEndpointResourceType, r.GetID(), r.GetOrgID()) - if err != nil && influxdb.ErrorCode(err) != influxdb.EUnauthorized { + if err != nil && errors.ErrorCode(err) != errors.EUnauthorized { return nil, 0, err } - if influxdb.ErrorCode(err) == influxdb.EUnauthorized { + if errors.ErrorCode(err) == errors.EUnauthorized { continue } rrs = append(rrs, r) @@ -277,10 +328,10 @@ func AuthorizeFindChecks(ctx context.Context, rs []influxdb.Check) ([]influxdb.C rrs := rs[:0] for _, r := range rs { _, _, err := AuthorizeRead(ctx, influxdb.ChecksResourceType, r.GetID(), r.GetOrgID()) - if err != nil && influxdb.ErrorCode(err) != influxdb.EUnauthorized { + if err != nil && errors.ErrorCode(err) != errors.EUnauthorized { return nil, 0, err } - if influxdb.ErrorCode(err) == influxdb.EUnauthorized { + if errors.ErrorCode(err) == errors.EUnauthorized { continue } rrs = append(rrs, r) diff --git a/authorizer/backup.go b/authorizer/backup.go index 05bf9a5bd5d..dbbe94c91df 100644 --- a/authorizer/backup.go +++ b/authorizer/backup.go @@ -43,3 +43,16 @@ func (b BackupService) BackupShard(ctx context.Context, w io.Writer, shardID uin } return b.s.BackupShard(ctx, w, shardID, since) } + +// The Lock and Unlock methods below do not have authorization checks and should only be used +// when appropriate authorization has already been confirmed, such as behind a middleware. They +// are intended to be used for coordinating the locking and unlocking of the kv and sql metadata +// databases during a backup. They are made available here to allow the calls to pass-through to the +// underlying service. +func (b BackupService) RLockKVStore() { + b.s.RLockKVStore() +} + +func (b BackupService) RUnlockKVStore() { + b.s.RUnlockKVStore() +} diff --git a/authorizer/bucket.go b/authorizer/bucket.go index df1f3a35374..81dd4334154 100644 --- a/authorizer/bucket.go +++ b/authorizer/bucket.go @@ -4,6 +4,7 @@ import ( "context" "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/kit/platform" "github.com/influxdata/influxdb/v2/kit/tracing" ) @@ -23,7 +24,7 @@ func NewBucketService(s influxdb.BucketService) *BucketService { } // FindBucketByID checks to see if the authorizer on context has read access to the id provided. -func (s *BucketService) FindBucketByID(ctx context.Context, id influxdb.ID) (*influxdb.Bucket, error) { +func (s *BucketService) FindBucketByID(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) { span, ctx := tracing.StartSpanFromContext(ctx) defer span.Finish() @@ -38,7 +39,7 @@ func (s *BucketService) FindBucketByID(ctx context.Context, id influxdb.ID) (*in } // FindBucketByName returns a bucket by name for a particular organization. 
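// --- Illustrative sketch (not part of the diff): how a backup coordinator is
// expected to use the RLockKVStore/RUnlockKVStore pass-throughs added to
// authorizer/backup.go above. Only the two method names come from the diff;
// the interface and fake store here are hypothetical.
package main

import "fmt"

type metaLocker interface {
    RLockKVStore()
    RUnlockKVStore()
}

type fakeStore struct{}

func (fakeStore) RLockKVStore()   { fmt.Println("kv metadata read-locked") }
func (fakeStore) RUnlockKVStore() { fmt.Println("kv metadata unlocked") }

// backupMetadata holds the read lock for the duration of the snapshot so
// concurrent writers wait; authorization is assumed to have happened already,
// e.g. behind middleware, as the comment in the diff requires.
func backupMetadata(s metaLocker) {
    s.RLockKVStore()
    defer s.RUnlockKVStore()
    fmt.Println("streaming kv metadata snapshot")
}

func main() { backupMetadata(fakeStore{}) }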
-func (s *BucketService) FindBucketByName(ctx context.Context, orgID influxdb.ID, n string) (*influxdb.Bucket, error) { +func (s *BucketService) FindBucketByName(ctx context.Context, orgID platform.ID, n string) (*influxdb.Bucket, error) { span, ctx := tracing.StartSpanFromContext(ctx) defer span.Finish() @@ -93,7 +94,7 @@ func (s *BucketService) CreateBucket(ctx context.Context, b *influxdb.Bucket) er } // UpdateBucket checks to see if the authorizer on context has write access to the bucket provided. -func (s *BucketService) UpdateBucket(ctx context.Context, id influxdb.ID, upd influxdb.BucketUpdate) (*influxdb.Bucket, error) { +func (s *BucketService) UpdateBucket(ctx context.Context, id platform.ID, upd influxdb.BucketUpdate) (*influxdb.Bucket, error) { b, err := s.s.FindBucketByID(ctx, id) if err != nil { return nil, err @@ -105,7 +106,7 @@ func (s *BucketService) UpdateBucket(ctx context.Context, id influxdb.ID, upd in } // DeleteBucket checks to see if the authorizer on context has write access to the bucket provided. -func (s *BucketService) DeleteBucket(ctx context.Context, id influxdb.ID) error { +func (s *BucketService) DeleteBucket(ctx context.Context, id platform.ID) error { b, err := s.s.FindBucketByID(ctx, id) if err != nil { return err diff --git a/authorizer/bucket_test.go b/authorizer/bucket_test.go index c14ebe5ed5c..81faf2e01e4 100644 --- a/authorizer/bucket_test.go +++ b/authorizer/bucket_test.go @@ -10,6 +10,8 @@ import ( "github.com/influxdata/influxdb/v2" "github.com/influxdata/influxdb/v2/authorizer" influxdbcontext "github.com/influxdata/influxdb/v2/context" + "github.com/influxdata/influxdb/v2/kit/platform" + "github.com/influxdata/influxdb/v2/kit/platform/errors" "github.com/influxdata/influxdb/v2/mock" influxdbtesting "github.com/influxdata/influxdb/v2/testing" ) @@ -33,7 +35,7 @@ func TestBucketService_FindBucketByID(t *testing.T) { } type args struct { permission influxdb.Permission - id influxdb.ID + id platform.ID } type wants struct { err error @@ -49,7 +51,7 @@ func TestBucketService_FindBucketByID(t *testing.T) { name: "authorized to access id", fields: fields{ BucketService: &mock.BucketService{ - FindBucketByIDFn: func(ctx context.Context, id influxdb.ID) (*influxdb.Bucket, error) { + FindBucketByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) { return &influxdb.Bucket{ ID: id, OrgID: 10, @@ -75,7 +77,7 @@ func TestBucketService_FindBucketByID(t *testing.T) { name: "unauthorized to access id", fields: fields{ BucketService: &mock.BucketService{ - FindBucketByIDFn: func(ctx context.Context, id influxdb.ID) (*influxdb.Bucket, error) { + FindBucketByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) { return &influxdb.Bucket{ ID: id, OrgID: 10, @@ -94,9 +96,9 @@ func TestBucketService_FindBucketByID(t *testing.T) { id: 1, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "read:orgs/000000000000000a/buckets/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -179,9 +181,9 @@ func TestBucketService_FindBucket(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "read:orgs/000000000000000a/buckets/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -334,7 +336,7 @@ func TestBucketService_UpdateBucket(t *testing.T) { BucketService influxdb.BucketService } type args struct { - id influxdb.ID + 
id platform.ID permissions []influxdb.Permission } type wants struct { @@ -351,13 +353,13 @@ func TestBucketService_UpdateBucket(t *testing.T) { name: "authorized to update bucket", fields: fields{ BucketService: &mock.BucketService{ - FindBucketByIDFn: func(ctc context.Context, id influxdb.ID) (*influxdb.Bucket, error) { + FindBucketByIDFn: func(ctc context.Context, id platform.ID) (*influxdb.Bucket, error) { return &influxdb.Bucket{ ID: 1, OrgID: 10, }, nil }, - UpdateBucketFn: func(ctx context.Context, id influxdb.ID, upd influxdb.BucketUpdate) (*influxdb.Bucket, error) { + UpdateBucketFn: func(ctx context.Context, id platform.ID, upd influxdb.BucketUpdate) (*influxdb.Bucket, error) { return &influxdb.Bucket{ ID: 1, OrgID: 10, @@ -392,13 +394,13 @@ func TestBucketService_UpdateBucket(t *testing.T) { name: "unauthorized to update bucket", fields: fields{ BucketService: &mock.BucketService{ - FindBucketByIDFn: func(ctc context.Context, id influxdb.ID) (*influxdb.Bucket, error) { + FindBucketByIDFn: func(ctc context.Context, id platform.ID) (*influxdb.Bucket, error) { return &influxdb.Bucket{ ID: 1, OrgID: 10, }, nil }, - UpdateBucketFn: func(ctx context.Context, id influxdb.ID, upd influxdb.BucketUpdate) (*influxdb.Bucket, error) { + UpdateBucketFn: func(ctx context.Context, id platform.ID, upd influxdb.BucketUpdate) (*influxdb.Bucket, error) { return &influxdb.Bucket{ ID: 1, OrgID: 10, @@ -419,9 +421,9 @@ func TestBucketService_UpdateBucket(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/000000000000000a/buckets/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -445,7 +447,7 @@ func TestBucketService_DeleteBucket(t *testing.T) { BucketService influxdb.BucketService } type args struct { - id influxdb.ID + id platform.ID permissions []influxdb.Permission } type wants struct { @@ -462,13 +464,13 @@ func TestBucketService_DeleteBucket(t *testing.T) { name: "authorized to delete bucket", fields: fields{ BucketService: &mock.BucketService{ - FindBucketByIDFn: func(ctc context.Context, id influxdb.ID) (*influxdb.Bucket, error) { + FindBucketByIDFn: func(ctc context.Context, id platform.ID) (*influxdb.Bucket, error) { return &influxdb.Bucket{ ID: 1, OrgID: 10, }, nil }, - DeleteBucketFn: func(ctx context.Context, id influxdb.ID) error { + DeleteBucketFn: func(ctx context.Context, id platform.ID) error { return nil }, }, @@ -500,13 +502,13 @@ func TestBucketService_DeleteBucket(t *testing.T) { name: "unauthorized to delete bucket", fields: fields{ BucketService: &mock.BucketService{ - FindBucketByIDFn: func(ctc context.Context, id influxdb.ID) (*influxdb.Bucket, error) { + FindBucketByIDFn: func(ctc context.Context, id platform.ID) (*influxdb.Bucket, error) { return &influxdb.Bucket{ ID: 1, OrgID: 10, }, nil }, - DeleteBucketFn: func(ctx context.Context, id influxdb.ID) error { + DeleteBucketFn: func(ctx context.Context, id platform.ID) error { return nil }, }, @@ -524,9 +526,9 @@ func TestBucketService_DeleteBucket(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/000000000000000a/buckets/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -551,7 +553,7 @@ func TestBucketService_CreateBucket(t *testing.T) { } type args struct { permission influxdb.Permission - orgID influxdb.ID + orgID platform.ID } type wants struct { err error @@ -606,9 +608,9 @@ func 
TestBucketService_CreateBucket(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/000000000000000a/buckets is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, diff --git a/authorizer/check.go b/authorizer/check.go index c5a5f66f151..0f0744e3948 100644 --- a/authorizer/check.go +++ b/authorizer/check.go @@ -4,6 +4,8 @@ import ( "context" "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/kit/platform" + "github.com/influxdata/influxdb/v2/task/taskmodel" ) var _ influxdb.CheckService = (*CheckService)(nil) @@ -14,7 +16,7 @@ type CheckService struct { s influxdb.CheckService influxdb.UserResourceMappingService influxdb.OrganizationService - influxdb.TaskService + taskmodel.TaskService } // NewCheckService constructs an instance of an authorizing check service. @@ -27,7 +29,7 @@ func NewCheckService(s influxdb.CheckService, urm influxdb.UserResourceMappingSe } // FindCheckByID checks to see if the authorizer on context has read access to the id provided. -func (s *CheckService) FindCheckByID(ctx context.Context, id influxdb.ID) (influxdb.Check, error) { +func (s *CheckService) FindCheckByID(ctx context.Context, id platform.ID) (influxdb.Check, error) { chk, err := s.s.FindCheckByID(ctx, id) if err != nil { return nil, err @@ -62,7 +64,7 @@ func (s *CheckService) FindCheck(ctx context.Context, filter influxdb.CheckFilte } // CreateCheck checks to see if the authorizer on context has write access to the global check resource. -func (s *CheckService) CreateCheck(ctx context.Context, chk influxdb.CheckCreate, userID influxdb.ID) error { +func (s *CheckService) CreateCheck(ctx context.Context, chk influxdb.CheckCreate, userID platform.ID) error { if _, _, err := AuthorizeCreate(ctx, influxdb.ChecksResourceType, chk.GetOrgID()); err != nil { return err } @@ -70,7 +72,7 @@ func (s *CheckService) CreateCheck(ctx context.Context, chk influxdb.CheckCreate } // UpdateCheck checks to see if the authorizer on context has write access to the check provided. -func (s *CheckService) UpdateCheck(ctx context.Context, id influxdb.ID, upd influxdb.CheckCreate) (influxdb.Check, error) { +func (s *CheckService) UpdateCheck(ctx context.Context, id platform.ID, upd influxdb.CheckCreate) (influxdb.Check, error) { chk, err := s.FindCheckByID(ctx, id) if err != nil { return nil, err @@ -82,7 +84,7 @@ func (s *CheckService) UpdateCheck(ctx context.Context, id influxdb.ID, upd infl } // PatchCheck checks to see if the authorizer on context has write access to the check provided. -func (s *CheckService) PatchCheck(ctx context.Context, id influxdb.ID, upd influxdb.CheckUpdate) (influxdb.Check, error) { +func (s *CheckService) PatchCheck(ctx context.Context, id platform.ID, upd influxdb.CheckUpdate) (influxdb.Check, error) { chk, err := s.FindCheckByID(ctx, id) if err != nil { return nil, err @@ -94,7 +96,7 @@ func (s *CheckService) PatchCheck(ctx context.Context, id influxdb.ID, upd influ } // DeleteCheck checks to see if the authorizer on context has write access to the check provided. 
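// --- Illustrative sketch (not part of the diff): the find-then-authorize-
// then-delegate shape every wrapper method in this package follows. The
// helper names and signatures come from authorize.go above; the free function
// itself is hypothetical and stands in for methods like DeleteCheck below.
package sketch

import (
    "context"

    "github.com/influxdata/influxdb/v2"
    "github.com/influxdata/influxdb/v2/authorizer"
    "github.com/influxdata/influxdb/v2/kit/platform"
)

func deleteCheck(ctx context.Context, svc influxdb.CheckService, id platform.ID) error {
    // 1. Load the resource to learn which org owns it.
    chk, err := svc.FindCheckByID(ctx, id)
    if err != nil {
        return err
    }
    // 2. Require write permission on this specific check within its org.
    if _, _, err := authorizer.AuthorizeWrite(ctx, influxdb.ChecksResourceType, chk.GetID(), chk.GetOrgID()); err != nil {
        return err
    }
    // 3. Only then delegate to the underlying service.
    return svc.DeleteCheck(ctx, id)
}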
-func (s *CheckService) DeleteCheck(ctx context.Context, id influxdb.ID) error { +func (s *CheckService) DeleteCheck(ctx context.Context, id platform.ID) error { chk, err := s.FindCheckByID(ctx, id) if err != nil { return err diff --git a/authorizer/check_test.go b/authorizer/check_test.go index 47f73d906b1..f90c1a91d80 100644 --- a/authorizer/check_test.go +++ b/authorizer/check_test.go @@ -10,6 +10,8 @@ import ( "github.com/influxdata/influxdb/v2" "github.com/influxdata/influxdb/v2/authorizer" influxdbcontext "github.com/influxdata/influxdb/v2/context" + "github.com/influxdata/influxdb/v2/kit/platform" + "github.com/influxdata/influxdb/v2/kit/platform/errors" "github.com/influxdata/influxdb/v2/mock" "github.com/influxdata/influxdb/v2/notification/check" influxdbtesting "github.com/influxdata/influxdb/v2/testing" @@ -34,7 +36,7 @@ func TestCheckService_FindCheckByID(t *testing.T) { } type args struct { permission influxdb.Permission - id influxdb.ID + id platform.ID } type wants struct { err error @@ -50,7 +52,7 @@ func TestCheckService_FindCheckByID(t *testing.T) { name: "authorized to access id", fields: fields{ CheckService: &mock.CheckService{ - FindCheckByIDFn: func(ctx context.Context, id influxdb.ID) (influxdb.Check, error) { + FindCheckByIDFn: func(ctx context.Context, id platform.ID) (influxdb.Check, error) { return &check.Deadman{ Base: check.Base{ ID: id, @@ -78,7 +80,7 @@ func TestCheckService_FindCheckByID(t *testing.T) { name: "unauthorized to access id", fields: fields{ CheckService: &mock.CheckService{ - FindCheckByIDFn: func(ctx context.Context, id influxdb.ID) (influxdb.Check, error) { + FindCheckByIDFn: func(ctx context.Context, id platform.ID) (influxdb.Check, error) { return &check.Deadman{ Base: check.Base{ ID: id, @@ -99,9 +101,9 @@ func TestCheckService_FindCheckByID(t *testing.T) { id: 1, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "read:orgs/000000000000000a/checks/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -275,7 +277,7 @@ func TestCheckService_UpdateCheck(t *testing.T) { CheckService influxdb.CheckService } type args struct { - id influxdb.ID + id platform.ID permissions []influxdb.Permission } type wants struct { @@ -292,7 +294,7 @@ func TestCheckService_UpdateCheck(t *testing.T) { name: "authorized to update check", fields: fields{ CheckService: &mock.CheckService{ - FindCheckByIDFn: func(ctx context.Context, id influxdb.ID) (influxdb.Check, error) { + FindCheckByIDFn: func(ctx context.Context, id platform.ID) (influxdb.Check, error) { return &check.Deadman{ Base: check.Base{ ID: 1, @@ -300,7 +302,7 @@ func TestCheckService_UpdateCheck(t *testing.T) { }, }, nil }, - UpdateCheckFn: func(ctx context.Context, id influxdb.ID, upd influxdb.CheckCreate) (influxdb.Check, error) { + UpdateCheckFn: func(ctx context.Context, id platform.ID, upd influxdb.CheckCreate) (influxdb.Check, error) { return &check.Deadman{ Base: check.Base{ ID: 1, @@ -337,7 +339,7 @@ func TestCheckService_UpdateCheck(t *testing.T) { name: "unauthorized to update check", fields: fields{ CheckService: &mock.CheckService{ - FindCheckByIDFn: func(ctx context.Context, id influxdb.ID) (influxdb.Check, error) { + FindCheckByIDFn: func(ctx context.Context, id platform.ID) (influxdb.Check, error) { return &check.Deadman{ Base: check.Base{ ID: 1, @@ -345,7 +347,7 @@ func TestCheckService_UpdateCheck(t *testing.T) { }, }, nil }, - UpdateCheckFn: func(ctx 
context.Context, id influxdb.ID, upd influxdb.CheckCreate) (influxdb.Check, error) { + UpdateCheckFn: func(ctx context.Context, id platform.ID, upd influxdb.CheckCreate) (influxdb.Check, error) { return &check.Deadman{ Base: check.Base{ ID: 1, @@ -368,9 +370,9 @@ func TestCheckService_UpdateCheck(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/000000000000000a/checks/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -399,7 +401,7 @@ func TestCheckService_PatchCheck(t *testing.T) { CheckService influxdb.CheckService } type args struct { - id influxdb.ID + id platform.ID permissions []influxdb.Permission } type wants struct { @@ -416,7 +418,7 @@ func TestCheckService_PatchCheck(t *testing.T) { name: "authorized to patch check", fields: fields{ CheckService: &mock.CheckService{ - FindCheckByIDFn: func(ctx context.Context, id influxdb.ID) (influxdb.Check, error) { + FindCheckByIDFn: func(ctx context.Context, id platform.ID) (influxdb.Check, error) { return &check.Deadman{ Base: check.Base{ ID: 1, @@ -424,7 +426,7 @@ func TestCheckService_PatchCheck(t *testing.T) { }, }, nil }, - PatchCheckFn: func(ctx context.Context, id influxdb.ID, upd influxdb.CheckUpdate) (influxdb.Check, error) { + PatchCheckFn: func(ctx context.Context, id platform.ID, upd influxdb.CheckUpdate) (influxdb.Check, error) { return &check.Deadman{ Base: check.Base{ ID: 1, @@ -461,7 +463,7 @@ func TestCheckService_PatchCheck(t *testing.T) { name: "unauthorized to patch check", fields: fields{ CheckService: &mock.CheckService{ - FindCheckByIDFn: func(ctx context.Context, id influxdb.ID) (influxdb.Check, error) { + FindCheckByIDFn: func(ctx context.Context, id platform.ID) (influxdb.Check, error) { return &check.Deadman{ Base: check.Base{ ID: 1, @@ -469,7 +471,7 @@ func TestCheckService_PatchCheck(t *testing.T) { }, }, nil }, - PatchCheckFn: func(ctx context.Context, id influxdb.ID, upd influxdb.CheckUpdate) (influxdb.Check, error) { + PatchCheckFn: func(ctx context.Context, id platform.ID, upd influxdb.CheckUpdate) (influxdb.Check, error) { return &check.Deadman{ Base: check.Base{ ID: 1, @@ -492,9 +494,9 @@ func TestCheckService_PatchCheck(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/000000000000000a/checks/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -518,7 +520,7 @@ func TestCheckService_DeleteCheck(t *testing.T) { CheckService influxdb.CheckService } type args struct { - id influxdb.ID + id platform.ID permissions []influxdb.Permission } type wants struct { @@ -535,7 +537,7 @@ func TestCheckService_DeleteCheck(t *testing.T) { name: "authorized to delete check", fields: fields{ CheckService: &mock.CheckService{ - FindCheckByIDFn: func(ctx context.Context, id influxdb.ID) (influxdb.Check, error) { + FindCheckByIDFn: func(ctx context.Context, id platform.ID) (influxdb.Check, error) { return &check.Deadman{ Base: check.Base{ ID: 1, @@ -543,7 +545,7 @@ func TestCheckService_DeleteCheck(t *testing.T) { }, }, nil }, - DeleteCheckFn: func(ctx context.Context, id influxdb.ID) error { + DeleteCheckFn: func(ctx context.Context, id platform.ID) error { return nil }, }, @@ -575,7 +577,7 @@ func TestCheckService_DeleteCheck(t *testing.T) { name: "unauthorized to delete check", fields: fields{ CheckService: &mock.CheckService{ - FindCheckByIDFn: func(ctx context.Context, id influxdb.ID) (influxdb.Check, error) { + 
FindCheckByIDFn: func(ctx context.Context, id platform.ID) (influxdb.Check, error) { return &check.Deadman{ Base: check.Base{ ID: 1, @@ -583,7 +585,7 @@ func TestCheckService_DeleteCheck(t *testing.T) { }, }, nil }, - DeleteCheckFn: func(ctx context.Context, id influxdb.ID) error { + DeleteCheckFn: func(ctx context.Context, id platform.ID) error { return nil }, }, @@ -601,9 +603,9 @@ func TestCheckService_DeleteCheck(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/000000000000000a/checks/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -628,7 +630,7 @@ func TestCheckService_CreateCheck(t *testing.T) { } type args struct { permission influxdb.Permission - orgID influxdb.ID + orgID platform.ID } type wants struct { err error @@ -644,7 +646,7 @@ func TestCheckService_CreateCheck(t *testing.T) { name: "authorized to create check with org owner", fields: fields{ CheckService: &mock.CheckService{ - CreateCheckFn: func(ctx context.Context, chk influxdb.CheckCreate, userID influxdb.ID) error { + CreateCheckFn: func(ctx context.Context, chk influxdb.CheckCreate, userID platform.ID) error { return nil }, }, @@ -667,7 +669,7 @@ func TestCheckService_CreateCheck(t *testing.T) { name: "unauthorized to create check", fields: fields{ CheckService: &mock.CheckService{ - CreateCheckFn: func(ctx context.Context, chk influxdb.CheckCreate, userID influxdb.ID) error { + CreateCheckFn: func(ctx context.Context, chk influxdb.CheckCreate, userID platform.ID) error { return nil }, }, @@ -683,9 +685,9 @@ func TestCheckService_CreateCheck(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/000000000000000a/checks is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, diff --git a/authorizer/dashboard.go b/authorizer/dashboard.go index 2a16a6a1b95..17ebb46fb63 100644 --- a/authorizer/dashboard.go +++ b/authorizer/dashboard.go @@ -4,6 +4,7 @@ import ( "context" "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/kit/platform" ) var _ influxdb.DashboardService = (*DashboardService)(nil) @@ -22,7 +23,7 @@ func NewDashboardService(s influxdb.DashboardService) *DashboardService { } // FindDashboardByID checks to see if the authorizer on context has read access to the id provided. -func (s *DashboardService) FindDashboardByID(ctx context.Context, id influxdb.ID) (*influxdb.Dashboard, error) { +func (s *DashboardService) FindDashboardByID(ctx context.Context, id platform.ID) (*influxdb.Dashboard, error) { b, err := s.s.FindDashboardByID(ctx, id) if err != nil { return nil, err @@ -53,7 +54,7 @@ func (s *DashboardService) CreateDashboard(ctx context.Context, b *influxdb.Dash } // UpdateDashboard checks to see if the authorizer on context has write access to the dashboard provided. -func (s *DashboardService) UpdateDashboard(ctx context.Context, id influxdb.ID, upd influxdb.DashboardUpdate) (*influxdb.Dashboard, error) { +func (s *DashboardService) UpdateDashboard(ctx context.Context, id platform.ID, upd influxdb.DashboardUpdate) (*influxdb.Dashboard, error) { b, err := s.s.FindDashboardByID(ctx, id) if err != nil { return nil, err @@ -65,7 +66,7 @@ func (s *DashboardService) UpdateDashboard(ctx context.Context, id influxdb.ID, } // DeleteDashboard checks to see if the authorizer on context has write access to the dashboard provided. 
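// --- Illustrative sketch (not part of the diff): the wiring the *_test.go
// hunks above rely on. Setting an Authorizer on the context is what makes the
// wrapped services' Authorize* checks pass or fail; the Authorization literal
// and field names here are recalled from the surrounding codebase and should
// be treated as assumptions rather than verbatim test code.
package main

import (
    "context"
    "fmt"

    "github.com/influxdata/influxdb/v2"
    influxdbcontext "github.com/influxdata/influxdb/v2/context"
    "github.com/influxdata/influxdb/v2/kit/platform"
)

func main() {
    orgID := platform.ID(10)
    read := influxdb.Permission{
        Action: influxdb.ReadAction,
        Resource: influxdb.Resource{
            Type:  influxdb.BucketsResourceType,
            OrgID: &orgID,
        },
    }
    // An active Authorization satisfies influxdb.Authorizer, so services under
    // test consult exactly these permissions when deciding pass or EUnauthorized.
    ctx := influxdbcontext.SetAuthorizer(context.Background(), &influxdb.Authorization{
        Status:      influxdb.Active,
        Permissions: []influxdb.Permission{read},
    })
    fmt.Println(ctx != nil) // context now carries read:orgs/000000000000000a/buckets
}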
-func (s *DashboardService) DeleteDashboard(ctx context.Context, id influxdb.ID) error { +func (s *DashboardService) DeleteDashboard(ctx context.Context, id platform.ID) error { b, err := s.s.FindDashboardByID(ctx, id) if err != nil { return err @@ -76,7 +77,7 @@ func (s *DashboardService) DeleteDashboard(ctx context.Context, id influxdb.ID) return s.s.DeleteDashboard(ctx, id) } -func (s *DashboardService) AddDashboardCell(ctx context.Context, id influxdb.ID, c *influxdb.Cell, opts influxdb.AddDashboardCellOptions) error { +func (s *DashboardService) AddDashboardCell(ctx context.Context, id platform.ID, c *influxdb.Cell, opts influxdb.AddDashboardCellOptions) error { b, err := s.s.FindDashboardByID(ctx, id) if err != nil { return err @@ -87,7 +88,7 @@ func (s *DashboardService) AddDashboardCell(ctx context.Context, id influxdb.ID, return s.s.AddDashboardCell(ctx, id, c, opts) } -func (s *DashboardService) RemoveDashboardCell(ctx context.Context, dashboardID influxdb.ID, cellID influxdb.ID) error { +func (s *DashboardService) RemoveDashboardCell(ctx context.Context, dashboardID platform.ID, cellID platform.ID) error { b, err := s.s.FindDashboardByID(ctx, dashboardID) if err != nil { return err @@ -98,7 +99,7 @@ func (s *DashboardService) RemoveDashboardCell(ctx context.Context, dashboardID return s.s.RemoveDashboardCell(ctx, dashboardID, cellID) } -func (s *DashboardService) UpdateDashboardCell(ctx context.Context, dashboardID influxdb.ID, cellID influxdb.ID, upd influxdb.CellUpdate) (*influxdb.Cell, error) { +func (s *DashboardService) UpdateDashboardCell(ctx context.Context, dashboardID platform.ID, cellID platform.ID, upd influxdb.CellUpdate) (*influxdb.Cell, error) { b, err := s.s.FindDashboardByID(ctx, dashboardID) if err != nil { return nil, err @@ -109,7 +110,7 @@ func (s *DashboardService) UpdateDashboardCell(ctx context.Context, dashboardID return s.s.UpdateDashboardCell(ctx, dashboardID, cellID, upd) } -func (s *DashboardService) GetDashboardCellView(ctx context.Context, dashboardID influxdb.ID, cellID influxdb.ID) (*influxdb.View, error) { +func (s *DashboardService) GetDashboardCellView(ctx context.Context, dashboardID platform.ID, cellID platform.ID) (*influxdb.View, error) { b, err := s.s.FindDashboardByID(ctx, dashboardID) if err != nil { return nil, err @@ -120,7 +121,7 @@ func (s *DashboardService) GetDashboardCellView(ctx context.Context, dashboardID return s.s.GetDashboardCellView(ctx, dashboardID, cellID) } -func (s *DashboardService) UpdateDashboardCellView(ctx context.Context, dashboardID influxdb.ID, cellID influxdb.ID, upd influxdb.ViewUpdate) (*influxdb.View, error) { +func (s *DashboardService) UpdateDashboardCellView(ctx context.Context, dashboardID platform.ID, cellID platform.ID, upd influxdb.ViewUpdate) (*influxdb.View, error) { b, err := s.s.FindDashboardByID(ctx, dashboardID) if err != nil { return nil, err @@ -131,7 +132,7 @@ func (s *DashboardService) UpdateDashboardCellView(ctx context.Context, dashboar return s.s.UpdateDashboardCellView(ctx, dashboardID, cellID, upd) } -func (s *DashboardService) ReplaceDashboardCells(ctx context.Context, id influxdb.ID, c []*influxdb.Cell) error { +func (s *DashboardService) ReplaceDashboardCells(ctx context.Context, id platform.ID, c []*influxdb.Cell) error { b, err := s.s.FindDashboardByID(ctx, id) if err != nil { return err diff --git a/authorizer/dashboard_test.go b/authorizer/dashboard_test.go index 78cbc585571..f817b92c9dc 100644 --- a/authorizer/dashboard_test.go +++ b/authorizer/dashboard_test.go @@ -10,6 +10,8 
@@ import ( "github.com/influxdata/influxdb/v2" "github.com/influxdata/influxdb/v2/authorizer" influxdbcontext "github.com/influxdata/influxdb/v2/context" + "github.com/influxdata/influxdb/v2/kit/platform" + "github.com/influxdata/influxdb/v2/kit/platform/errors" "github.com/influxdata/influxdb/v2/mock" influxdbtesting "github.com/influxdata/influxdb/v2/testing" ) @@ -33,7 +35,7 @@ func TestDashboardService_FindDashboardByID(t *testing.T) { } type args struct { permission influxdb.Permission - id influxdb.ID + id platform.ID } type wants struct { err error @@ -49,7 +51,7 @@ func TestDashboardService_FindDashboardByID(t *testing.T) { name: "authorized to access id", fields: fields{ DashboardService: &mock.DashboardService{ - FindDashboardByIDF: func(ctx context.Context, id influxdb.ID) (*influxdb.Dashboard, error) { + FindDashboardByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Dashboard, error) { return &influxdb.Dashboard{ ID: id, OrganizationID: 10, @@ -75,7 +77,7 @@ func TestDashboardService_FindDashboardByID(t *testing.T) { name: "unauthorized to access id", fields: fields{ DashboardService: &mock.DashboardService{ - FindDashboardByIDF: func(ctx context.Context, id influxdb.ID) (*influxdb.Dashboard, error) { + FindDashboardByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Dashboard, error) { return &influxdb.Dashboard{ ID: id, OrganizationID: 10, @@ -94,9 +96,9 @@ func TestDashboardService_FindDashboardByID(t *testing.T) { id: 1, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "read:orgs/000000000000000a/dashboards/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -249,7 +251,7 @@ func TestDashboardService_UpdateDashboard(t *testing.T) { DashboardService influxdb.DashboardService } type args struct { - id influxdb.ID + id platform.ID permissions []influxdb.Permission } type wants struct { @@ -266,13 +268,13 @@ func TestDashboardService_UpdateDashboard(t *testing.T) { name: "authorized to update dashboard", fields: fields{ DashboardService: &mock.DashboardService{ - FindDashboardByIDF: func(ctc context.Context, id influxdb.ID) (*influxdb.Dashboard, error) { + FindDashboardByIDF: func(ctc context.Context, id platform.ID) (*influxdb.Dashboard, error) { return &influxdb.Dashboard{ ID: 1, OrganizationID: 10, }, nil }, - UpdateDashboardF: func(ctx context.Context, id influxdb.ID, upd influxdb.DashboardUpdate) (*influxdb.Dashboard, error) { + UpdateDashboardF: func(ctx context.Context, id platform.ID, upd influxdb.DashboardUpdate) (*influxdb.Dashboard, error) { return &influxdb.Dashboard{ ID: 1, OrganizationID: 10, @@ -307,13 +309,13 @@ func TestDashboardService_UpdateDashboard(t *testing.T) { name: "unauthorized to update dashboard", fields: fields{ DashboardService: &mock.DashboardService{ - FindDashboardByIDF: func(ctc context.Context, id influxdb.ID) (*influxdb.Dashboard, error) { + FindDashboardByIDF: func(ctc context.Context, id platform.ID) (*influxdb.Dashboard, error) { return &influxdb.Dashboard{ ID: 1, OrganizationID: 10, }, nil }, - UpdateDashboardF: func(ctx context.Context, id influxdb.ID, upd influxdb.DashboardUpdate) (*influxdb.Dashboard, error) { + UpdateDashboardF: func(ctx context.Context, id platform.ID, upd influxdb.DashboardUpdate) (*influxdb.Dashboard, error) { return &influxdb.Dashboard{ ID: 1, OrganizationID: 10, @@ -334,9 +336,9 @@ func TestDashboardService_UpdateDashboard(t *testing.T) { }, }, wants: wants{ - 
err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/000000000000000a/dashboards/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -360,7 +362,7 @@ func TestDashboardService_DeleteDashboard(t *testing.T) { DashboardService influxdb.DashboardService } type args struct { - id influxdb.ID + id platform.ID permissions []influxdb.Permission } type wants struct { @@ -377,13 +379,13 @@ func TestDashboardService_DeleteDashboard(t *testing.T) { name: "authorized to delete dashboard", fields: fields{ DashboardService: &mock.DashboardService{ - FindDashboardByIDF: func(ctc context.Context, id influxdb.ID) (*influxdb.Dashboard, error) { + FindDashboardByIDF: func(ctc context.Context, id platform.ID) (*influxdb.Dashboard, error) { return &influxdb.Dashboard{ ID: 1, OrganizationID: 10, }, nil }, - DeleteDashboardF: func(ctx context.Context, id influxdb.ID) error { + DeleteDashboardF: func(ctx context.Context, id platform.ID) error { return nil }, }, @@ -415,13 +417,13 @@ func TestDashboardService_DeleteDashboard(t *testing.T) { name: "unauthorized to delete dashboard", fields: fields{ DashboardService: &mock.DashboardService{ - FindDashboardByIDF: func(ctc context.Context, id influxdb.ID) (*influxdb.Dashboard, error) { + FindDashboardByIDF: func(ctc context.Context, id platform.ID) (*influxdb.Dashboard, error) { return &influxdb.Dashboard{ ID: 1, OrganizationID: 10, }, nil }, - DeleteDashboardF: func(ctx context.Context, id influxdb.ID) error { + DeleteDashboardF: func(ctx context.Context, id platform.ID) error { return nil }, }, @@ -439,9 +441,9 @@ func TestDashboardService_DeleteDashboard(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/000000000000000a/dashboards/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -466,7 +468,7 @@ func TestDashboardService_CreateDashboard(t *testing.T) { } type args struct { permission influxdb.Permission - orgID influxdb.ID + orgID platform.ID } type wants struct { err error @@ -521,9 +523,9 @@ func TestDashboardService_CreateDashboard(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/000000000000000a/dashboards is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -548,7 +550,7 @@ func TestDashboardService_WriteDashboardCell(t *testing.T) { } type args struct { permission influxdb.Permission - orgID influxdb.ID + orgID platform.ID } type wants struct { err error @@ -564,25 +566,25 @@ func TestDashboardService_WriteDashboardCell(t *testing.T) { name: "authorized to write dashboard cells/cell/view", fields: fields{ DashboardService: &mock.DashboardService{ - FindDashboardByIDF: func(ctx context.Context, id influxdb.ID) (*influxdb.Dashboard, error) { + FindDashboardByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Dashboard, error) { return &influxdb.Dashboard{ ID: id, OrganizationID: 10, }, nil }, - AddDashboardCellF: func(ctx context.Context, id influxdb.ID, c *influxdb.Cell, opts influxdb.AddDashboardCellOptions) error { + AddDashboardCellF: func(ctx context.Context, id platform.ID, c *influxdb.Cell, opts influxdb.AddDashboardCellOptions) error { return nil }, - RemoveDashboardCellF: func(ctx context.Context, id influxdb.ID, cid influxdb.ID) error { + RemoveDashboardCellF: func(ctx context.Context, id platform.ID, cid platform.ID) error { return nil }, - ReplaceDashboardCellsF: func(ctx 
context.Context, id influxdb.ID, cs []*influxdb.Cell) error { + ReplaceDashboardCellsF: func(ctx context.Context, id platform.ID, cs []*influxdb.Cell) error { return nil }, - UpdateDashboardCellF: func(ctx context.Context, id influxdb.ID, cid influxdb.ID, upd influxdb.CellUpdate) (*influxdb.Cell, error) { + UpdateDashboardCellF: func(ctx context.Context, id platform.ID, cid platform.ID, upd influxdb.CellUpdate) (*influxdb.Cell, error) { return &influxdb.Cell{}, nil }, - UpdateDashboardCellViewF: func(ctx context.Context, id influxdb.ID, cid influxdb.ID, upd influxdb.ViewUpdate) (*influxdb.View, error) { + UpdateDashboardCellViewF: func(ctx context.Context, id platform.ID, cid platform.ID, upd influxdb.ViewUpdate) (*influxdb.View, error) { return &influxdb.View{}, nil }, }, @@ -605,25 +607,25 @@ func TestDashboardService_WriteDashboardCell(t *testing.T) { name: "unauthorized to write dashboard cells/cell/view", fields: fields{ DashboardService: &mock.DashboardService{ - FindDashboardByIDF: func(ctx context.Context, id influxdb.ID) (*influxdb.Dashboard, error) { + FindDashboardByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Dashboard, error) { return &influxdb.Dashboard{ ID: id, OrganizationID: 10, }, nil }, - AddDashboardCellF: func(ctx context.Context, id influxdb.ID, c *influxdb.Cell, opts influxdb.AddDashboardCellOptions) error { + AddDashboardCellF: func(ctx context.Context, id platform.ID, c *influxdb.Cell, opts influxdb.AddDashboardCellOptions) error { return nil }, - ReplaceDashboardCellsF: func(ctx context.Context, id influxdb.ID, cs []*influxdb.Cell) error { + ReplaceDashboardCellsF: func(ctx context.Context, id platform.ID, cs []*influxdb.Cell) error { return nil }, - UpdateDashboardCellF: func(ctx context.Context, id influxdb.ID, cid influxdb.ID, upd influxdb.CellUpdate) (*influxdb.Cell, error) { + UpdateDashboardCellF: func(ctx context.Context, id platform.ID, cid platform.ID, upd influxdb.CellUpdate) (*influxdb.Cell, error) { return &influxdb.Cell{}, nil }, - RemoveDashboardCellF: func(ctx context.Context, id influxdb.ID, cid influxdb.ID) error { + RemoveDashboardCellF: func(ctx context.Context, id platform.ID, cid platform.ID) error { return nil }, - UpdateDashboardCellViewF: func(ctx context.Context, id influxdb.ID, cid influxdb.ID, upd influxdb.ViewUpdate) (*influxdb.View, error) { + UpdateDashboardCellViewF: func(ctx context.Context, id platform.ID, cid platform.ID, upd influxdb.ViewUpdate) (*influxdb.View, error) { return &influxdb.View{}, nil }, }, @@ -639,9 +641,9 @@ func TestDashboardService_WriteDashboardCell(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/000000000000000a/dashboards/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -678,7 +680,7 @@ func TestDashboardService_FindDashboardCellView(t *testing.T) { } type args struct { permission influxdb.Permission - orgID influxdb.ID + orgID platform.ID } type wants struct { err error @@ -694,13 +696,13 @@ func TestDashboardService_FindDashboardCellView(t *testing.T) { name: "authorized to read dashboard cells/cell/view", fields: fields{ DashboardService: &mock.DashboardService{ - FindDashboardByIDF: func(ctx context.Context, id influxdb.ID) (*influxdb.Dashboard, error) { + FindDashboardByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Dashboard, error) { return &influxdb.Dashboard{ ID: id, OrganizationID: 10, }, nil }, - GetDashboardCellViewF: func(ctx context.Context, id influxdb.ID, cid 
influxdb.ID) (*influxdb.View, error) { + GetDashboardCellViewF: func(ctx context.Context, id platform.ID, cid platform.ID) (*influxdb.View, error) { return &influxdb.View{}, nil }, }, @@ -723,13 +725,13 @@ func TestDashboardService_FindDashboardCellView(t *testing.T) { name: "unauthorized to read dashboard cells/cell/view", fields: fields{ DashboardService: &mock.DashboardService{ - FindDashboardByIDF: func(ctx context.Context, id influxdb.ID) (*influxdb.Dashboard, error) { + FindDashboardByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Dashboard, error) { return &influxdb.Dashboard{ ID: id, OrganizationID: 10, }, nil }, - GetDashboardCellViewF: func(ctx context.Context, id influxdb.ID, cid influxdb.ID) (*influxdb.View, error) { + GetDashboardCellViewF: func(ctx context.Context, id platform.ID, cid platform.ID) (*influxdb.View, error) { return &influxdb.View{}, nil }, }, @@ -745,9 +747,9 @@ func TestDashboardService_FindDashboardCellView(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "read:orgs/000000000000000a/dashboards/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, diff --git a/authorizer/document.go b/authorizer/document.go index 82b65463e97..4c027de1f4e 100644 --- a/authorizer/document.go +++ b/authorizer/document.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/kit/platform" ) var _ influxdb.DocumentService = (*DocumentService)(nil) @@ -41,14 +42,14 @@ type documentStore struct { s influxdb.DocumentStore } -func newDocumentPermission(a influxdb.Action, orgID influxdb.ID, did *influxdb.ID) (*influxdb.Permission, error) { +func newDocumentPermission(a influxdb.Action, orgID platform.ID, did *platform.ID) (*influxdb.Permission, error) { if did != nil { return influxdb.NewPermissionAtID(*did, a, influxdb.DocumentsResourceType, orgID) } return influxdb.NewPermission(a, influxdb.DocumentsResourceType, orgID) } -func toPerms(action influxdb.Action, orgs map[influxdb.ID]influxdb.UserType, did *influxdb.ID) ([]influxdb.Permission, error) { +func toPerms(action influxdb.Action, orgs map[platform.ID]influxdb.UserType, did *platform.ID) ([]influxdb.Permission, error) { ps := make([]influxdb.Permission, 0, len(orgs)) for orgID := range orgs { p, err := newDocumentPermission(action, orgID, did) @@ -74,7 +75,7 @@ func (s *documentStore) CreateDocument(ctx context.Context, d *influxdb.Document return s.s.CreateDocument(ctx, d) } -func (s *documentStore) FindDocument(ctx context.Context, id influxdb.ID) (*influxdb.Document, error) { +func (s *documentStore) FindDocument(ctx context.Context, id platform.ID) (*influxdb.Document, error) { d, err := s.s.FindDocument(ctx, id) if err != nil { return nil, err @@ -89,7 +90,7 @@ func (s *documentStore) FindDocument(ctx context.Context, id influxdb.ID) (*infl return d, nil } -func (s *documentStore) FindDocuments(ctx context.Context, oid influxdb.ID) ([]*influxdb.Document, error) { +func (s *documentStore) FindDocuments(ctx context.Context, oid platform.ID) ([]*influxdb.Document, error) { if _, _, err := AuthorizeOrgReadResource(ctx, influxdb.DocumentsResourceType, oid); err != nil { return nil, err } diff --git a/authorizer/label.go b/authorizer/label.go index ec044accf62..d1fafd2f718 100644 --- a/authorizer/label.go +++ b/authorizer/label.go @@ -4,6 +4,7 @@ import ( "context" "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/kit/platform" 
) var _ influxdb.LabelService = (*LabelService)(nil) @@ -25,7 +26,7 @@ func NewLabelServiceWithOrg(s influxdb.LabelService, orgIDResolver OrgIDResolver } // FindLabelByID checks to see if the authorizer on context has read access to the label id provided. -func (s *LabelService) FindLabelByID(ctx context.Context, id influxdb.ID) (*influxdb.Label, error) { +func (s *LabelService) FindLabelByID(ctx context.Context, id platform.ID) (*influxdb.Label, error) { l, err := s.s.FindLabelByID(ctx, id) if err != nil { return nil, err @@ -95,7 +96,7 @@ func (s *LabelService) CreateLabelMapping(ctx context.Context, m *influxdb.Label } // UpdateLabel checks to see if the authorizer on context has write access to the label provided. -func (s *LabelService) UpdateLabel(ctx context.Context, id influxdb.ID, upd influxdb.LabelUpdate) (*influxdb.Label, error) { +func (s *LabelService) UpdateLabel(ctx context.Context, id platform.ID, upd influxdb.LabelUpdate) (*influxdb.Label, error) { l, err := s.s.FindLabelByID(ctx, id) if err != nil { return nil, err @@ -107,7 +108,7 @@ func (s *LabelService) UpdateLabel(ctx context.Context, id influxdb.ID, upd infl } // DeleteLabel checks to see if the authorizer on context has write access to the label provided. -func (s *LabelService) DeleteLabel(ctx context.Context, id influxdb.ID) error { +func (s *LabelService) DeleteLabel(ctx context.Context, id platform.ID) error { l, err := s.s.FindLabelByID(ctx, id) if err != nil { return err diff --git a/authorizer/label_test.go b/authorizer/label_test.go index 51b71f04b59..a03173dd889 100644 --- a/authorizer/label_test.go +++ b/authorizer/label_test.go @@ -10,6 +10,8 @@ import ( "github.com/influxdata/influxdb/v2" "github.com/influxdata/influxdb/v2/authorizer" influxdbcontext "github.com/influxdata/influxdb/v2/context" + "github.com/influxdata/influxdb/v2/kit/platform" + "github.com/influxdata/influxdb/v2/kit/platform/errors" "github.com/influxdata/influxdb/v2/mock" influxdbtesting "github.com/influxdata/influxdb/v2/testing" ) @@ -21,7 +23,7 @@ const ( var ( orgOneInfluxID = influxdbtesting.MustIDBase16(orgOneID) orgSvc = &mock.OrganizationService{ - FindResourceOrganizationIDF: func(_ context.Context, _ influxdb.ResourceType, _ influxdb.ID) (influxdb.ID, error) { + FindResourceOrganizationIDF: func(_ context.Context, _ influxdb.ResourceType, _ platform.ID) (platform.ID, error) { return orgOneInfluxID, nil }, } @@ -46,7 +48,7 @@ func TestLabelService_FindLabelByID(t *testing.T) { } type args struct { permission influxdb.Permission - id influxdb.ID + id platform.ID } type wants struct { err error @@ -62,7 +64,7 @@ func TestLabelService_FindLabelByID(t *testing.T) { name: "authorized to access id", fields: fields{ LabelService: &mock.LabelService{ - FindLabelByIDFn: func(ctx context.Context, id influxdb.ID) (*influxdb.Label, error) { + FindLabelByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Label, error) { return &influxdb.Label{ ID: id, OrgID: orgOneInfluxID, @@ -88,7 +90,7 @@ func TestLabelService_FindLabelByID(t *testing.T) { name: "unauthorized to access id", fields: fields{ LabelService: &mock.LabelService{ - FindLabelByIDFn: func(ctx context.Context, id influxdb.ID) (*influxdb.Label, error) { + FindLabelByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Label, error) { return &influxdb.Label{ ID: id, OrgID: orgOneInfluxID, @@ -107,9 +109,9 @@ func TestLabelService_FindLabelByID(t *testing.T) { id: 1, }, wants: wants{ - err: &influxdb.Error{ + 
err: &errors.Error{ Msg: "read:orgs/020f755c3c083000/labels/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -292,7 +294,7 @@ func TestLabelService_UpdateLabel(t *testing.T) { LabelService influxdb.LabelService } type args struct { - id influxdb.ID + id platform.ID permissions []influxdb.Permission } type wants struct { @@ -309,13 +311,13 @@ func TestLabelService_UpdateLabel(t *testing.T) { name: "authorized to update label", fields: fields{ LabelService: &mock.LabelService{ - FindLabelByIDFn: func(ctc context.Context, id influxdb.ID) (*influxdb.Label, error) { + FindLabelByIDFn: func(ctc context.Context, id platform.ID) (*influxdb.Label, error) { return &influxdb.Label{ ID: 1, OrgID: orgOneInfluxID, }, nil }, - UpdateLabelFn: func(ctx context.Context, id influxdb.ID, upd influxdb.LabelUpdate) (*influxdb.Label, error) { + UpdateLabelFn: func(ctx context.Context, id platform.ID, upd influxdb.LabelUpdate) (*influxdb.Label, error) { return &influxdb.Label{ ID: 1, OrgID: orgOneInfluxID, @@ -343,13 +345,13 @@ func TestLabelService_UpdateLabel(t *testing.T) { name: "unauthorized to update label", fields: fields{ LabelService: &mock.LabelService{ - FindLabelByIDFn: func(ctc context.Context, id influxdb.ID) (*influxdb.Label, error) { + FindLabelByIDFn: func(ctc context.Context, id platform.ID) (*influxdb.Label, error) { return &influxdb.Label{ ID: 1, OrgID: orgOneInfluxID, }, nil }, - UpdateLabelFn: func(ctx context.Context, id influxdb.ID, upd influxdb.LabelUpdate) (*influxdb.Label, error) { + UpdateLabelFn: func(ctx context.Context, id platform.ID, upd influxdb.LabelUpdate) (*influxdb.Label, error) { return &influxdb.Label{ ID: 1, OrgID: orgOneInfluxID, @@ -370,9 +372,9 @@ func TestLabelService_UpdateLabel(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/020f755c3c083000/labels/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -396,7 +398,7 @@ func TestLabelService_DeleteLabel(t *testing.T) { LabelService influxdb.LabelService } type args struct { - id influxdb.ID + id platform.ID permissions []influxdb.Permission } type wants struct { @@ -413,13 +415,13 @@ func TestLabelService_DeleteLabel(t *testing.T) { name: "authorized to delete label", fields: fields{ LabelService: &mock.LabelService{ - FindLabelByIDFn: func(ctc context.Context, id influxdb.ID) (*influxdb.Label, error) { + FindLabelByIDFn: func(ctc context.Context, id platform.ID) (*influxdb.Label, error) { return &influxdb.Label{ ID: 1, OrgID: orgOneInfluxID, }, nil }, - DeleteLabelFn: func(ctx context.Context, id influxdb.ID) error { + DeleteLabelFn: func(ctx context.Context, id platform.ID) error { return nil }, }, @@ -445,13 +447,13 @@ func TestLabelService_DeleteLabel(t *testing.T) { name: "unauthorized to delete label", fields: fields{ LabelService: &mock.LabelService{ - FindLabelByIDFn: func(ctc context.Context, id influxdb.ID) (*influxdb.Label, error) { + FindLabelByIDFn: func(ctc context.Context, id platform.ID) (*influxdb.Label, error) { return &influxdb.Label{ ID: 1, OrgID: orgOneInfluxID, }, nil }, - DeleteLabelFn: func(ctx context.Context, id influxdb.ID) error { + DeleteLabelFn: func(ctx context.Context, id platform.ID) error { return nil }, }, @@ -470,9 +472,9 @@ func TestLabelService_DeleteLabel(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/020f755c3c083000/labels/0000000000000001 is unauthorized", - Code: 
influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -527,9 +529,9 @@ func TestLabelService_CreateLabel(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/020f755c3c083000/labels is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -551,9 +553,9 @@ func TestLabelService_CreateLabel(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/020f755c3c083000/labels is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -806,9 +808,9 @@ func TestLabelService_FindResourceLabels(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "read:orgs/020f755c3c083000/buckets/000000000000000a is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -853,7 +855,7 @@ func TestLabelService_CreateLabelMapping(t *testing.T) { name: "authorized to create label mapping", fields: fields{ LabelService: &mock.LabelService{ - FindLabelByIDFn: func(ctx context.Context, id influxdb.ID) (*influxdb.Label, error) { + FindLabelByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Label, error) { return &influxdb.Label{ ID: 1, OrgID: orgOneInfluxID, @@ -894,7 +896,7 @@ func TestLabelService_CreateLabelMapping(t *testing.T) { name: "unauthorized to create label mapping for resources on which the user does not have write access", fields: fields{ LabelService: &mock.LabelService{ - FindLabelByIDFn: func(ctx context.Context, id influxdb.ID) (*influxdb.Label, error) { + FindLabelByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Label, error) { return &influxdb.Label{ ID: 1, OrgID: orgOneInfluxID, @@ -921,8 +923,8 @@ func TestLabelService_CreateLabelMapping(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ - Code: influxdb.EUnauthorized, + err: &errors.Error{ + Code: errors.EUnauthorized, Msg: "write:orgs/020f755c3c083000/buckets/0000000000000002 is unauthorized", }, }, @@ -931,7 +933,7 @@ func TestLabelService_CreateLabelMapping(t *testing.T) { name: "unauthorized to create label mapping", fields: fields{ LabelService: &mock.LabelService{ - FindLabelByIDFn: func(ctx context.Context, id influxdb.ID) (*influxdb.Label, error) { + FindLabelByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Label, error) { return &influxdb.Label{ ID: 1, OrgID: orgOneInfluxID, @@ -958,9 +960,9 @@ func TestLabelService_CreateLabelMapping(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/020f755c3c083000/labels/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -1001,7 +1003,7 @@ func TestLabelService_DeleteLabelMapping(t *testing.T) { name: "authorized to delete label mapping", fields: fields{ LabelService: &mock.LabelService{ - FindLabelByIDFn: func(ctc context.Context, id influxdb.ID) (*influxdb.Label, error) { + FindLabelByIDFn: func(ctc context.Context, id platform.ID) (*influxdb.Label, error) { return &influxdb.Label{ ID: 1, OrgID: orgOneInfluxID, @@ -1042,7 +1044,7 @@ func TestLabelService_DeleteLabelMapping(t *testing.T) { name: "unauthorized to delete label mapping containing a resources on which the user does not have write access", fields: fields{ LabelService: &mock.LabelService{ - FindLabelByIDFn: func(ctc context.Context, id influxdb.ID) (*influxdb.Label, error) { + FindLabelByIDFn: func(ctc context.Context, id platform.ID) (*influxdb.Label, error) { 
return &influxdb.Label{ ID: 1, OrgID: orgOneInfluxID, @@ -1069,8 +1071,8 @@ func TestLabelService_DeleteLabelMapping(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ - Code: influxdb.EUnauthorized, + err: &errors.Error{ + Code: errors.EUnauthorized, Msg: "write:orgs/020f755c3c083000/buckets/0000000000000002 is unauthorized", }, }, @@ -1079,7 +1081,7 @@ func TestLabelService_DeleteLabelMapping(t *testing.T) { name: "unauthorized to delete label mapping", fields: fields{ LabelService: &mock.LabelService{ - FindLabelByIDFn: func(ctc context.Context, id influxdb.ID) (*influxdb.Label, error) { + FindLabelByIDFn: func(ctc context.Context, id platform.ID) (*influxdb.Label, error) { return &influxdb.Label{ ID: 1, OrgID: orgOneInfluxID, @@ -1106,9 +1108,9 @@ func TestLabelService_DeleteLabelMapping(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/020f755c3c083000/labels/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, diff --git a/authorizer/notebook.go b/authorizer/notebook.go new file mode 100644 index 00000000000..233c3f8f977 --- /dev/null +++ b/authorizer/notebook.go @@ -0,0 +1,83 @@ +package authorizer + +import ( + "context" + + "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/kit/platform" +) + +var _ influxdb.NotebookService = (*NotebookService)(nil) + +// NotebookService wraps an influxdb.NotebookService and authorizes actions +// against it appropriately. +type NotebookService struct { + s influxdb.NotebookService +} + +// NewNotebookService constructs an instance of an authorizing check service. +func NewNotebookService(s influxdb.NotebookService) *NotebookService { + return &NotebookService{ + s: s, + } +} + +// GetNotebook checks to see if the authorizer on context has read access to the id provided. +func (s *NotebookService) GetNotebook(ctx context.Context, id platform.ID) (*influxdb.Notebook, error) { + nb, err := s.s.GetNotebook(ctx, id) + if err != nil { + return nil, err + } + if _, _, err := AuthorizeRead(ctx, influxdb.NotebooksResourceType, id, nb.OrgID); err != nil { + return nil, err + } + return nb, nil +} + +// CreateNotebook checks to see if the authorizer on context has write access for notebooks for organization id provided in the notebook body. +func (s *NotebookService) CreateNotebook(ctx context.Context, create *influxdb.NotebookReqBody) (*influxdb.Notebook, error) { + if _, _, err := AuthorizeCreate(ctx, influxdb.NotebooksResourceType, create.OrgID); err != nil { + return nil, err + } + + return s.s.CreateNotebook(ctx, create) +} + +// UpdateNotebook checks to see if the authorizer on context has write access to the notebook provided. +func (s *NotebookService) UpdateNotebook(ctx context.Context, id platform.ID, update *influxdb.NotebookReqBody) (*influxdb.Notebook, error) { + nb, err := s.s.GetNotebook(ctx, id) + if err != nil { + return nil, err + } + if _, _, err := AuthorizeWrite(ctx, influxdb.NotebooksResourceType, id, nb.OrgID); err != nil { + return nil, err + } + return s.s.UpdateNotebook(ctx, id, update) +} + +// DeleteNotebook checks to see if the authorizer on context has write access to the notebook provided. 
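The new notebook authorizer repeats the fetch-then-authorize shape used by the label and document wrappers above: load the resource to learn its owning org, check the permission against that org, then delegate to the wrapped service. A minimal sketch of that shared flow (the helper name is hypothetical; AuthorizeRead is the package helper used verbatim in the hunks here):

func (s *NotebookService) authorizedNotebook(ctx context.Context, id platform.ID) (*influxdb.Notebook, error) {
	// Fetch first: the org that owns the notebook is needed for the check.
	nb, err := s.s.GetNotebook(ctx, id)
	if err != nil {
		return nil, err
	}
	// Reject before doing anything else if the caller lacks read access.
	if _, _, err := AuthorizeRead(ctx, influxdb.NotebooksResourceType, id, nb.OrgID); err != nil {
		return nil, err
	}
	return nb, nil
}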
+func (s *NotebookService) DeleteNotebook(ctx context.Context, id platform.ID) error { + nb, err := s.s.GetNotebook(ctx, id) + if err != nil { + return err + } + if _, _, err := AuthorizeWrite(ctx, influxdb.NotebooksResourceType, id, nb.OrgID); err != nil { + return err + } + return s.s.DeleteNotebook(ctx, id) +} + +// ListNotebooks checks to see if the requesting user has read access to the provided org and returns a list of notebooks for that org if so. +func (s *NotebookService) ListNotebooks(ctx context.Context, filter influxdb.NotebookListFilter) ([]*influxdb.Notebook, error) { + if _, _, err := AuthorizeOrgReadResource(ctx, influxdb.NotebooksResourceType, filter.OrgID); err != nil { + return nil, err + } + + ns, err := s.s.ListNotebooks(ctx, filter) + if err != nil { + return nil, err + } + + ns, _, err = AuthorizeFindNotebooks(ctx, ns) + return ns, err +} diff --git a/authorizer/notebook_test.go b/authorizer/notebook_test.go new file mode 100644 index 00000000000..7842ec3e91d --- /dev/null +++ b/authorizer/notebook_test.go @@ -0,0 +1,315 @@ +package authorizer_test + +import ( + "context" + "fmt" + "testing" + + "github.com/golang/mock/gomock" + "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/authorizer" + influxdbcontext "github.com/influxdata/influxdb/v2/context" + "github.com/influxdata/influxdb/v2/kit/platform" + "github.com/influxdata/influxdb/v2/kit/platform/errors" + "github.com/influxdata/influxdb/v2/mock" + influxdbtesting "github.com/influxdata/influxdb/v2/testing" + "github.com/stretchr/testify/require" +) + +var ( + orgID1 = influxdbtesting.IDPtr(1) + orgID2 = influxdbtesting.IDPtr(10) + nbID = influxdbtesting.IDPtr(2) +) + +func Test_GetNotebook(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + notebookOrg *platform.ID + permissionOrg *platform.ID + wantRet *influxdb.Notebook + wantErr error + }{ + { + "authorized to access notebook by id", + orgID1, + orgID1, + newTestNotebook(*orgID1), + nil, + }, + { + "not authorized to access notebook by id", + orgID1, + orgID2, + nil, + &errors.Error{ + Msg: fmt.Sprintf("read:orgs/%s/notebooks/%s is unauthorized", orgID1, nbID), + Code: errors.EUnauthorized, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrlr := gomock.NewController(t) + svc := mock.NewMockNotebookService(ctrlr) + s := authorizer.NewNotebookService(svc) + + svc.EXPECT(). + GetNotebook(gomock.Any(), *nbID). 
+ Return(newTestNotebook(*orgID1), nil) + + perm := newTestNotebooksPermission(influxdb.ReadAction, tt.permissionOrg) + + ctx := context.Background() + ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{perm})) + got, err := s.GetNotebook(ctx, *nbID) + require.Equal(t, tt.wantErr, err) + require.Equal(t, tt.wantRet, got) + }) + } +} + +func Test_CreateNotebook(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + notebookOrg *platform.ID + permissionOrg *platform.ID + wantRet *influxdb.Notebook + wantErr error + }{ + { + "authorized to create a notebook with the given org", + orgID1, + orgID1, + newTestNotebook(*orgID1), + nil, + }, + { + "not authorized to create a notebook with the given org", + orgID1, + orgID2, + nil, + &errors.Error{ + Msg: fmt.Sprintf("write:orgs/%s/notebooks is unauthorized", orgID1), + Code: errors.EUnauthorized, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrlr := gomock.NewController(t) + svc := mock.NewMockNotebookService(ctrlr) + s := authorizer.NewNotebookService(svc) + + perm := newTestNotebooksPermission(influxdb.WriteAction, tt.permissionOrg) + nb := newTestReqBody(*tt.notebookOrg) + + if tt.wantErr == nil { + svc.EXPECT(). + CreateNotebook(gomock.Any(), nb). + Return(tt.wantRet, nil) + } + + ctx := influxdbcontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(false, []influxdb.Permission{perm})) + got, err := s.CreateNotebook(ctx, nb) + require.Equal(t, tt.wantErr, err) + require.Equal(t, tt.wantRet, got) + }) + } +} + +func Test_UpdateNotebook(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + notebookOrg *platform.ID + permissionOrg *platform.ID + wantRet *influxdb.Notebook + wantErr error + }{ + { + "authorized to update notebook by id", + orgID1, + orgID1, + newTestNotebook(*orgID1), + nil, + }, + { + "not authorized to update notebook by id", + orgID1, + orgID2, + nil, + &errors.Error{ + Msg: fmt.Sprintf("write:orgs/%s/notebooks/%s is unauthorized", orgID1, nbID), + Code: errors.EUnauthorized, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrlr := gomock.NewController(t) + svc := mock.NewMockNotebookService(ctrlr) + s := authorizer.NewNotebookService(svc) + + svc.EXPECT(). + GetNotebook(gomock.Any(), *nbID). + Return(newTestNotebook(*tt.notebookOrg), nil) + + perm := newTestNotebooksPermission(influxdb.WriteAction, tt.permissionOrg) + nb := newTestReqBody(*tt.notebookOrg) + + if tt.wantErr == nil { + svc.EXPECT(). + UpdateNotebook(gomock.Any(), *nbID, nb). 
+ Return(tt.wantRet, nil) + } + + ctx := influxdbcontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(false, []influxdb.Permission{perm})) + got, err := s.UpdateNotebook(ctx, *nbID, nb) + require.Equal(t, tt.wantErr, err) + require.Equal(t, tt.wantRet, got) + }) + } +} + +func Test_DeleteNotebook(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + notebookOrg *platform.ID + permissionOrg *platform.ID + wantErr error + }{ + { + "authorized to delete notebook by id", + orgID1, + orgID1, + nil, + }, + { + "not authorized to delete notebook by id", + orgID1, + orgID2, + &errors.Error{ + Msg: fmt.Sprintf("write:orgs/%s/notebooks/%s is unauthorized", orgID1, nbID), + Code: errors.EUnauthorized, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrlr := gomock.NewController(t) + svc := mock.NewMockNotebookService(ctrlr) + s := authorizer.NewNotebookService(svc) + + svc.EXPECT(). + GetNotebook(gomock.Any(), *nbID). + Return(newTestNotebook(*tt.notebookOrg), nil) + + perm := newTestNotebooksPermission(influxdb.WriteAction, tt.permissionOrg) + + if tt.wantErr == nil { + svc.EXPECT(). + DeleteNotebook(gomock.Any(), *nbID). + Return(nil) + } + + ctx := influxdbcontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(false, []influxdb.Permission{perm})) + got := s.DeleteNotebook(ctx, *nbID) + require.Equal(t, tt.wantErr, got) + }) + } +} + +func Test_ListNotebooks(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + notebookOrg *platform.ID + permissionOrg *platform.ID + wantRet []*influxdb.Notebook + wantErr error + }{ + { + "authorized to list notebooks for the specified org", + orgID1, + orgID1, + []*influxdb.Notebook{}, + nil, + }, + { + "not authorized to list notebooks for the specified org", + orgID1, + orgID2, + nil, + &errors.Error{ + Msg: fmt.Sprintf("read:orgs/%s/notebooks is unauthorized", orgID1), + Code: errors.EUnauthorized, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrlr := gomock.NewController(t) + svc := mock.NewMockNotebookService(ctrlr) + s := authorizer.NewNotebookService(svc) + + perm := newTestNotebooksPermission(influxdb.ReadAction, tt.permissionOrg) + filter := influxdb.NotebookListFilter{OrgID: *tt.notebookOrg} + + if tt.wantErr == nil { + svc.EXPECT(). + ListNotebooks(gomock.Any(), filter). 
+ Return(tt.wantRet, nil) + } + + ctx := influxdbcontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(false, []influxdb.Permission{perm})) + got, err := s.ListNotebooks(ctx, filter) + require.Equal(t, tt.wantErr, err) + require.Equal(t, tt.wantRet, got) + }) + } +} + +func newTestNotebook(orgID platform.ID) *influxdb.Notebook { + return &influxdb.Notebook{ + OrgID: orgID, + ID: *nbID, + Name: "test notebook", + Spec: influxdb.NotebookSpec{ + "hello": "goodbye", + }, + } +} + +func newTestReqBody(orgID platform.ID) *influxdb.NotebookReqBody { + return &influxdb.NotebookReqBody{ + OrgID: orgID, + Name: "testing", + Spec: influxdb.NotebookSpec{ + "hello": "goodbye", + }, + } +} + +func newTestNotebooksPermission(action influxdb.Action, orgID *platform.ID) influxdb.Permission { + return influxdb.Permission{ + Action: action, + Resource: influxdb.Resource{ + Type: influxdb.NotebooksResourceType, + OrgID: orgID, + }, + } +} diff --git a/authorizer/notification_endpoint.go b/authorizer/notification_endpoint.go index 856543b1c69..c316e7b2ff8 100644 --- a/authorizer/notification_endpoint.go +++ b/authorizer/notification_endpoint.go @@ -4,6 +4,8 @@ import ( "context" "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/kit/platform" + "github.com/influxdata/influxdb/v2/kit/platform/errors" ) var _ influxdb.NotificationEndpointService = (*NotificationEndpointService)(nil) @@ -30,7 +32,7 @@ func NewNotificationEndpointService( } // FindNotificationEndpointByID checks to see if the authorizer on context has read access to the id provided. -func (s *NotificationEndpointService) FindNotificationEndpointByID(ctx context.Context, id influxdb.ID) (influxdb.NotificationEndpoint, error) { +func (s *NotificationEndpointService) FindNotificationEndpointByID(ctx context.Context, id platform.ID) (influxdb.NotificationEndpoint, error) { edp, err := s.s.FindNotificationEndpointByID(ctx, id) if err != nil { return nil, err @@ -45,8 +47,8 @@ func (s *NotificationEndpointService) FindNotificationEndpointByID(ctx context.C func (s *NotificationEndpointService) FindNotificationEndpoints(ctx context.Context, filter influxdb.NotificationEndpointFilter, opt ...influxdb.FindOptions) ([]influxdb.NotificationEndpoint, int, error) { // TODO: This is a temporary fix as to not fetch the entire collection when no filter is provided. if !filter.UserID.Valid() && filter.OrgID == nil { - return nil, 0, &influxdb.Error{ - Code: influxdb.EUnauthorized, + return nil, 0, &errors.Error{ + Code: errors.EUnauthorized, Msg: "cannot process a request without a org or user filter", } } @@ -61,7 +63,7 @@ func (s *NotificationEndpointService) FindNotificationEndpoints(ctx context.Cont } // CreateNotificationEndpoint checks to see if the authorizer on context has write access to the global notification endpoint resource. -func (s *NotificationEndpointService) CreateNotificationEndpoint(ctx context.Context, edp influxdb.NotificationEndpoint, userID influxdb.ID) error { +func (s *NotificationEndpointService) CreateNotificationEndpoint(ctx context.Context, edp influxdb.NotificationEndpoint, userID platform.ID) error { if _, _, err := AuthorizeCreate(ctx, influxdb.NotificationEndpointResourceType, edp.GetOrgID()); err != nil { return err } @@ -69,7 +71,7 @@ func (s *NotificationEndpointService) CreateNotificationEndpoint(ctx context.Con } // UpdateNotificationEndpoint checks to see if the authorizer on context has write access to the notification endpoint provided. 
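Condensed from Test_DeleteNotebook above, the gomock pattern the new notebook tests rely on: the lookup expectation is always registered (the wrapper must fetch the notebook before it can authorize), while the destructive expectation exists only on the success path, so the test also proves the wrapped service is never reached when authorization fails. The helper below is hypothetical and reuses the fixtures defined in notebook_test.go:

func runDeleteCase(t *testing.T, perm influxdb.Permission, wantErr error) {
	ctrl := gomock.NewController(t)
	svc := mock.NewMockNotebookService(ctrl)
	s := authorizer.NewNotebookService(svc)

	// Always expected: the authorizer fetches before it checks.
	svc.EXPECT().
		GetNotebook(gomock.Any(), *nbID).
		Return(newTestNotebook(*orgID1), nil)
	// Only expected when the permission should let the call through.
	if wantErr == nil {
		svc.EXPECT().
			DeleteNotebook(gomock.Any(), *nbID).
			Return(nil)
	}

	ctx := influxdbcontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(false, []influxdb.Permission{perm}))
	require.Equal(t, wantErr, s.DeleteNotebook(ctx, *nbID))
}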
-func (s *NotificationEndpointService) UpdateNotificationEndpoint(ctx context.Context, id influxdb.ID, upd influxdb.NotificationEndpoint, userID influxdb.ID) (influxdb.NotificationEndpoint, error) { +func (s *NotificationEndpointService) UpdateNotificationEndpoint(ctx context.Context, id platform.ID, upd influxdb.NotificationEndpoint, userID platform.ID) (influxdb.NotificationEndpoint, error) { edp, err := s.FindNotificationEndpointByID(ctx, id) if err != nil { return nil, err @@ -81,7 +83,7 @@ func (s *NotificationEndpointService) UpdateNotificationEndpoint(ctx context.Con } // PatchNotificationEndpoint checks to see if the authorizer on context has write access to the notification endpoint provided. -func (s *NotificationEndpointService) PatchNotificationEndpoint(ctx context.Context, id influxdb.ID, upd influxdb.NotificationEndpointUpdate) (influxdb.NotificationEndpoint, error) { +func (s *NotificationEndpointService) PatchNotificationEndpoint(ctx context.Context, id platform.ID, upd influxdb.NotificationEndpointUpdate) (influxdb.NotificationEndpoint, error) { edp, err := s.FindNotificationEndpointByID(ctx, id) if err != nil { return nil, err @@ -93,7 +95,7 @@ func (s *NotificationEndpointService) PatchNotificationEndpoint(ctx context.Cont } // DeleteNotificationEndpoint checks to see if the authorizer on context has write access to the notification endpoint provided. -func (s *NotificationEndpointService) DeleteNotificationEndpoint(ctx context.Context, id influxdb.ID) ([]influxdb.SecretField, influxdb.ID, error) { +func (s *NotificationEndpointService) DeleteNotificationEndpoint(ctx context.Context, id platform.ID) ([]influxdb.SecretField, platform.ID, error) { edp, err := s.FindNotificationEndpointByID(ctx, id) if err != nil { return nil, 0, err diff --git a/authorizer/notification_endpoint_test.go b/authorizer/notification_endpoint_test.go index 58698c7a4fa..04fb3b098c5 100644 --- a/authorizer/notification_endpoint_test.go +++ b/authorizer/notification_endpoint_test.go @@ -10,6 +10,8 @@ import ( "github.com/influxdata/influxdb/v2" "github.com/influxdata/influxdb/v2/authorizer" influxdbcontext "github.com/influxdata/influxdb/v2/context" + "github.com/influxdata/influxdb/v2/kit/platform" + "github.com/influxdata/influxdb/v2/kit/platform/errors" "github.com/influxdata/influxdb/v2/mock" "github.com/influxdata/influxdb/v2/notification/endpoint" influxdbtesting "github.com/influxdata/influxdb/v2/testing" @@ -34,7 +36,7 @@ func TestNotificationEndpointService_FindNotificationEndpointByID(t *testing.T) } type args struct { permission influxdb.Permission - id influxdb.ID + id platform.ID } type wants struct { err error @@ -50,8 +52,8 @@ func TestNotificationEndpointService_FindNotificationEndpointByID(t *testing.T) name: "authorized to access id with org", fields: fields{ NotificationEndpointService: &mock.NotificationEndpointService{ - FindNotificationEndpointByIDF: func(ctx context.Context, id influxdb.ID) (influxdb.NotificationEndpoint, error) { - orgID := influxdb.ID(10) + FindNotificationEndpointByIDF: func(ctx context.Context, id platform.ID) (influxdb.NotificationEndpoint, error) { + orgID := platform.ID(10) return &endpoint.Slack{ Base: endpoint.Base{ ID: &id, @@ -79,8 +81,8 @@ func TestNotificationEndpointService_FindNotificationEndpointByID(t *testing.T) name: "unauthorized to access id", fields: fields{ NotificationEndpointService: &mock.NotificationEndpointService{ - FindNotificationEndpointByIDF: func(ctx 
context.Context, id influxdb.ID) (influxdb.NotificationEndpoint, error) { - orgID := influxdb.ID(10) + FindNotificationEndpointByIDF: func(ctx context.Context, id platform.ID) (influxdb.NotificationEndpoint, error) { + orgID := platform.ID(10) return &endpoint.Slack{ Base: endpoint.Base{ ID: &id, @@ -101,9 +103,9 @@ func TestNotificationEndpointService_FindNotificationEndpointByID(t *testing.T) id: 1, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "read:orgs/000000000000000a/notificationEndpoints/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -199,7 +201,7 @@ func TestNotificationEndpointService_FindNotificationEndpoints(t *testing.T) { ctx := context.Background() ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - oid := influxdb.ID(10) + oid := platform.ID(10) edps, _, err := s.FindNotificationEndpoints(ctx, influxdb.NotificationEndpointFilter{OrgID: &oid}) influxdbtesting.ErrorsEqual(t, err, tt.wants.err) @@ -215,7 +217,7 @@ func TestNotificationEndpointService_UpdateNotificationEndpoint(t *testing.T) { NotificationEndpointService influxdb.NotificationEndpointService } type args struct { - id influxdb.ID + id platform.ID permissions []influxdb.Permission } type wants struct { @@ -232,7 +234,7 @@ func TestNotificationEndpointService_UpdateNotificationEndpoint(t *testing.T) { name: "authorized to update notificationEndpoint with org owner", fields: fields{ NotificationEndpointService: &mock.NotificationEndpointService{ - FindNotificationEndpointByIDF: func(ctc context.Context, id influxdb.ID) (influxdb.NotificationEndpoint, error) { + FindNotificationEndpointByIDF: func(ctc context.Context, id platform.ID) (influxdb.NotificationEndpoint, error) { return &endpoint.Slack{ Base: endpoint.Base{ ID: idPtr(1), @@ -240,7 +242,7 @@ func TestNotificationEndpointService_UpdateNotificationEndpoint(t *testing.T) { }, }, nil }, - UpdateNotificationEndpointF: func(ctx context.Context, id influxdb.ID, upd influxdb.NotificationEndpoint, userID influxdb.ID) (influxdb.NotificationEndpoint, error) { + UpdateNotificationEndpointF: func(ctx context.Context, id platform.ID, upd influxdb.NotificationEndpoint, userID platform.ID) (influxdb.NotificationEndpoint, error) { return &endpoint.Slack{ Base: endpoint.Base{ ID: idPtr(1), @@ -277,7 +279,7 @@ func TestNotificationEndpointService_UpdateNotificationEndpoint(t *testing.T) { name: "unauthorized to update notificationEndpoint", fields: fields{ NotificationEndpointService: &mock.NotificationEndpointService{ - FindNotificationEndpointByIDF: func(ctc context.Context, id influxdb.ID) (influxdb.NotificationEndpoint, error) { + FindNotificationEndpointByIDF: func(ctc context.Context, id platform.ID) (influxdb.NotificationEndpoint, error) { return &endpoint.Slack{ Base: endpoint.Base{ ID: idPtr(1), @@ -285,7 +287,7 @@ func TestNotificationEndpointService_UpdateNotificationEndpoint(t *testing.T) { }, }, nil }, - UpdateNotificationEndpointF: func(ctx context.Context, id influxdb.ID, upd influxdb.NotificationEndpoint, userID influxdb.ID) (influxdb.NotificationEndpoint, error) { + UpdateNotificationEndpointF: func(ctx context.Context, id platform.ID, upd influxdb.NotificationEndpoint, userID platform.ID) (influxdb.NotificationEndpoint, error) { return &endpoint.Slack{ Base: endpoint.Base{ ID: idPtr(1), @@ -308,9 +310,9 @@ func TestNotificationEndpointService_UpdateNotificationEndpoint(t *testing.T) { }, }, wants: wants{ - err: 
&influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/000000000000000a/notificationEndpoints/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -325,7 +327,7 @@ func TestNotificationEndpointService_UpdateNotificationEndpoint(t *testing.T) { ctx := context.Background() ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - _, err := s.UpdateNotificationEndpoint(ctx, tt.args.id, &endpoint.Slack{}, influxdb.ID(1)) + _, err := s.UpdateNotificationEndpoint(ctx, tt.args.id, &endpoint.Slack{}, platform.ID(1)) influxdbtesting.ErrorsEqual(t, err, tt.wants.err) }) } @@ -336,7 +338,7 @@ func TestNotificationEndpointService_PatchNotificationEndpoint(t *testing.T) { NotificationEndpointService influxdb.NotificationEndpointService } type args struct { - id influxdb.ID + id platform.ID permissions []influxdb.Permission } type wants struct { @@ -353,7 +355,7 @@ func TestNotificationEndpointService_PatchNotificationEndpoint(t *testing.T) { name: "authorized to patch notificationEndpoint", fields: fields{ NotificationEndpointService: &mock.NotificationEndpointService{ - FindNotificationEndpointByIDF: func(ctc context.Context, id influxdb.ID) (influxdb.NotificationEndpoint, error) { + FindNotificationEndpointByIDF: func(ctc context.Context, id platform.ID) (influxdb.NotificationEndpoint, error) { return &endpoint.Slack{ Base: endpoint.Base{ ID: idPtr(1), @@ -361,7 +363,7 @@ func TestNotificationEndpointService_PatchNotificationEndpoint(t *testing.T) { }, }, nil }, - PatchNotificationEndpointF: func(ctx context.Context, id influxdb.ID, upd influxdb.NotificationEndpointUpdate) (influxdb.NotificationEndpoint, error) { + PatchNotificationEndpointF: func(ctx context.Context, id platform.ID, upd influxdb.NotificationEndpointUpdate) (influxdb.NotificationEndpoint, error) { return &endpoint.Slack{ Base: endpoint.Base{ ID: idPtr(1), @@ -398,7 +400,7 @@ func TestNotificationEndpointService_PatchNotificationEndpoint(t *testing.T) { name: "unauthorized to patch notificationEndpoint", fields: fields{ NotificationEndpointService: &mock.NotificationEndpointService{ - FindNotificationEndpointByIDF: func(ctc context.Context, id influxdb.ID) (influxdb.NotificationEndpoint, error) { + FindNotificationEndpointByIDF: func(ctc context.Context, id platform.ID) (influxdb.NotificationEndpoint, error) { return &endpoint.Slack{ Base: endpoint.Base{ ID: idPtr(1), @@ -406,7 +408,7 @@ func TestNotificationEndpointService_PatchNotificationEndpoint(t *testing.T) { }, }, nil }, - PatchNotificationEndpointF: func(ctx context.Context, id influxdb.ID, upd influxdb.NotificationEndpointUpdate) (influxdb.NotificationEndpoint, error) { + PatchNotificationEndpointF: func(ctx context.Context, id platform.ID, upd influxdb.NotificationEndpointUpdate) (influxdb.NotificationEndpoint, error) { return &endpoint.Slack{ Base: endpoint.Base{ ID: idPtr(1), @@ -429,9 +431,9 @@ func TestNotificationEndpointService_PatchNotificationEndpoint(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/000000000000000a/notificationEndpoints/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -456,7 +458,7 @@ func TestNotificationEndpointService_DeleteNotificationEndpoint(t *testing.T) { NotificationEndpointService influxdb.NotificationEndpointService } type args struct { - id influxdb.ID + id platform.ID permissions []influxdb.Permission } type wants struct { @@ -473,7 
+475,7 @@ func TestNotificationEndpointService_DeleteNotificationEndpoint(t *testing.T) { name: "authorized to delete notificationEndpoint", fields: fields{ NotificationEndpointService: &mock.NotificationEndpointService{ - FindNotificationEndpointByIDF: func(ctc context.Context, id influxdb.ID) (influxdb.NotificationEndpoint, error) { + FindNotificationEndpointByIDF: func(ctc context.Context, id platform.ID) (influxdb.NotificationEndpoint, error) { return &endpoint.Slack{ Base: endpoint.Base{ ID: idPtr(1), @@ -481,7 +483,7 @@ func TestNotificationEndpointService_DeleteNotificationEndpoint(t *testing.T) { }, }, nil }, - DeleteNotificationEndpointF: func(ctx context.Context, id influxdb.ID) ([]influxdb.SecretField, influxdb.ID, error) { + DeleteNotificationEndpointF: func(ctx context.Context, id platform.ID) ([]influxdb.SecretField, platform.ID, error) { return nil, 0, nil }, }, @@ -513,7 +515,7 @@ func TestNotificationEndpointService_DeleteNotificationEndpoint(t *testing.T) { name: "unauthorized to delete notificationEndpoint", fields: fields{ NotificationEndpointService: &mock.NotificationEndpointService{ - FindNotificationEndpointByIDF: func(ctc context.Context, id influxdb.ID) (influxdb.NotificationEndpoint, error) { + FindNotificationEndpointByIDF: func(ctc context.Context, id platform.ID) (influxdb.NotificationEndpoint, error) { return &endpoint.Slack{ Base: endpoint.Base{ ID: idPtr(1), @@ -521,7 +523,7 @@ func TestNotificationEndpointService_DeleteNotificationEndpoint(t *testing.T) { }, }, nil }, - DeleteNotificationEndpointF: func(ctx context.Context, id influxdb.ID) ([]influxdb.SecretField, influxdb.ID, error) { + DeleteNotificationEndpointF: func(ctx context.Context, id platform.ID) ([]influxdb.SecretField, platform.ID, error) { return nil, 0, nil }, }, @@ -539,9 +541,9 @@ func TestNotificationEndpointService_DeleteNotificationEndpoint(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/000000000000000a/notificationEndpoints/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -568,7 +570,7 @@ func TestNotificationEndpointService_CreateNotificationEndpoint(t *testing.T) { } type args struct { permission influxdb.Permission - orgID influxdb.ID + orgID platform.ID } type wants struct { err error @@ -584,7 +586,7 @@ func TestNotificationEndpointService_CreateNotificationEndpoint(t *testing.T) { name: "authorized to create notificationEndpoint", fields: fields{ NotificationEndpointService: &mock.NotificationEndpointService{ - CreateNotificationEndpointF: func(ctx context.Context, tc influxdb.NotificationEndpoint, userID influxdb.ID) error { + CreateNotificationEndpointF: func(ctx context.Context, tc influxdb.NotificationEndpoint, userID platform.ID) error { return nil }, }, @@ -607,7 +609,7 @@ func TestNotificationEndpointService_CreateNotificationEndpoint(t *testing.T) { name: "authorized to create notificationEndpoint with org owner", fields: fields{ NotificationEndpointService: &mock.NotificationEndpointService{ - CreateNotificationEndpointF: func(ctx context.Context, tc influxdb.NotificationEndpoint, userID influxdb.ID) error { + CreateNotificationEndpointF: func(ctx context.Context, tc influxdb.NotificationEndpoint, userID platform.ID) error { return nil }, }, @@ -630,7 +632,7 @@ func TestNotificationEndpointService_CreateNotificationEndpoint(t *testing.T) { name: "unauthorized to create notificationEndpoint", fields: fields{ NotificationEndpointService: 
&mock.NotificationEndpointService{ - CreateNotificationEndpointF: func(ctx context.Context, tc influxdb.NotificationEndpoint, userID influxdb.ID) error { + CreateNotificationEndpointF: func(ctx context.Context, tc influxdb.NotificationEndpoint, userID platform.ID) error { return nil }, }, @@ -646,9 +648,9 @@ func TestNotificationEndpointService_CreateNotificationEndpoint(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/000000000000000a/notificationEndpoints is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -666,12 +668,12 @@ func TestNotificationEndpointService_CreateNotificationEndpoint(t *testing.T) { err := s.CreateNotificationEndpoint(ctx, &endpoint.Slack{ Base: endpoint.Base{ OrgID: idPtr(tt.args.orgID)}, - }, influxdb.ID(1)) + }, platform.ID(1)) influxdbtesting.ErrorsEqual(t, err, tt.wants.err) }) } } -func idPtr(id influxdb.ID) *influxdb.ID { +func idPtr(id platform.ID) *platform.ID { return &id } diff --git a/authorizer/notification_rule.go b/authorizer/notification_rule.go index d47cf47c7c2..1a586d70d15 100644 --- a/authorizer/notification_rule.go +++ b/authorizer/notification_rule.go @@ -4,6 +4,7 @@ import ( "context" "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/kit/platform" ) var _ influxdb.NotificationRuleStore = (*NotificationRuleStore)(nil) @@ -26,7 +27,7 @@ func NewNotificationRuleStore(s influxdb.NotificationRuleStore, urm influxdb.Use } // FindNotificationRuleByID checks to see if the authorizer on context has read access to the id provided. -func (s *NotificationRuleStore) FindNotificationRuleByID(ctx context.Context, id influxdb.ID) (influxdb.NotificationRule, error) { +func (s *NotificationRuleStore) FindNotificationRuleByID(ctx context.Context, id platform.ID) (influxdb.NotificationRule, error) { nr, err := s.s.FindNotificationRuleByID(ctx, id) if err != nil { return nil, err @@ -49,7 +50,7 @@ func (s *NotificationRuleStore) FindNotificationRules(ctx context.Context, filte } // CreateNotificationRule checks to see if the authorizer on context has write access to the global notification rule resource. -func (s *NotificationRuleStore) CreateNotificationRule(ctx context.Context, nr influxdb.NotificationRuleCreate, userID influxdb.ID) error { +func (s *NotificationRuleStore) CreateNotificationRule(ctx context.Context, nr influxdb.NotificationRuleCreate, userID platform.ID) error { if _, _, err := AuthorizeCreate(ctx, influxdb.NotificationRuleResourceType, nr.GetOrgID()); err != nil { return err } @@ -57,7 +58,7 @@ func (s *NotificationRuleStore) CreateNotificationRule(ctx context.Context, nr i } // UpdateNotificationRule checks to see if the authorizer on context has write access to the notification rule provided. -func (s *NotificationRuleStore) UpdateNotificationRule(ctx context.Context, id influxdb.ID, upd influxdb.NotificationRuleCreate, userID influxdb.ID) (influxdb.NotificationRule, error) { +func (s *NotificationRuleStore) UpdateNotificationRule(ctx context.Context, id platform.ID, upd influxdb.NotificationRuleCreate, userID platform.ID) (influxdb.NotificationRule, error) { nr, err := s.FindNotificationRuleByID(ctx, id) if err != nil { return nil, err @@ -69,7 +70,7 @@ func (s *NotificationRuleStore) UpdateNotificationRule(ctx context.Context, id i } // PatchNotificationRule checks to see if the authorizer on context has write access to the notification rule provided. 
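Across every file in this change the error values move in lockstep with the IDs: influxdb.Error and influxdb.EUnauthorized become errors.Error and errors.EUnauthorized from kit/platform/errors, with the message format unchanged. Pieced together from the test expectations above (the helper itself is hypothetical, shown only to illustrate the shape):

func unauthorizedRuleError(orgID, id platform.ID) error {
	// platform.ID formats as zero-padded hex, matching the messages the
	// tests assert, e.g.
	// "write:orgs/000000000000000a/notificationRules/0000000000000001 is unauthorized".
	return &errors.Error{
		Code: errors.EUnauthorized,
		Msg:  fmt.Sprintf("write:orgs/%s/notificationRules/%s is unauthorized", orgID, id),
	}
}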
-func (s *NotificationRuleStore) PatchNotificationRule(ctx context.Context, id influxdb.ID, upd influxdb.NotificationRuleUpdate) (influxdb.NotificationRule, error) { +func (s *NotificationRuleStore) PatchNotificationRule(ctx context.Context, id platform.ID, upd influxdb.NotificationRuleUpdate) (influxdb.NotificationRule, error) { nr, err := s.s.FindNotificationRuleByID(ctx, id) if err != nil { return nil, err @@ -81,7 +82,7 @@ func (s *NotificationRuleStore) PatchNotificationRule(ctx context.Context, id in } // DeleteNotificationRule checks to see if the authorizer on context has write access to the notification rule provided. -func (s *NotificationRuleStore) DeleteNotificationRule(ctx context.Context, id influxdb.ID) error { +func (s *NotificationRuleStore) DeleteNotificationRule(ctx context.Context, id platform.ID) error { nr, err := s.s.FindNotificationRuleByID(ctx, id) if err != nil { return err diff --git a/authorizer/notification_rule_test.go b/authorizer/notification_rule_test.go index a45f721b1f7..adb1b2818b5 100644 --- a/authorizer/notification_rule_test.go +++ b/authorizer/notification_rule_test.go @@ -10,6 +10,8 @@ import ( "github.com/influxdata/influxdb/v2" "github.com/influxdata/influxdb/v2/authorizer" influxdbcontext "github.com/influxdata/influxdb/v2/context" + "github.com/influxdata/influxdb/v2/kit/platform" + "github.com/influxdata/influxdb/v2/kit/platform/errors" "github.com/influxdata/influxdb/v2/mock" "github.com/influxdata/influxdb/v2/notification/rule" influxdbtesting "github.com/influxdata/influxdb/v2/testing" @@ -34,7 +36,7 @@ func TestNotificationRuleStore_FindNotificationRuleByID(t *testing.T) { } type args struct { permission influxdb.Permission - id influxdb.ID + id platform.ID } type wants struct { err error @@ -50,7 +52,7 @@ func TestNotificationRuleStore_FindNotificationRuleByID(t *testing.T) { name: "authorized to access id", fields: fields{ NotificationRuleStore: &mock.NotificationRuleStore{ - FindNotificationRuleByIDF: func(ctx context.Context, id influxdb.ID) (influxdb.NotificationRule, error) { + FindNotificationRuleByIDF: func(ctx context.Context, id platform.ID) (influxdb.NotificationRule, error) { return &rule.Slack{ Base: rule.Base{ ID: id, @@ -78,7 +80,7 @@ func TestNotificationRuleStore_FindNotificationRuleByID(t *testing.T) { name: "unauthorized to access id", fields: fields{ NotificationRuleStore: &mock.NotificationRuleStore{ - FindNotificationRuleByIDF: func(ctx context.Context, id influxdb.ID) (influxdb.NotificationRule, error) { + FindNotificationRuleByIDF: func(ctx context.Context, id platform.ID) (influxdb.NotificationRule, error) { return &rule.Slack{ Base: rule.Base{ ID: id, @@ -99,9 +101,9 @@ func TestNotificationRuleStore_FindNotificationRuleByID(t *testing.T) { id: 1, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "read:orgs/000000000000000a/notificationRules/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -275,7 +277,7 @@ func TestNotificationRuleStore_UpdateNotificationRule(t *testing.T) { NotificationRuleStore influxdb.NotificationRuleStore } type args struct { - id influxdb.ID + id platform.ID permissions []influxdb.Permission } type wants struct { @@ -292,7 +294,7 @@ func TestNotificationRuleStore_UpdateNotificationRule(t *testing.T) { name: "authorized to update notificationRule", fields: fields{ NotificationRuleStore: &mock.NotificationRuleStore{ - FindNotificationRuleByIDF: func(ctc 
context.Context, id influxdb.ID) (influxdb.NotificationRule, error) { + FindNotificationRuleByIDF: func(ctc context.Context, id platform.ID) (influxdb.NotificationRule, error) { return &rule.Slack{ Base: rule.Base{ ID: 1, @@ -300,7 +302,7 @@ func TestNotificationRuleStore_UpdateNotificationRule(t *testing.T) { }, }, nil }, - UpdateNotificationRuleF: func(ctx context.Context, id influxdb.ID, upd influxdb.NotificationRuleCreate, userID influxdb.ID) (influxdb.NotificationRule, error) { + UpdateNotificationRuleF: func(ctx context.Context, id platform.ID, upd influxdb.NotificationRuleCreate, userID platform.ID) (influxdb.NotificationRule, error) { return &rule.Slack{ Base: rule.Base{ ID: 1, @@ -337,7 +339,7 @@ func TestNotificationRuleStore_UpdateNotificationRule(t *testing.T) { name: "unauthorized to update notificationRule", fields: fields{ NotificationRuleStore: &mock.NotificationRuleStore{ - FindNotificationRuleByIDF: func(ctc context.Context, id influxdb.ID) (influxdb.NotificationRule, error) { + FindNotificationRuleByIDF: func(ctc context.Context, id platform.ID) (influxdb.NotificationRule, error) { return &rule.Slack{ Base: rule.Base{ ID: 1, @@ -345,7 +347,7 @@ func TestNotificationRuleStore_UpdateNotificationRule(t *testing.T) { }, }, nil }, - UpdateNotificationRuleF: func(ctx context.Context, id influxdb.ID, upd influxdb.NotificationRuleCreate, userID influxdb.ID) (influxdb.NotificationRule, error) { + UpdateNotificationRuleF: func(ctx context.Context, id platform.ID, upd influxdb.NotificationRuleCreate, userID platform.ID) (influxdb.NotificationRule, error) { return &rule.Slack{ Base: rule.Base{ ID: 1, @@ -368,9 +370,9 @@ func TestNotificationRuleStore_UpdateNotificationRule(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/000000000000000a/notificationRules/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -388,7 +390,7 @@ func TestNotificationRuleStore_UpdateNotificationRule(t *testing.T) { Status: influxdb.Active, } - _, err := s.UpdateNotificationRule(ctx, tt.args.id, nrc, influxdb.ID(1)) + _, err := s.UpdateNotificationRule(ctx, tt.args.id, nrc, platform.ID(1)) influxdbtesting.ErrorsEqual(t, err, tt.wants.err) }) } @@ -399,7 +401,7 @@ func TestNotificationRuleStore_PatchNotificationRule(t *testing.T) { NotificationRuleStore influxdb.NotificationRuleStore } type args struct { - id influxdb.ID + id platform.ID permissions []influxdb.Permission } type wants struct { @@ -416,7 +418,7 @@ func TestNotificationRuleStore_PatchNotificationRule(t *testing.T) { name: "authorized to patch notificationRule", fields: fields{ NotificationRuleStore: &mock.NotificationRuleStore{ - FindNotificationRuleByIDF: func(ctc context.Context, id influxdb.ID) (influxdb.NotificationRule, error) { + FindNotificationRuleByIDF: func(ctc context.Context, id platform.ID) (influxdb.NotificationRule, error) { return &rule.Slack{ Base: rule.Base{ ID: 1, @@ -424,7 +426,7 @@ func TestNotificationRuleStore_PatchNotificationRule(t *testing.T) { }, }, nil }, - PatchNotificationRuleF: func(ctx context.Context, id influxdb.ID, upd influxdb.NotificationRuleUpdate) (influxdb.NotificationRule, error) { + PatchNotificationRuleF: func(ctx context.Context, id platform.ID, upd influxdb.NotificationRuleUpdate) (influxdb.NotificationRule, error) { return &rule.Slack{ Base: rule.Base{ ID: 1, @@ -461,7 +463,7 @@ func TestNotificationRuleStore_PatchNotificationRule(t *testing.T) { name: "unauthorized to patch notificationRule", 
fields: fields{ NotificationRuleStore: &mock.NotificationRuleStore{ - FindNotificationRuleByIDF: func(ctc context.Context, id influxdb.ID) (influxdb.NotificationRule, error) { + FindNotificationRuleByIDF: func(ctc context.Context, id platform.ID) (influxdb.NotificationRule, error) { return &rule.Slack{ Base: rule.Base{ ID: 1, @@ -469,7 +471,7 @@ func TestNotificationRuleStore_PatchNotificationRule(t *testing.T) { }, }, nil }, - PatchNotificationRuleF: func(ctx context.Context, id influxdb.ID, upd influxdb.NotificationRuleUpdate) (influxdb.NotificationRule, error) { + PatchNotificationRuleF: func(ctx context.Context, id platform.ID, upd influxdb.NotificationRuleUpdate) (influxdb.NotificationRule, error) { return &rule.Slack{ Base: rule.Base{ ID: 1, @@ -492,9 +494,9 @@ func TestNotificationRuleStore_PatchNotificationRule(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/000000000000000a/notificationRules/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -518,7 +520,7 @@ func TestNotificationRuleStore_DeleteNotificationRule(t *testing.T) { NotificationRuleStore influxdb.NotificationRuleStore } type args struct { - id influxdb.ID + id platform.ID permissions []influxdb.Permission } type wants struct { @@ -535,7 +537,7 @@ func TestNotificationRuleStore_DeleteNotificationRule(t *testing.T) { name: "authorized to delete notificationRule", fields: fields{ NotificationRuleStore: &mock.NotificationRuleStore{ - FindNotificationRuleByIDF: func(ctc context.Context, id influxdb.ID) (influxdb.NotificationRule, error) { + FindNotificationRuleByIDF: func(ctc context.Context, id platform.ID) (influxdb.NotificationRule, error) { return &rule.Slack{ Base: rule.Base{ ID: 1, @@ -543,7 +545,7 @@ func TestNotificationRuleStore_DeleteNotificationRule(t *testing.T) { }, }, nil }, - DeleteNotificationRuleF: func(ctx context.Context, id influxdb.ID) error { + DeleteNotificationRuleF: func(ctx context.Context, id platform.ID) error { return nil }, }, @@ -575,7 +577,7 @@ func TestNotificationRuleStore_DeleteNotificationRule(t *testing.T) { name: "unauthorized to delete notificationRule", fields: fields{ NotificationRuleStore: &mock.NotificationRuleStore{ - FindNotificationRuleByIDF: func(ctc context.Context, id influxdb.ID) (influxdb.NotificationRule, error) { + FindNotificationRuleByIDF: func(ctc context.Context, id platform.ID) (influxdb.NotificationRule, error) { return &rule.Slack{ Base: rule.Base{ ID: 1, @@ -583,7 +585,7 @@ func TestNotificationRuleStore_DeleteNotificationRule(t *testing.T) { }, }, nil }, - DeleteNotificationRuleF: func(ctx context.Context, id influxdb.ID) error { + DeleteNotificationRuleF: func(ctx context.Context, id platform.ID) error { return nil }, }, @@ -601,9 +603,9 @@ func TestNotificationRuleStore_DeleteNotificationRule(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/000000000000000a/notificationRules/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -628,7 +630,7 @@ func TestNotificationRuleStore_CreateNotificationRule(t *testing.T) { } type args struct { permission influxdb.Permission - orgID influxdb.ID + orgID platform.ID } type wants struct { err error @@ -644,7 +646,7 @@ func TestNotificationRuleStore_CreateNotificationRule(t *testing.T) { name: "authorized to create notificationRule", fields: fields{ NotificationRuleStore: &mock.NotificationRuleStore{ - 
CreateNotificationRuleF: func(ctx context.Context, tc influxdb.NotificationRuleCreate, userID influxdb.ID) error { + CreateNotificationRuleF: func(ctx context.Context, tc influxdb.NotificationRuleCreate, userID platform.ID) error { return nil }, }, @@ -667,7 +669,7 @@ func TestNotificationRuleStore_CreateNotificationRule(t *testing.T) { name: "unauthorized to create notificationRule", fields: fields{ NotificationRuleStore: &mock.NotificationRuleStore{ - CreateNotificationRuleF: func(ctx context.Context, tc influxdb.NotificationRuleCreate, userID influxdb.ID) error { + CreateNotificationRuleF: func(ctx context.Context, tc influxdb.NotificationRuleCreate, userID platform.ID) error { return nil }, }, @@ -683,9 +685,9 @@ func TestNotificationRuleStore_CreateNotificationRule(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/000000000000000a/notificationRules is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -708,7 +710,7 @@ func TestNotificationRuleStore_CreateNotificationRule(t *testing.T) { Status: influxdb.Active, } - err := s.CreateNotificationRule(ctx, nrc, influxdb.ID(1)) + err := s.CreateNotificationRule(ctx, nrc, platform.ID(1)) influxdbtesting.ErrorsEqual(t, err, tt.wants.err) }) } diff --git a/authorizer/org.go b/authorizer/org.go index 232a3bacd10..e57b9cfbcd3 100644 --- a/authorizer/org.go +++ b/authorizer/org.go @@ -5,6 +5,7 @@ import ( "github.com/influxdata/influxdb/v2" icontext "github.com/influxdata/influxdb/v2/context" + "github.com/influxdata/influxdb/v2/kit/platform" ) var _ influxdb.OrganizationService = (*OrgService)(nil) @@ -23,7 +24,7 @@ func NewOrgService(s influxdb.OrganizationService) *OrgService { } // FindOrganizationByID checks to see if the authorizer on context has read access to the id provided. -func (s *OrgService) FindOrganizationByID(ctx context.Context, id influxdb.ID) (*influxdb.Organization, error) { +func (s *OrgService) FindOrganizationByID(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { if _, _, err := AuthorizeReadOrg(ctx, id); err != nil { return nil, err } @@ -74,7 +75,7 @@ func (s *OrgService) CreateOrganization(ctx context.Context, o *influxdb.Organiz } // UpdateOrganization checks to see if the authorizer on context has write access to the organization provided. -func (s *OrgService) UpdateOrganization(ctx context.Context, id influxdb.ID, upd influxdb.OrganizationUpdate) (*influxdb.Organization, error) { +func (s *OrgService) UpdateOrganization(ctx context.Context, id platform.ID, upd influxdb.OrganizationUpdate) (*influxdb.Organization, error) { if _, _, err := AuthorizeWriteOrg(ctx, id); err != nil { return nil, err } @@ -82,7 +83,7 @@ func (s *OrgService) UpdateOrganization(ctx context.Context, id influxdb.ID, upd } // DeleteOrganization checks to see if the authorizer on context has write access to the organization provided. 
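OrgService is the one wrapper that needs no preliminary lookup: the org ID on the request is itself the resource being guarded, so the check runs directly against the ID before delegating. A usage sketch with the mock authorizer the tests use, where "underlying" stands in for any concrete influxdb.OrganizationService:

id := platform.ID(1)
perm := influxdb.Permission{
	Action:   influxdb.ReadAction,
	Resource: influxdb.Resource{Type: influxdb.OrgsResourceType, ID: &id},
}
ctx := influxdbcontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(false, []influxdb.Permission{perm}))

orgs := authorizer.NewOrgService(underlying)
org, err := orgs.FindOrganizationByID(ctx, id) // without perm this fails with errors.EUnauthorized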
-func (s *OrgService) DeleteOrganization(ctx context.Context, id influxdb.ID) error { +func (s *OrgService) DeleteOrganization(ctx context.Context, id platform.ID) error { if _, _, err := AuthorizeWriteOrg(ctx, id); err != nil { return err } diff --git a/authorizer/org_test.go b/authorizer/org_test.go index 5a539825b36..e5ea7785f5c 100644 --- a/authorizer/org_test.go +++ b/authorizer/org_test.go @@ -10,6 +10,8 @@ import ( "github.com/influxdata/influxdb/v2" "github.com/influxdata/influxdb/v2/authorizer" influxdbcontext "github.com/influxdata/influxdb/v2/context" + "github.com/influxdata/influxdb/v2/kit/platform" + "github.com/influxdata/influxdb/v2/kit/platform/errors" "github.com/influxdata/influxdb/v2/mock" influxdbtesting "github.com/influxdata/influxdb/v2/testing" ) @@ -33,7 +35,7 @@ func TestOrgService_FindOrganizationByID(t *testing.T) { } type args struct { permission influxdb.Permission - id influxdb.ID + id platform.ID } type wants struct { err error @@ -49,7 +51,7 @@ func TestOrgService_FindOrganizationByID(t *testing.T) { name: "authorized to access id", fields: fields{ OrgService: &mock.OrganizationService{ - FindOrganizationByIDF: func(ctx context.Context, id influxdb.ID) (*influxdb.Organization, error) { + FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { return &influxdb.Organization{ ID: id, }, nil @@ -74,7 +76,7 @@ func TestOrgService_FindOrganizationByID(t *testing.T) { name: "unauthorized to access id", fields: fields{ OrgService: &mock.OrganizationService{ - FindOrganizationByIDF: func(ctx context.Context, id influxdb.ID) (*influxdb.Organization, error) { + FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { return &influxdb.Organization{ ID: id, }, nil @@ -92,9 +94,9 @@ func TestOrgService_FindOrganizationByID(t *testing.T) { id: 1, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "read:orgs/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -174,9 +176,9 @@ func TestOrgService_FindOrganization(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "read:orgs/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -314,7 +316,7 @@ func TestOrgService_UpdateOrganization(t *testing.T) { OrgService influxdb.OrganizationService } type args struct { - id influxdb.ID + id platform.ID permission influxdb.Permission } type wants struct { @@ -331,7 +333,7 @@ func TestOrgService_UpdateOrganization(t *testing.T) { name: "authorized to update org", fields: fields{ OrgService: &mock.OrganizationService{ - UpdateOrganizationF: func(ctx context.Context, id influxdb.ID, upd influxdb.OrganizationUpdate) (*influxdb.Organization, error) { + UpdateOrganizationF: func(ctx context.Context, id platform.ID, upd influxdb.OrganizationUpdate) (*influxdb.Organization, error) { return &influxdb.Organization{ ID: 1, }, nil @@ -356,7 +358,7 @@ func TestOrgService_UpdateOrganization(t *testing.T) { name: "unauthorized to update org", fields: fields{ OrgService: &mock.OrganizationService{ - UpdateOrganizationF: func(ctx context.Context, id influxdb.ID, upd influxdb.OrganizationUpdate) (*influxdb.Organization, error) { + UpdateOrganizationF: func(ctx context.Context, id platform.ID, upd influxdb.OrganizationUpdate) (*influxdb.Organization, error) { return &influxdb.Organization{ ID: 1, }, nil 
@@ -374,9 +376,9 @@ func TestOrgService_UpdateOrganization(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -400,7 +402,7 @@ func TestOrgService_DeleteOrganization(t *testing.T) { OrgService influxdb.OrganizationService } type args struct { - id influxdb.ID + id platform.ID permission influxdb.Permission } type wants struct { @@ -417,7 +419,7 @@ func TestOrgService_DeleteOrganization(t *testing.T) { name: "authorized to delete org", fields: fields{ OrgService: &mock.OrganizationService{ - DeleteOrganizationF: func(ctx context.Context, id influxdb.ID) error { + DeleteOrganizationF: func(ctx context.Context, id platform.ID) error { return nil }, }, @@ -440,7 +442,7 @@ func TestOrgService_DeleteOrganization(t *testing.T) { name: "unauthorized to delete org", fields: fields{ OrgService: &mock.OrganizationService{ - DeleteOrganizationF: func(ctx context.Context, id influxdb.ID) error { + DeleteOrganizationF: func(ctx context.Context, id platform.ID) error { return nil }, }, @@ -456,9 +458,9 @@ func TestOrgService_DeleteOrganization(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -534,9 +536,9 @@ func TestOrgService_CreateOrganization(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, diff --git a/authorizer/password.go b/authorizer/password.go index 7d2c14daaf0..b0605d65dbd 100644 --- a/authorizer/password.go +++ b/authorizer/password.go @@ -4,6 +4,7 @@ import ( "context" "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/kit/platform" ) // PasswordService is a new authorization middleware for a password service. @@ -17,7 +18,7 @@ func NewPasswordService(svc influxdb.PasswordsService) *PasswordService { } // SetPassword overrides the password of a known user. -func (s *PasswordService) SetPassword(ctx context.Context, userID influxdb.ID, password string) error { +func (s *PasswordService) SetPassword(ctx context.Context, userID platform.ID, password string) error { if _, _, err := AuthorizeWriteResource(ctx, influxdb.UsersResourceType, userID); err != nil { return err } @@ -26,12 +27,12 @@ func (s *PasswordService) SetPassword(ctx context.Context, userID influxdb.ID, p // ComparePassword checks if the password matches the password recorded. // Passwords that do not match return errors. -func (s *PasswordService) ComparePassword(ctx context.Context, userID influxdb.ID, password string) error { +func (s *PasswordService) ComparePassword(ctx context.Context, userID platform.ID, password string) error { panic("not implemented") } // CompareAndSetPassword checks the password and if they match // updates to the new password. 
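The companion rename moves influxdb.Error and its code constants to kit/platform/errors with Code and Msg unchanged, which is why these wants blocks only swap the package qualifier. A simplified, self-contained stand-in for the shape the tests assert on (the real type carries additional fields such as a wrapped error):

```go
package main

import "fmt"

// EUnauthorized and Error are simplified stand-ins for the constants and
// type in kit/platform/errors.
const EUnauthorized = "unauthorized"

type Error struct {
	Code string
	Msg  string
}

func (e *Error) Error() string { return fmt.Sprintf("<%s> %s", e.Code, e.Msg) }

// ErrorCode mirrors errors.ErrorCode as used by task.go later in this diff:
// it recovers the machine-readable code from an error value.
func ErrorCode(err error) string {
	if e, ok := err.(*Error); ok {
		return e.Code
	}
	return ""
}

func main() {
	err := &Error{Code: EUnauthorized, Msg: "write:orgs/0000000000000001 is unauthorized"}
	fmt.Println(ErrorCode(err) == EUnauthorized) // true
}
```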
-func (s *PasswordService) CompareAndSetPassword(ctx context.Context, userID influxdb.ID, old string, new string) error { +func (s *PasswordService) CompareAndSetPassword(ctx context.Context, userID platform.ID, old string, new string) error { panic("not implemented") } diff --git a/authorizer/password_test.go b/authorizer/password_test.go index ac1754541eb..b0b6df6729f 100644 --- a/authorizer/password_test.go +++ b/authorizer/password_test.go @@ -7,6 +7,7 @@ import ( "github.com/influxdata/influxdb/v2" "github.com/influxdata/influxdb/v2/authorizer" icontext "github.com/influxdata/influxdb/v2/context" + "github.com/influxdata/influxdb/v2/kit/platform" "github.com/influxdata/influxdb/v2/mock" "github.com/stretchr/testify/require" ) @@ -14,7 +15,7 @@ import ( func TestPasswordService(t *testing.T) { t.Run("SetPassword", func(t *testing.T) { t.Run("user with permissions should proceed", func(t *testing.T) { - userID := influxdb.ID(1) + userID := platform.ID(1) permission := influxdb.Permission{ Action: influxdb.WriteAction, @@ -25,7 +26,7 @@ func TestPasswordService(t *testing.T) { } fakeSVC := mock.NewPasswordsService() - fakeSVC.SetPasswordFn = func(_ context.Context, _ influxdb.ID, _ string) error { + fakeSVC.SetPasswordFn = func(_ context.Context, _ platform.ID, _ string) error { return nil } s := authorizer.NewPasswordService(fakeSVC) @@ -37,8 +38,8 @@ func TestPasswordService(t *testing.T) { }) t.Run("user without permissions should not proceed", func(t *testing.T) { - goodUserID := influxdb.ID(1) - badUserID := influxdb.ID(3) + goodUserID := platform.ID(1) + badUserID := platform.ID(3) tests := []struct { name string @@ -82,7 +83,7 @@ func TestPasswordService(t *testing.T) { for _, tt := range tests { fn := func(t *testing.T) { fakeSVC := &mock.PasswordsService{ - SetPasswordFn: func(_ context.Context, _ influxdb.ID, _ string) error { + SetPasswordFn: func(_ context.Context, _ platform.ID, _ string) error { return nil }, } diff --git a/authorizer/restore.go b/authorizer/restore.go index f2fd87fa5fc..51f5e9f5035 100644 --- a/authorizer/restore.go +++ b/authorizer/restore.go @@ -5,6 +5,7 @@ import ( "io" "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/kit/platform" "github.com/influxdata/influxdb/v2/kit/tracing" ) @@ -33,7 +34,7 @@ func (b RestoreService) RestoreKVStore(ctx context.Context, r io.Reader) error { return b.s.RestoreKVStore(ctx, r) } -func (b RestoreService) RestoreBucket(ctx context.Context, id influxdb.ID, dbi []byte) (shardIDMap map[uint64]uint64, err error) { +func (b RestoreService) RestoreBucket(ctx context.Context, id platform.ID, dbi []byte) (shardIDMap map[uint64]uint64, err error) { span, ctx := tracing.StartSpanFromContext(ctx) defer span.Finish() diff --git a/authorizer/scraper.go b/authorizer/scraper.go index 2fe424840e0..c2c3c8c17c8 100644 --- a/authorizer/scraper.go +++ b/authorizer/scraper.go @@ -4,6 +4,7 @@ import ( "context" "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/kit/platform" ) var _ influxdb.ScraperTargetStoreService = (*ScraperTargetStoreService)(nil) @@ -28,7 +29,7 @@ func NewScraperTargetStoreService(s influxdb.ScraperTargetStoreService, } // GetTargetByID checks to see if the authorizer on context has read access to the id provided.
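password.go above is the smallest instance of the middleware pattern: SetPassword demands write access to the specific user resource before delegating, while ComparePassword and CompareAndSetPassword panic because no caller should reach them through the authorizing layer. A stand-in sketch of the guard (the scraper.go hunks resume below):

```go
package authz

import (
	"context"
	"fmt"
)

type ID uint64

// PasswordsService stands in for the wrapped influxdb.PasswordsService.
type PasswordsService interface {
	SetPassword(ctx context.Context, userID ID, password string) error
}

type permKey struct{}

// authorizeWriteUser stands in for AuthorizeWriteResource(ctx,
// influxdb.UsersResourceType, userID) from this package.
func authorizeWriteUser(ctx context.Context, userID ID) error {
	if ctx.Value(permKey{}) != "write:users" {
		return fmt.Errorf("write:users/%016x is unauthorized", uint64(userID))
	}
	return nil
}

type PasswordService struct{ svc PasswordsService }

func (s *PasswordService) SetPassword(ctx context.Context, userID ID, password string) error {
	if err := authorizeWriteUser(ctx, userID); err != nil {
		return err
	}
	return s.svc.SetPassword(ctx, userID, password)
}

// ComparePassword is deliberately unreachable through this layer, matching
// the panic("not implemented") bodies above.
func (s *PasswordService) ComparePassword(ctx context.Context, userID ID, password string) error {
	panic("not implemented")
}
```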
-func (s *ScraperTargetStoreService) GetTargetByID(ctx context.Context, id influxdb.ID) (*influxdb.ScraperTarget, error) { +func (s *ScraperTargetStoreService) GetTargetByID(ctx context.Context, id platform.ID) (*influxdb.ScraperTarget, error) { st, err := s.s.GetTargetByID(ctx, id) if err != nil { return nil, err @@ -52,7 +53,7 @@ func (s *ScraperTargetStoreService) ListTargets(ctx context.Context, filter infl } // AddTarget checks to see if the authorizer on context has write access to the global scraper target resource. -func (s *ScraperTargetStoreService) AddTarget(ctx context.Context, st *influxdb.ScraperTarget, userID influxdb.ID) error { +func (s *ScraperTargetStoreService) AddTarget(ctx context.Context, st *influxdb.ScraperTarget, userID platform.ID) error { if _, _, err := AuthorizeCreate(ctx, influxdb.ScraperResourceType, st.OrgID); err != nil { return err } @@ -63,7 +64,7 @@ func (s *ScraperTargetStoreService) AddTarget(ctx context.Context, st *influxdb. } // UpdateTarget checks to see if the authorizer on context has write access to the scraper target provided. -func (s *ScraperTargetStoreService) UpdateTarget(ctx context.Context, upd *influxdb.ScraperTarget, userID influxdb.ID) (*influxdb.ScraperTarget, error) { +func (s *ScraperTargetStoreService) UpdateTarget(ctx context.Context, upd *influxdb.ScraperTarget, userID platform.ID) (*influxdb.ScraperTarget, error) { st, err := s.s.GetTargetByID(ctx, upd.ID) if err != nil { return nil, err @@ -78,7 +79,7 @@ func (s *ScraperTargetStoreService) UpdateTarget(ctx context.Context, upd *influ } // RemoveTarget checks to see if the authorizer on context has write access to the scraper target provided. -func (s *ScraperTargetStoreService) RemoveTarget(ctx context.Context, id influxdb.ID) error { +func (s *ScraperTargetStoreService) RemoveTarget(ctx context.Context, id platform.ID) error { st, err := s.s.GetTargetByID(ctx, id) if err != nil { return err diff --git a/authorizer/scraper_test.go b/authorizer/scraper_test.go index 7622cc76562..18dca984b91 100644 --- a/authorizer/scraper_test.go +++ b/authorizer/scraper_test.go @@ -10,6 +10,8 @@ import ( "github.com/influxdata/influxdb/v2" "github.com/influxdata/influxdb/v2/authorizer" influxdbcontext "github.com/influxdata/influxdb/v2/context" + "github.com/influxdata/influxdb/v2/kit/platform" + "github.com/influxdata/influxdb/v2/kit/platform/errors" "github.com/influxdata/influxdb/v2/mock" influxdbtesting "github.com/influxdata/influxdb/v2/testing" ) @@ -33,7 +35,7 @@ func TestScraperTargetStoreService_GetTargetByID(t *testing.T) { } type args struct { permission influxdb.Permission - id influxdb.ID + id platform.ID } type wants struct { err error @@ -49,7 +51,7 @@ func TestScraperTargetStoreService_GetTargetByID(t *testing.T) { name: "authorized to access id", fields: fields{ ScraperTargetStoreService: &mock.ScraperTargetStoreService{ - GetTargetByIDF: func(ctx context.Context, id influxdb.ID) (*influxdb.ScraperTarget, error) { + GetTargetByIDF: func(ctx context.Context, id platform.ID) (*influxdb.ScraperTarget, error) { return &influxdb.ScraperTarget{ ID: id, OrgID: 10, @@ -75,7 +77,7 @@ func TestScraperTargetStoreService_GetTargetByID(t *testing.T) { name: "unauthorized to access id", fields: fields{ ScraperTargetStoreService: &mock.ScraperTargetStoreService{ - GetTargetByIDF: func(ctx context.Context, id influxdb.ID) (*influxdb.ScraperTarget, error) { + GetTargetByIDF: func(ctx context.Context, id platform.ID) 
(*influxdb.ScraperTarget, error) { return &influxdb.ScraperTarget{ ID: id, OrgID: 10, @@ -94,9 +96,9 @@ func TestScraperTargetStoreService_GetTargetByID(t *testing.T) { id: 1, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "read:orgs/000000000000000a/scrapers/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -249,8 +251,8 @@ func TestScraperTargetStoreService_UpdateTarget(t *testing.T) { ScraperTargetStoreService influxdb.ScraperTargetStoreService } type args struct { - id influxdb.ID - bucketID influxdb.ID + id platform.ID + bucketID platform.ID permissions []influxdb.Permission } type wants struct { @@ -267,14 +269,14 @@ func TestScraperTargetStoreService_UpdateTarget(t *testing.T) { name: "authorized to update scraper", fields: fields{ ScraperTargetStoreService: &mock.ScraperTargetStoreService{ - GetTargetByIDF: func(ctc context.Context, id influxdb.ID) (*influxdb.ScraperTarget, error) { + GetTargetByIDF: func(ctc context.Context, id platform.ID) (*influxdb.ScraperTarget, error) { return &influxdb.ScraperTarget{ ID: 1, OrgID: 10, BucketID: 100, }, nil }, - UpdateTargetF: func(ctx context.Context, upd *influxdb.ScraperTarget, userID influxdb.ID) (*influxdb.ScraperTarget, error) { + UpdateTargetF: func(ctx context.Context, upd *influxdb.ScraperTarget, userID platform.ID) (*influxdb.ScraperTarget, error) { return &influxdb.ScraperTarget{ ID: 1, OrgID: 10, @@ -318,14 +320,14 @@ func TestScraperTargetStoreService_UpdateTarget(t *testing.T) { name: "unauthorized to update scraper", fields: fields{ ScraperTargetStoreService: &mock.ScraperTargetStoreService{ - GetTargetByIDF: func(ctc context.Context, id influxdb.ID) (*influxdb.ScraperTarget, error) { + GetTargetByIDF: func(ctc context.Context, id platform.ID) (*influxdb.ScraperTarget, error) { return &influxdb.ScraperTarget{ ID: 1, OrgID: 10, BucketID: 100, }, nil }, - UpdateTargetF: func(ctx context.Context, upd *influxdb.ScraperTarget, userID influxdb.ID) (*influxdb.ScraperTarget, error) { + UpdateTargetF: func(ctx context.Context, upd *influxdb.ScraperTarget, userID platform.ID) (*influxdb.ScraperTarget, error) { return &influxdb.ScraperTarget{ ID: 1, OrgID: 10, @@ -348,9 +350,9 @@ func TestScraperTargetStoreService_UpdateTarget(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/000000000000000a/scrapers/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -358,14 +360,14 @@ func TestScraperTargetStoreService_UpdateTarget(t *testing.T) { name: "unauthorized to write to bucket", fields: fields{ ScraperTargetStoreService: &mock.ScraperTargetStoreService{ - GetTargetByIDF: func(ctc context.Context, id influxdb.ID) (*influxdb.ScraperTarget, error) { + GetTargetByIDF: func(ctc context.Context, id platform.ID) (*influxdb.ScraperTarget, error) { return &influxdb.ScraperTarget{ ID: 1, OrgID: 10, BucketID: 100, }, nil }, - UpdateTargetF: func(ctx context.Context, upd *influxdb.ScraperTarget, userID influxdb.ID) (*influxdb.ScraperTarget, error) { + UpdateTargetF: func(ctx context.Context, upd *influxdb.ScraperTarget, userID platform.ID) (*influxdb.ScraperTarget, error) { return &influxdb.ScraperTarget{ ID: 1, OrgID: 10, @@ -388,9 +390,9 @@ func TestScraperTargetStoreService_UpdateTarget(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/000000000000000a/buckets/0000000000000064 is unauthorized", - Code: 
influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -404,7 +406,7 @@ func TestScraperTargetStoreService_UpdateTarget(t *testing.T) { ctx := context.Background() ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - _, err := s.UpdateTarget(ctx, &influxdb.ScraperTarget{ID: tt.args.id, BucketID: tt.args.bucketID}, influxdb.ID(1)) + _, err := s.UpdateTarget(ctx, &influxdb.ScraperTarget{ID: tt.args.id, BucketID: tt.args.bucketID}, platform.ID(1)) influxdbtesting.ErrorsEqual(t, err, tt.wants.err) }) } @@ -415,7 +417,7 @@ func TestScraperTargetStoreService_RemoveTarget(t *testing.T) { ScraperTargetStoreService influxdb.ScraperTargetStoreService } type args struct { - id influxdb.ID + id platform.ID permissions []influxdb.Permission } type wants struct { @@ -432,13 +434,13 @@ func TestScraperTargetStoreService_RemoveTarget(t *testing.T) { name: "authorized to delete scraper", fields: fields{ ScraperTargetStoreService: &mock.ScraperTargetStoreService{ - GetTargetByIDF: func(ctc context.Context, id influxdb.ID) (*influxdb.ScraperTarget, error) { + GetTargetByIDF: func(ctc context.Context, id platform.ID) (*influxdb.ScraperTarget, error) { return &influxdb.ScraperTarget{ ID: 1, OrgID: 10, }, nil }, - RemoveTargetF: func(ctx context.Context, id influxdb.ID) error { + RemoveTargetF: func(ctx context.Context, id platform.ID) error { return nil }, }, @@ -470,13 +472,13 @@ func TestScraperTargetStoreService_RemoveTarget(t *testing.T) { name: "unauthorized to delete scraper", fields: fields{ ScraperTargetStoreService: &mock.ScraperTargetStoreService{ - GetTargetByIDF: func(ctc context.Context, id influxdb.ID) (*influxdb.ScraperTarget, error) { + GetTargetByIDF: func(ctc context.Context, id platform.ID) (*influxdb.ScraperTarget, error) { return &influxdb.ScraperTarget{ ID: 1, OrgID: 10, }, nil }, - RemoveTargetF: func(ctx context.Context, id influxdb.ID) error { + RemoveTargetF: func(ctx context.Context, id platform.ID) error { return nil }, }, @@ -494,9 +496,9 @@ func TestScraperTargetStoreService_RemoveTarget(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/000000000000000a/scrapers/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -522,8 +524,8 @@ func TestScraperTargetStoreService_AddTarget(t *testing.T) { } type args struct { permissions []influxdb.Permission - orgID influxdb.ID - bucketID influxdb.ID + orgID platform.ID + bucketID platform.ID } type wants struct { err error @@ -539,7 +541,7 @@ func TestScraperTargetStoreService_AddTarget(t *testing.T) { name: "authorized to create scraper", fields: fields{ ScraperTargetStoreService: &mock.ScraperTargetStoreService{ - AddTargetF: func(ctx context.Context, st *influxdb.ScraperTarget, userID influxdb.ID) error { + AddTargetF: func(ctx context.Context, st *influxdb.ScraperTarget, userID platform.ID) error { return nil }, }, @@ -572,7 +574,7 @@ func TestScraperTargetStoreService_AddTarget(t *testing.T) { name: "unauthorized to create scraper", fields: fields{ ScraperTargetStoreService: &mock.ScraperTargetStoreService{ - AddTargetF: func(ctx context.Context, st *influxdb.ScraperTarget, userID influxdb.ID) error { + AddTargetF: func(ctx context.Context, st *influxdb.ScraperTarget, userID platform.ID) error { return nil }, }, @@ -598,9 +600,9 @@ func TestScraperTargetStoreService_AddTarget(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: 
"write:orgs/000000000000000a/scrapers is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -608,7 +610,7 @@ func TestScraperTargetStoreService_AddTarget(t *testing.T) { name: "unauthorized to write to bucket", fields: fields{ ScraperTargetStoreService: &mock.ScraperTargetStoreService{ - AddTargetF: func(ctx context.Context, st *influxdb.ScraperTarget, userID influxdb.ID) error { + AddTargetF: func(ctx context.Context, st *influxdb.ScraperTarget, userID platform.ID) error { return nil }, }, @@ -634,9 +636,9 @@ func TestScraperTargetStoreService_AddTarget(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/000000000000000a/buckets/0000000000000064 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -650,7 +652,7 @@ func TestScraperTargetStoreService_AddTarget(t *testing.T) { ctx := context.Background() ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - err := s.AddTarget(ctx, &influxdb.ScraperTarget{OrgID: tt.args.orgID, BucketID: tt.args.bucketID}, influxdb.ID(1)) + err := s.AddTarget(ctx, &influxdb.ScraperTarget{OrgID: tt.args.orgID, BucketID: tt.args.bucketID}, platform.ID(1)) influxdbtesting.ErrorsEqual(t, err, tt.wants.err) }) } diff --git a/authorizer/secret.go b/authorizer/secret.go index baefe02465c..bf816f96187 100644 --- a/authorizer/secret.go +++ b/authorizer/secret.go @@ -4,6 +4,7 @@ import ( "context" "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/kit/platform" ) var _ influxdb.SecretService = (*SecretService)(nil) @@ -22,7 +23,7 @@ func NewSecretService(s influxdb.SecretService) *SecretService { } // LoadSecret checks to see if the authorizer on context has read access to the secret key provided. -func (s *SecretService) LoadSecret(ctx context.Context, orgID influxdb.ID, key string) (string, error) { +func (s *SecretService) LoadSecret(ctx context.Context, orgID platform.ID, key string) (string, error) { if _, _, err := AuthorizeOrgReadResource(ctx, influxdb.SecretsResourceType, orgID); err != nil { return "", err } @@ -34,7 +35,7 @@ func (s *SecretService) LoadSecret(ctx context.Context, orgID influxdb.ID, key s } // GetSecretKeys checks to see if the authorizer on context has read access to all the secrets belonging to orgID. -func (s *SecretService) GetSecretKeys(ctx context.Context, orgID influxdb.ID) ([]string, error) { +func (s *SecretService) GetSecretKeys(ctx context.Context, orgID platform.ID) ([]string, error) { if _, _, err := AuthorizeOrgReadResource(ctx, influxdb.SecretsResourceType, orgID); err != nil { return []string{}, err } @@ -46,7 +47,7 @@ func (s *SecretService) GetSecretKeys(ctx context.Context, orgID influxdb.ID) ([ } // PutSecret checks to see if the authorizer on context has write access to the secret key provided. -func (s *SecretService) PutSecret(ctx context.Context, orgID influxdb.ID, key string, val string) error { +func (s *SecretService) PutSecret(ctx context.Context, orgID platform.ID, key string, val string) error { if _, _, err := AuthorizeCreate(ctx, influxdb.SecretsResourceType, orgID); err != nil { return err } @@ -58,7 +59,7 @@ func (s *SecretService) PutSecret(ctx context.Context, orgID influxdb.ID, key st } // PutSecrets checks to see if the authorizer on context has read and write access to the secret keys provided. 
-func (s *SecretService) PutSecrets(ctx context.Context, orgID influxdb.ID, m map[string]string) error { +func (s *SecretService) PutSecrets(ctx context.Context, orgID platform.ID, m map[string]string) error { // PutSecrets operates on the intersection between m and the keys belonging to orgID. // We need to have read access to those secrets since it deletes the secrets (within the intersection) that have not been overridden. if _, _, err := AuthorizeOrgReadResource(ctx, influxdb.SecretsResourceType, orgID); err != nil { @@ -75,7 +76,7 @@ func (s *SecretService) PutSecrets(ctx context.Context, orgID influxdb.ID, m map } // PatchSecrets checks to see if the authorizer on context has write access to the secret keys provided. -func (s *SecretService) PatchSecrets(ctx context.Context, orgID influxdb.ID, m map[string]string) error { +func (s *SecretService) PatchSecrets(ctx context.Context, orgID platform.ID, m map[string]string) error { if _, _, err := AuthorizeOrgWriteResource(ctx, influxdb.SecretsResourceType, orgID); err != nil { return err } @@ -87,7 +88,7 @@ func (s *SecretService) PatchSecrets(ctx context.Context, orgID influxdb.ID, m m } // DeleteSecret checks to see if the authorizer on context has write access to the secret keys provided. -func (s *SecretService) DeleteSecret(ctx context.Context, orgID influxdb.ID, keys ...string) error { +func (s *SecretService) DeleteSecret(ctx context.Context, orgID platform.ID, keys ...string) error { if _, _, err := AuthorizeOrgWriteResource(ctx, influxdb.SecretsResourceType, orgID); err != nil { return err } diff --git a/authorizer/secret_test.go b/authorizer/secret_test.go index 34c7664e882..cca5f09093d 100644 --- a/authorizer/secret_test.go +++ b/authorizer/secret_test.go @@ -9,6 +9,8 @@ import ( "github.com/influxdata/influxdb/v2" "github.com/influxdata/influxdb/v2/authorizer" influxdbcontext "github.com/influxdata/influxdb/v2/context" + "github.com/influxdata/influxdb/v2/kit/platform" + "github.com/influxdata/influxdb/v2/kit/platform/errors" "github.com/influxdata/influxdb/v2/mock" influxdbtesting "github.com/influxdata/influxdb/v2/testing" ) @@ -25,7 +27,7 @@ func TestSecretService_LoadSecret(t *testing.T) { } type args struct { permission influxdb.Permission - org influxdb.ID + org platform.ID key string } type wants struct { @@ -42,12 +44,12 @@ func TestSecretService_LoadSecret(t *testing.T) { name: "authorized to access secret within org", fields: fields{ SecretService: &mock.SecretService{ - LoadSecretFn: func(ctx context.Context, orgID influxdb.ID, k string) (string, error) { + LoadSecretFn: func(ctx context.Context, orgID platform.ID, k string) (string, error) { if k == "key" { return "val", nil } - return "", &influxdb.Error{ - Code: influxdb.ENotFound, + return "", &errors.Error{ + Code: errors.ENotFound, Msg: influxdb.ErrSecretNotFound, } }, @@ -62,7 +64,7 @@ func TestSecretService_LoadSecret(t *testing.T) { }, }, key: "key", - org: influxdb.ID(10), + org: platform.ID(10), }, wants: wants{ err: nil, }, }, { name: "cannot access a non-existent secret", fields: fields{ SecretService: &mock.SecretService{ - LoadSecretFn: func(ctx context.Context, orgID influxdb.ID, k string) (string, error) { + LoadSecretFn: func(ctx context.Context, orgID platform.ID, k string) (string, error) { if k == "key" { return "val", nil } - return "", &influxdb.Error{ - Code: influxdb.ENotFound, + return "", &errors.Error{ + Code: errors.ENotFound, Msg:
influxdb.ErrSecretNotFound, } }, @@ -92,11 +94,11 @@ func TestSecretService_LoadSecret(t *testing.T) { }, }, key: "not existing", - org: influxdb.ID(10), + org: platform.ID(10), }, wants: wants{ - err: &influxdb.Error{ - Code: influxdb.ENotFound, + err: &errors.Error{ + Code: errors.ENotFound, Msg: influxdb.ErrSecretNotFound, }, }, @@ -105,12 +107,12 @@ func TestSecretService_LoadSecret(t *testing.T) { name: "unauthorized to access secret within org", fields: fields{ SecretService: &mock.SecretService{ - LoadSecretFn: func(ctx context.Context, orgID influxdb.ID, k string) (string, error) { + LoadSecretFn: func(ctx context.Context, orgID platform.ID, k string) (string, error) { if k == "key" { return "val", nil } - return "", &influxdb.Error{ - Code: influxdb.ENotFound, + return "", &errors.Error{ + Code: errors.ENotFound, Msg: influxdb.ErrSecretNotFound, } }, @@ -124,13 +126,13 @@ func TestSecretService_LoadSecret(t *testing.T) { ID: influxdbtesting.IDPtr(10), }, }, - org: influxdb.ID(2), + org: platform.ID(2), key: "key", }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "read:orgs/0000000000000002/secrets is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -155,7 +157,7 @@ func TestSecretService_GetSecretKeys(t *testing.T) { } type args struct { permission influxdb.Permission - org influxdb.ID + org platform.ID } type wants struct { err error @@ -172,7 +174,7 @@ func TestSecretService_GetSecretKeys(t *testing.T) { name: "authorized to see all secrets within an org", fields: fields{ SecretService: &mock.SecretService{ - GetSecretKeysFn: func(ctx context.Context, orgID influxdb.ID) ([]string, error) { + GetSecretKeysFn: func(ctx context.Context, orgID platform.ID) ([]string, error) { return []string{ "0000000000000001secret1", "0000000000000001secret2", @@ -189,7 +191,7 @@ func TestSecretService_GetSecretKeys(t *testing.T) { OrgID: influxdbtesting.IDPtr(1), }, }, - org: influxdb.ID(1), + org: platform.ID(1), }, wants: wants{ secrets: []string{ @@ -203,7 +205,7 @@ func TestSecretService_GetSecretKeys(t *testing.T) { name: "unauthorized to see all secrets within an org", fields: fields{ SecretService: &mock.SecretService{ - GetSecretKeysFn: func(ctx context.Context, orgID influxdb.ID) ([]string, error) { + GetSecretKeysFn: func(ctx context.Context, orgID platform.ID) ([]string, error) { return []string{ "0000000000000002secret1", "0000000000000002secret2", @@ -220,11 +222,11 @@ func TestSecretService_GetSecretKeys(t *testing.T) { OrgID: influxdbtesting.IDPtr(1), }, }, - org: influxdb.ID(2), + org: platform.ID(2), }, wants: wants{ - err: &influxdb.Error{ - Code: influxdb.EUnauthorized, + err: &errors.Error{ + Code: errors.EUnauthorized, Msg: "read:orgs/0000000000000002/secrets is unauthorized", }, secrets: []string{}, @@ -234,9 +236,9 @@ func TestSecretService_GetSecretKeys(t *testing.T) { name: "errors when the org has no secret keys", fields: fields{ SecretService: &mock.SecretService{ - GetSecretKeysFn: func(ctx context.Context, orgID influxdb.ID) ([]string, error) { - return []string(nil), &influxdb.Error{ - Code: influxdb.ENotFound, + GetSecretKeysFn: func(ctx context.Context, orgID platform.ID) ([]string, error) { + return []string(nil), &errors.Error{ + Code: errors.ENotFound, Msg: "organization has no secret keys", } }, @@ -250,11 +252,11 @@ func TestSecretService_GetSecretKeys(t *testing.T) { OrgID: influxdbtesting.IDPtr(10), }, }, - org: influxdb.ID(10), + org: platform.ID(10), }, wants: wants{ - err: &influxdb.Error{ -
Code: influxdb.ENotFound, + err: &errors.Error{ + Code: errors.ENotFound, Msg: "organization has no secret keys", }, secrets: []string{}, @@ -284,7 +286,7 @@ func TestSecretService_PatchSecrets(t *testing.T) { SecretService influxdb.SecretService } type args struct { - org influxdb.ID + org platform.ID permissions []influxdb.Permission } type wants struct { @@ -301,13 +303,13 @@ func TestSecretService_PatchSecrets(t *testing.T) { name: "authorized to patch secrets", fields: fields{ SecretService: &mock.SecretService{ - PatchSecretsFn: func(ctx context.Context, orgID influxdb.ID, m map[string]string) error { + PatchSecretsFn: func(ctx context.Context, orgID platform.ID, m map[string]string) error { return nil }, }, }, args: args{ - org: influxdb.ID(1), + org: platform.ID(1), permissions: []influxdb.Permission{ { Action: influxdb.WriteAction, @@ -326,13 +328,13 @@ func TestSecretService_PatchSecrets(t *testing.T) { name: "unauthorized to update secret", fields: fields{ SecretService: &mock.SecretService{ - PatchSecretsFn: func(ctx context.Context, orgID influxdb.ID, m map[string]string) error { + PatchSecretsFn: func(ctx context.Context, orgID platform.ID, m map[string]string) error { return nil }, }, }, args: args{ - org: influxdb.ID(1), + org: platform.ID(1), permissions: []influxdb.Permission{ { Action: influxdb.ReadAction, @@ -344,9 +346,9 @@ func TestSecretService_PatchSecrets(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/0000000000000001/secrets is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -371,7 +373,7 @@ func TestSecretService_DeleteSecret(t *testing.T) { SecretService influxdb.SecretService } type args struct { - org influxdb.ID + org platform.ID permissions []influxdb.Permission } type wants struct { @@ -388,13 +390,13 @@ func TestSecretService_DeleteSecret(t *testing.T) { name: "authorized to delete secret", fields: fields{ SecretService: &mock.SecretService{ - DeleteSecretFn: func(ctx context.Context, orgID influxdb.ID, keys ...string) error { + DeleteSecretFn: func(ctx context.Context, orgID platform.ID, keys ...string) error { return nil }, }, }, args: args{ - org: influxdb.ID(1), + org: platform.ID(1), permissions: []influxdb.Permission{ { Action: influxdb.WriteAction, @@ -413,7 +415,7 @@ func TestSecretService_DeleteSecret(t *testing.T) { name: "unauthorized to delete secret", fields: fields{ SecretService: &mock.SecretService{ - DeleteSecretFn: func(ctx context.Context, orgID influxdb.ID, keys ...string) error { + DeleteSecretFn: func(ctx context.Context, orgID platform.ID, keys ...string) error { return nil }, }, @@ -431,9 +433,9 @@ func TestSecretService_DeleteSecret(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/000000000000000a/secrets is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -458,7 +460,7 @@ func TestSecretService_PutSecret(t *testing.T) { } type args struct { permission influxdb.Permission - orgID influxdb.ID + orgID platform.ID } type wants struct { err error @@ -474,13 +476,13 @@ func TestSecretService_PutSecret(t *testing.T) { name: "authorized to put a secret", fields: fields{ SecretService: &mock.SecretService{ - PutSecretFn: func(ctx context.Context, orgID influxdb.ID, key string, val string) error { + PutSecretFn: func(ctx context.Context, orgID platform.ID, key string, val string) error { return nil }, }, }, args: args{ - orgID: influxdb.ID(10), + orgID: 
platform.ID(10), permission: influxdb.Permission{ Action: influxdb.WriteAction, Resource: influxdb.Resource{ @@ -497,7 +499,7 @@ func TestSecretService_PutSecret(t *testing.T) { name: "unauthorized to put a secret", fields: fields{ SecretService: &mock.SecretService{ - PutSecretFn: func(ctx context.Context, orgID influxdb.ID, key string, val string) error { + PutSecretFn: func(ctx context.Context, orgID platform.ID, key string, val string) error { return nil }, }, @@ -513,9 +515,9 @@ func TestSecretService_PutSecret(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/000000000000000a/secrets is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -540,7 +542,7 @@ func TestSecretService_PutSecrets(t *testing.T) { } type args struct { permissions []influxdb.Permission - orgID influxdb.ID + orgID platform.ID } type wants struct { err error @@ -556,13 +558,13 @@ func TestSecretService_PutSecrets(t *testing.T) { name: "authorized to put secrets", fields: fields{ SecretService: &mock.SecretService{ - PutSecretsFn: func(ctx context.Context, orgID influxdb.ID, m map[string]string) error { + PutSecretsFn: func(ctx context.Context, orgID platform.ID, m map[string]string) error { return nil }, }, }, args: args{ - orgID: influxdb.ID(10), + orgID: platform.ID(10), permissions: []influxdb.Permission{ { Action: influxdb.WriteAction, @@ -588,13 +590,13 @@ func TestSecretService_PutSecrets(t *testing.T) { name: "unauthorized to put secrets", fields: fields{ SecretService: &mock.SecretService{ - PutSecretsFn: func(ctx context.Context, orgID influxdb.ID, m map[string]string) error { + PutSecretsFn: func(ctx context.Context, orgID platform.ID, m map[string]string) error { return nil }, }, }, args: args{ - orgID: influxdb.ID(2), + orgID: platform.ID(2), permissions: []influxdb.Permission{ { Action: influxdb.WriteAction, @@ -613,9 +615,9 @@ func TestSecretService_PutSecrets(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/0000000000000002/secrets is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -623,10 +625,10 @@ func TestSecretService_PutSecrets(t *testing.T) { name: "unauthorized to put secrets without read access to their org", fields: fields{ SecretService: &mock.SecretService{ - PutSecretFn: func(ctx context.Context, orgID influxdb.ID, key string, val string) error { + PutSecretFn: func(ctx context.Context, orgID platform.ID, key string, val string) error { return nil }, - PutSecretsFn: func(ctx context.Context, orgID influxdb.ID, m map[string]string) error { + PutSecretsFn: func(ctx context.Context, orgID platform.ID, m map[string]string) error { return nil }, }, @@ -644,9 +646,9 @@ func TestSecretService_PutSecrets(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "read:orgs/000000000000000a/secrets is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -654,10 +656,10 @@ func TestSecretService_PutSecrets(t *testing.T) { name: "unauthorized to put secrets without write access to their org", fields: fields{ SecretService: &mock.SecretService{ - PutSecretFn: func(ctx context.Context, orgID influxdb.ID, key string, val string) error { + PutSecretFn: func(ctx context.Context, orgID platform.ID, key string, val string) error { return nil }, - PutSecretsFn: func(ctx context.Context, orgID influxdb.ID, m map[string]string) error { + PutSecretsFn: func(ctx 
context.Context, orgID platform.ID, m map[string]string) error { return nil }, }, @@ -675,9 +677,9 @@ func TestSecretService_PutSecrets(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/000000000000000a/secrets is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, diff --git a/authorizer/source.go b/authorizer/source.go index ff5fa1ab249..90b6ac5879c 100644 --- a/authorizer/source.go +++ b/authorizer/source.go @@ -4,6 +4,7 @@ import ( "context" "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/kit/platform" ) var _ influxdb.SourceService = (*SourceService)(nil) @@ -34,7 +35,7 @@ func (s *SourceService) DefaultSource(ctx context.Context) (*influxdb.Source, er } // FindSourceByID checks to see if the authorizer on context has read access to the id provided. -func (s *SourceService) FindSourceByID(ctx context.Context, id influxdb.ID) (*influxdb.Source, error) { +func (s *SourceService) FindSourceByID(ctx context.Context, id platform.ID) (*influxdb.Source, error) { src, err := s.s.FindSourceByID(ctx, id) if err != nil { return nil, err @@ -64,7 +65,7 @@ func (s *SourceService) CreateSource(ctx context.Context, src *influxdb.Source) } // UpdateSource checks to see if the authorizer on context has write access to the source provided. -func (s *SourceService) UpdateSource(ctx context.Context, id influxdb.ID, upd influxdb.SourceUpdate) (*influxdb.Source, error) { +func (s *SourceService) UpdateSource(ctx context.Context, id platform.ID, upd influxdb.SourceUpdate) (*influxdb.Source, error) { src, err := s.s.FindSourceByID(ctx, id) if err != nil { return nil, err @@ -76,7 +77,7 @@ func (s *SourceService) UpdateSource(ctx context.Context, id influxdb.ID, upd in } // DeleteSource checks to see if the authorizer on context has write access to the source provided. 
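source.go repeats the fetch-then-authorize ordering already seen in scraper.go: for UpdateSource and DeleteSource (the latter continues below), the middleware first asks the wrapped service for the record, because the write check needs the OrganizationID that only the stored source knows. A stand-in sketch of that ordering:

```go
package authz

import (
	"context"
	"fmt"
)

type ID uint64

type Source struct {
	ID             ID
	OrganizationID ID
}

// SourceService stands in for influxdb.SourceService.
type SourceService interface {
	FindSourceByID(ctx context.Context, id ID) (*Source, error)
	DeleteSource(ctx context.Context, id ID) error
}

type permKey struct{}

// authorizeWriteSource stands in for this package's write check on the
// org-scoped source resource.
func authorizeWriteSource(ctx context.Context, orgID, id ID) error {
	if ctx.Value(permKey{}) != "write" {
		return fmt.Errorf("write:orgs/%016x/sources/%016x is unauthorized", uint64(orgID), uint64(id))
	}
	return nil
}

type authorizingSourceService struct{ s SourceService }

// DeleteSource must fetch before it can authorize: without the stored
// OrganizationID there is no way to name the resource the permission covers.
func (a authorizingSourceService) DeleteSource(ctx context.Context, id ID) error {
	src, err := a.s.FindSourceByID(ctx, id) // internal, unauthenticated lookup
	if err != nil {
		return err
	}
	if err := authorizeWriteSource(ctx, src.OrganizationID, src.ID); err != nil {
		return err
	}
	return a.s.DeleteSource(ctx, id)
}
```

One visible consequence in the tests: a missing record surfaces as its lookup error rather than an authorization error, since the fetch happens first.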
-func (s *SourceService) DeleteSource(ctx context.Context, id influxdb.ID) error { +func (s *SourceService) DeleteSource(ctx context.Context, id platform.ID) error { src, err := s.s.FindSourceByID(ctx, id) if err != nil { return err diff --git a/authorizer/source_test.go b/authorizer/source_test.go index 1cecbfbca3f..2a41608cf04 100644 --- a/authorizer/source_test.go +++ b/authorizer/source_test.go @@ -10,6 +10,8 @@ import ( "github.com/influxdata/influxdb/v2" "github.com/influxdata/influxdb/v2/authorizer" influxdbcontext "github.com/influxdata/influxdb/v2/context" + "github.com/influxdata/influxdb/v2/kit/platform" + "github.com/influxdata/influxdb/v2/kit/platform/errors" "github.com/influxdata/influxdb/v2/mock" influxdbtesting "github.com/influxdata/influxdb/v2/testing" ) @@ -91,9 +93,9 @@ func TestSourceService_DefaultSource(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "read:orgs/000000000000000a/sources/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -118,7 +120,7 @@ func TestSourceService_FindSourceByID(t *testing.T) { } type args struct { permission influxdb.Permission - id influxdb.ID + id platform.ID } type wants struct { err error @@ -134,7 +136,7 @@ func TestSourceService_FindSourceByID(t *testing.T) { name: "authorized to access id", fields: fields{ SourceService: &mock.SourceService{ - FindSourceByIDFn: func(ctx context.Context, id influxdb.ID) (*influxdb.Source, error) { + FindSourceByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Source, error) { return &influxdb.Source{ ID: id, OrganizationID: 10, @@ -160,7 +162,7 @@ func TestSourceService_FindSourceByID(t *testing.T) { name: "unauthorized to access id", fields: fields{ SourceService: &mock.SourceService{ - FindSourceByIDFn: func(ctx context.Context, id influxdb.ID) (*influxdb.Source, error) { + FindSourceByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Source, error) { return &influxdb.Source{ ID: id, OrganizationID: 10, @@ -179,9 +181,9 @@ func TestSourceService_FindSourceByID(t *testing.T) { id: 1, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "read:orgs/000000000000000a/sources/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -333,7 +335,7 @@ func TestSourceService_UpdateSource(t *testing.T) { SourceService influxdb.SourceService } type args struct { - id influxdb.ID + id platform.ID permissions []influxdb.Permission } type wants struct { @@ -350,13 +352,13 @@ func TestSourceService_UpdateSource(t *testing.T) { name: "authorized to update source", fields: fields{ SourceService: &mock.SourceService{ - FindSourceByIDFn: func(ctx context.Context, id influxdb.ID) (*influxdb.Source, error) { + FindSourceByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Source, error) { return &influxdb.Source{ ID: 1, OrganizationID: 10, }, nil }, - UpdateSourceFn: func(ctx context.Context, id influxdb.ID, upd influxdb.SourceUpdate) (*influxdb.Source, error) { + UpdateSourceFn: func(ctx context.Context, id platform.ID, upd influxdb.SourceUpdate) (*influxdb.Source, error) { return &influxdb.Source{ ID: 1, OrganizationID: 10, @@ -391,13 +393,13 @@ func TestSourceService_UpdateSource(t *testing.T) { name: "unauthorized to update source", fields: fields{ SourceService: &mock.SourceService{ - FindSourceByIDFn: func(ctx context.Context, id influxdb.ID) (*influxdb.Source, error) { 
+ FindSourceByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Source, error) { return &influxdb.Source{ ID: 1, OrganizationID: 10, }, nil }, - UpdateSourceFn: func(ctx context.Context, id influxdb.ID, upd influxdb.SourceUpdate) (*influxdb.Source, error) { + UpdateSourceFn: func(ctx context.Context, id platform.ID, upd influxdb.SourceUpdate) (*influxdb.Source, error) { return &influxdb.Source{ ID: 1, OrganizationID: 10, @@ -418,9 +420,9 @@ func TestSourceService_UpdateSource(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/000000000000000a/sources/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -444,7 +446,7 @@ func TestSourceService_DeleteSource(t *testing.T) { SourceService influxdb.SourceService } type args struct { - id influxdb.ID + id platform.ID permissions []influxdb.Permission } type wants struct { @@ -461,13 +463,13 @@ func TestSourceService_DeleteSource(t *testing.T) { name: "authorized to delete source", fields: fields{ SourceService: &mock.SourceService{ - FindSourceByIDFn: func(ctx context.Context, id influxdb.ID) (*influxdb.Source, error) { + FindSourceByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Source, error) { return &influxdb.Source{ ID: 1, OrganizationID: 10, }, nil }, - DeleteSourceFn: func(ctx context.Context, id influxdb.ID) error { + DeleteSourceFn: func(ctx context.Context, id platform.ID) error { return nil }, }, @@ -499,13 +501,13 @@ func TestSourceService_DeleteSource(t *testing.T) { name: "unauthorized to delete source", fields: fields{ SourceService: &mock.SourceService{ - FindSourceByIDFn: func(ctx context.Context, id influxdb.ID) (*influxdb.Source, error) { + FindSourceByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Source, error) { return &influxdb.Source{ ID: 1, OrganizationID: 10, }, nil }, - DeleteSourceFn: func(ctx context.Context, id influxdb.ID) error { + DeleteSourceFn: func(ctx context.Context, id platform.ID) error { return nil }, }, @@ -523,9 +525,9 @@ func TestSourceService_DeleteSource(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/000000000000000a/sources/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -550,7 +552,7 @@ func TestSourceService_CreateSource(t *testing.T) { } type args struct { permission influxdb.Permission - orgID influxdb.ID + orgID platform.ID } type wants struct { err error @@ -605,9 +607,9 @@ func TestSourceService_CreateSource(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/000000000000000a/sources is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, diff --git a/authorizer/sql_backup_restore.go b/authorizer/sql_backup_restore.go new file mode 100644 index 00000000000..906b0b5740e --- /dev/null +++ b/authorizer/sql_backup_restore.go @@ -0,0 +1,57 @@ +package authorizer + +import ( + "context" + "io" + + "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/kit/tracing" +) + +var _ influxdb.SqlBackupRestoreService = (*SqlBackupRestoreService)(nil) + +// SqlBackupRestoreService wraps a influxdb.SqlBackupRestoreService and authorizes actions +// against it appropriately. +type SqlBackupRestoreService struct { + s influxdb.SqlBackupRestoreService +} + +// NewSqlBackupRestoreService constructs an instance of an authorizing backup service. 
+func NewSqlBackupRestoreService(s influxdb.SqlBackupRestoreService) *SqlBackupRestoreService { + return &SqlBackupRestoreService{ + s: s, + } +} + +func (s SqlBackupRestoreService) BackupSqlStore(ctx context.Context, w io.Writer) error { + span, ctx := tracing.StartSpanFromContext(ctx) + defer span.Finish() + + if err := IsAllowedAll(ctx, influxdb.OperPermissions()); err != nil { + return err + } + return s.s.BackupSqlStore(ctx, w) +} + +func (s SqlBackupRestoreService) RestoreSqlStore(ctx context.Context, r io.Reader) error { + span, ctx := tracing.StartSpanFromContext(ctx) + defer span.Finish() + + if err := IsAllowedAll(ctx, influxdb.OperPermissions()); err != nil { + return err + } + return s.s.RestoreSqlStore(ctx, r) +} + +// The Lock and Unlock methods below do not have authorization checks and should only be used +// when appropriate authorization has already been confirmed, such as behind a middleware. They +// are intended to be used for coordinating the locking and unlocking of the kv and sql metadata +// databases during a backup. They are made available here to allow the calls to pass-through to the +// underlying service. +func (s SqlBackupRestoreService) RLockSqlStore() { + s.s.RLockSqlStore() +} + +func (s SqlBackupRestoreService) RUnlockSqlStore() { + s.s.RUnlockSqlStore() +} diff --git a/authorizer/sql_backup_restore_test.go b/authorizer/sql_backup_restore_test.go new file mode 100644 index 00000000000..b7a5ed2dbc7 --- /dev/null +++ b/authorizer/sql_backup_restore_test.go @@ -0,0 +1,103 @@ +package authorizer_test + +import ( + "bytes" + "context" + "testing" + + "github.com/golang/mock/gomock" + "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/authorizer" + influxdbcontext "github.com/influxdata/influxdb/v2/context" + "github.com/influxdata/influxdb/v2/kit/platform/errors" + "github.com/influxdata/influxdb/v2/mock" + "github.com/stretchr/testify/require" +) + +func Test_BackupSqlStore(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + permList []influxdb.Permission + wantErr error + }{ + { + "authorized to do the backup", + influxdb.OperPermissions(), + nil, + }, + { + "not authorized to do the backup", + influxdb.ReadAllPermissions(), + &errors.Error{ + Msg: "write:authorizations is unauthorized", + Code: errors.EUnauthorized, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrlr := gomock.NewController(t) + svc := mock.NewMockSqlBackupRestoreService(ctrlr) + s := authorizer.NewSqlBackupRestoreService(svc) + + w := bytes.NewBuffer([]byte{}) + + if tt.wantErr == nil { + svc.EXPECT(). + BackupSqlStore(gomock.Any(), w). 
+ Return(nil) + } + + ctx := influxdbcontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(false, tt.permList)) + err := s.BackupSqlStore(ctx, w) + require.Equal(t, tt.wantErr, err) + }) + } +} + +func Test_RestoreSqlStore(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + permList []influxdb.Permission + wantErr error + }{ + { + "authorized to do the restore", + influxdb.OperPermissions(), + nil, + }, + { + "not authorized to do the restore", + influxdb.ReadAllPermissions(), + &errors.Error{ + Msg: "write:authorizations is unauthorized", + Code: errors.EUnauthorized, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrlr := gomock.NewController(t) + svc := mock.NewMockSqlBackupRestoreService(ctrlr) + s := authorizer.NewSqlBackupRestoreService(svc) + + w := bytes.NewBuffer([]byte{}) + + if tt.wantErr == nil { + svc.EXPECT(). + RestoreSqlStore(gomock.Any(), w). + Return(nil) + } + + ctx := influxdbcontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(false, tt.permList)) + err := s.RestoreSqlStore(ctx, w) + require.Equal(t, tt.wantErr, err) + }) + } +} diff --git a/authorizer/task.go b/authorizer/task.go index 9303d1b8cfe..9538c4a0c1e 100644 --- a/authorizer/task.go +++ b/authorizer/task.go @@ -5,7 +5,10 @@ import ( "fmt" "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/kit/platform" + "github.com/influxdata/influxdb/v2/kit/platform/errors" "github.com/influxdata/influxdb/v2/kit/tracing" + "github.com/influxdata/influxdb/v2/task/taskmodel" "go.uber.org/zap" ) @@ -20,25 +23,25 @@ func (ae *authError) AuthzError() error { } var ( - ErrInactiveTask = &influxdb.Error{ - Code: influxdb.EInvalid, + ErrInactiveTask = &errors.Error{ + Code: errors.EInvalid, Msg: "inactive task", } - ErrFailedPermission = &influxdb.Error{ - Code: influxdb.EInvalid, + ErrFailedPermission = &errors.Error{ + Code: errors.EInvalid, Msg: "unauthorized", } ) type taskServiceValidator struct { - influxdb.TaskService + taskmodel.TaskService log *zap.Logger } // TaskService wraps ts and checks appropriate permissions before calling requested methods on ts. // Authorization failures are logged to the logger. 
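The backup/restore tests above are also the first in this diff to use gomock instead of the package's hand-written function-field mocks: expectations are registered up front, and the controller fails the test on any call that was not expected. That default is doing real work here, since the unauthorized cases register no EXPECT() at all and therefore prove the wrapped service is never reached. The core of the arrangement, excerpted with comments (this assumes the repo's generated MockSqlBackupRestoreService and is not standalone):

```go
// Inside a test function, with the imports the new test file declares.
ctrlr := gomock.NewController(t)                  // verifies expectations for t
svc := mock.NewMockSqlBackupRestoreService(ctrlr) // gomock-generated mock
s := authorizer.NewSqlBackupRestoreService(svc)   // middleware under test

w := bytes.NewBuffer([]byte{})

// Only the authorized case registers an expectation; gomock requires
// exactly one matching call by default. With no expectation registered,
// any call into the mock fails the test, which is how the unauthorized
// cases assert the wrapped service stays untouched.
svc.EXPECT().BackupSqlStore(gomock.Any(), w).Return(nil)

ctx := influxdbcontext.SetAuthorizer(context.Background(),
	mock.NewMockAuthorizer(false, influxdb.OperPermissions()))
require.NoError(t, s.BackupSqlStore(ctx, w))
```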
-func NewTaskService(log *zap.Logger, ts influxdb.TaskService) influxdb.TaskService { +func NewTaskService(log *zap.Logger, ts taskmodel.TaskService) taskmodel.TaskService { return &taskServiceValidator{ TaskService: ts, log: log, @@ -46,7 +49,7 @@ func NewTaskService(log *zap.Logger, ts influxdb.TaskService) influxdb.TaskServi } func (ts *taskServiceValidator) processPermissionError(a influxdb.Authorizer, p influxdb.Permission, err error, loggerFields ...zap.Field) error { - if influxdb.ErrorCode(err) == influxdb.EUnauthorized { + if errors.ErrorCode(err) == errors.EUnauthorized { ts.log.With(loggerFields...).Info("Authorization failed", zap.String("user_id", a.GetUserID().String()), zap.String("auth_kind", a.Kind()), @@ -58,7 +61,7 @@ func (ts *taskServiceValidator) processPermissionError(a influxdb.Authorizer, p return err } -func (ts *taskServiceValidator) FindTaskByID(ctx context.Context, id influxdb.ID) (*influxdb.Task, error) { +func (ts *taskServiceValidator) FindTaskByID(ctx context.Context, id platform.ID) (*taskmodel.Task, error) { span, ctx := tracing.StartSpanFromContext(ctx) defer span.Finish() @@ -76,7 +79,7 @@ func (ts *taskServiceValidator) FindTaskByID(ctx context.Context, id influxdb.ID return task, nil } -func (ts *taskServiceValidator) FindTasks(ctx context.Context, filter influxdb.TaskFilter) ([]*influxdb.Task, int, error) { +func (ts *taskServiceValidator) FindTasks(ctx context.Context, filter taskmodel.TaskFilter) ([]*taskmodel.Task, int, error) { span, ctx := tracing.StartSpanFromContext(ctx) defer span.Finish() // Get the tasks in the organization, without authentication. @@ -87,12 +90,12 @@ func (ts *taskServiceValidator) FindTasks(ctx context.Context, filter influxdb.T return AuthorizeFindTasks(ctx, unauthenticatedTasks) } -func (ts *taskServiceValidator) CreateTask(ctx context.Context, t influxdb.TaskCreate) (*influxdb.Task, error) { +func (ts *taskServiceValidator) CreateTask(ctx context.Context, t taskmodel.TaskCreate) (*taskmodel.Task, error) { span, ctx := tracing.StartSpanFromContext(ctx) defer span.Finish() if !t.OwnerID.Valid() { - return nil, influxdb.ErrInvalidOwnerID + return nil, taskmodel.ErrInvalidOwnerID } a, p, err := AuthorizeCreate(ctx, influxdb.TasksResourceType, t.OrganizationID) @@ -103,7 +106,7 @@ func (ts *taskServiceValidator) CreateTask(ctx context.Context, t influxdb.TaskC return ts.TaskService.CreateTask(ctx, t) } -func (ts *taskServiceValidator) UpdateTask(ctx context.Context, id influxdb.ID, upd influxdb.TaskUpdate) (*influxdb.Task, error) { +func (ts *taskServiceValidator) UpdateTask(ctx context.Context, id platform.ID, upd taskmodel.TaskUpdate) (*taskmodel.Task, error) { span, ctx := tracing.StartSpanFromContext(ctx) defer span.Finish() @@ -121,7 +124,7 @@ func (ts *taskServiceValidator) UpdateTask(ctx context.Context, id influxdb.ID, return ts.TaskService.UpdateTask(ctx, id, upd) } -func (ts *taskServiceValidator) DeleteTask(ctx context.Context, id influxdb.ID) error { +func (ts *taskServiceValidator) DeleteTask(ctx context.Context, id platform.ID) error { span, ctx := tracing.StartSpanFromContext(ctx) defer span.Finish() @@ -139,7 +142,7 @@ func (ts *taskServiceValidator) DeleteTask(ctx context.Context, id influxdb.ID) return ts.TaskService.DeleteTask(ctx, id) } -func (ts *taskServiceValidator) FindLogs(ctx context.Context, filter influxdb.LogFilter) ([]*influxdb.Log, int, error) { +func (ts *taskServiceValidator) FindLogs(ctx context.Context, filter taskmodel.LogFilter) ([]*taskmodel.Log, int, error) { span, ctx := 
tracing.StartSpanFromContext(ctx) defer span.Finish() @@ -152,7 +155,7 @@ func (ts *taskServiceValidator) FindLogs(ctx context.Context, filter influxdb.Lo return ts.TaskService.FindLogs(ctx, filter) } -func (ts *taskServiceValidator) FindRuns(ctx context.Context, filter influxdb.RunFilter) ([]*influxdb.Run, int, error) { +func (ts *taskServiceValidator) FindRuns(ctx context.Context, filter taskmodel.RunFilter) ([]*taskmodel.Run, int, error) { span, ctx := tracing.StartSpanFromContext(ctx) defer span.Finish() @@ -171,7 +174,7 @@ func (ts *taskServiceValidator) FindRuns(ctx context.Context, filter influxdb.Ru return ts.TaskService.FindRuns(ctx, filter) } -func (ts *taskServiceValidator) FindRunByID(ctx context.Context, taskID, runID influxdb.ID) (*influxdb.Run, error) { +func (ts *taskServiceValidator) FindRunByID(ctx context.Context, taskID, runID platform.ID) (*taskmodel.Run, error) { span, ctx := tracing.StartSpanFromContext(ctx) defer span.Finish() @@ -189,7 +192,7 @@ func (ts *taskServiceValidator) FindRunByID(ctx context.Context, taskID, runID i return ts.TaskService.FindRunByID(ctx, taskID, runID) } -func (ts *taskServiceValidator) CancelRun(ctx context.Context, taskID, runID influxdb.ID) error { +func (ts *taskServiceValidator) CancelRun(ctx context.Context, taskID, runID platform.ID) error { span, ctx := tracing.StartSpanFromContext(ctx) defer span.Finish() @@ -207,7 +210,7 @@ func (ts *taskServiceValidator) CancelRun(ctx context.Context, taskID, runID inf return ts.TaskService.CancelRun(ctx, taskID, runID) } -func (ts *taskServiceValidator) RetryRun(ctx context.Context, taskID, runID influxdb.ID) (*influxdb.Run, error) { +func (ts *taskServiceValidator) RetryRun(ctx context.Context, taskID, runID platform.ID) (*taskmodel.Run, error) { span, ctx := tracing.StartSpanFromContext(ctx) defer span.Finish() @@ -217,7 +220,7 @@ func (ts *taskServiceValidator) RetryRun(ctx context.Context, taskID, runID infl return nil, err } - if task.Status != string(influxdb.TaskActive) { + if task.Status != string(taskmodel.TaskActive) { return nil, ErrInactiveTask } @@ -229,7 +232,7 @@ func (ts *taskServiceValidator) RetryRun(ctx context.Context, taskID, runID infl return ts.TaskService.RetryRun(ctx, taskID, runID) } -func (ts *taskServiceValidator) ForceRun(ctx context.Context, taskID influxdb.ID, scheduledFor int64) (*influxdb.Run, error) { +func (ts *taskServiceValidator) ForceRun(ctx context.Context, taskID platform.ID, scheduledFor int64) (*taskmodel.Run, error) { span, ctx := tracing.StartSpanFromContext(ctx) defer span.Finish() @@ -239,7 +242,7 @@ func (ts *taskServiceValidator) ForceRun(ctx context.Context, taskID influxdb.ID return nil, err } - if task.Status != string(influxdb.TaskActive) { + if task.Status != string(taskmodel.TaskActive) { return nil, ErrInactiveTask } diff --git a/authorizer/task_test.go b/authorizer/task_test.go index bffe64b3880..2b504bc5210 100644 --- a/authorizer/task_test.go +++ b/authorizer/task_test.go @@ -12,9 +12,11 @@ import ( _ "github.com/influxdata/influxdb/v2/fluxinit/static" "github.com/influxdata/influxdb/v2/http" "github.com/influxdata/influxdb/v2/inmem" + "github.com/influxdata/influxdb/v2/kit/platform" "github.com/influxdata/influxdb/v2/kv" "github.com/influxdata/influxdb/v2/kv/migration/all" "github.com/influxdata/influxdb/v2/mock" + "github.com/influxdata/influxdb/v2/task/taskmodel" "github.com/influxdata/influxdb/v2/tenant" "github.com/pkg/errors" "go.uber.org/zap/zaptest" 
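Alongside the taskmodel move, task.go relies on interface embedding: taskServiceValidator embeds the wrapped taskmodel.TaskService, so any method without an explicit override is promoted as-is, and each override re-runs a permission check (logging failures through processPermissionError) before delegating. A self-contained sketch of the embedding trick:

```go
package main

import (
	"context"
	"fmt"
)

type ID uint64

type Task struct{ ID ID }

// TaskService stands in for taskmodel.TaskService.
type TaskService interface {
	FindTaskByID(ctx context.Context, id ID) (*Task, error)
}

type permKey struct{}

// validator embeds the wrapped service the way taskServiceValidator embeds
// taskmodel.TaskService: unoverridden methods are promoted as-is.
type validator struct {
	TaskService
}

func (v *validator) FindTaskByID(ctx context.Context, id ID) (*Task, error) {
	if ctx.Value(permKey{}) != "read:tasks" {
		// the real code also logs the failure via processPermissionError
		return nil, fmt.Errorf("read:tasks/%016x is unauthorized", uint64(id))
	}
	return v.TaskService.FindTaskByID(ctx, id) // delegate to the embedded service
}

type fakeTasks struct{}

func (fakeTasks) FindTaskByID(_ context.Context, id ID) (*Task, error) {
	return &Task{ID: id}, nil
}

func main() {
	v := &validator{TaskService: fakeTasks{}}
	_, err := v.FindTaskByID(context.Background(), 0x7456) // the taskID the tests use
	fmt.Println(err)
}
```

The flip side of embedding is that a method added to the interface later is promoted without any check until it gets an override, which is presumably why every TaskService method receives an explicit wrapper in this file.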
@@ -26,11 +28,11 @@ func TestOnboardingValidation(t *testing.T) { ts := authorizer.NewTaskService(zaptest.NewLogger(t), mockTaskService(3, 2, 1)) r, err := onboard.OnboardInitialUser(context.Background(), &influxdb.OnboardingRequest{ - User: "Setec Astronomy", - Password: "too many secrets", - Org: "thing", - Bucket: "holder", - RetentionPeriod: 1, + User: "Setec Astronomy", + Password: "too many secrets", + Org: "thing", + Bucket: "holder", + RetentionPeriodSeconds: 1, }) if err != nil { t.Fatal(err) @@ -38,7 +40,7 @@ func TestOnboardingValidation(t *testing.T) { ctx := pctx.SetAuthorizer(context.Background(), r.Auth) - _, err = ts.CreateTask(ctx, influxdb.TaskCreate{ + _, err = ts.CreateTask(ctx, taskmodel.TaskCreate{ OrganizationID: r.Org.ID, OwnerID: r.Auth.GetUserID(), Flux: `option task = { @@ -52,12 +54,12 @@ from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")`, } } -func mockTaskService(orgID, taskID, runID influxdb.ID) influxdb.TaskService { - task := influxdb.Task{ +func mockTaskService(orgID, taskID, runID platform.ID) taskmodel.TaskService { + task := taskmodel.Task{ ID: taskID, OrganizationID: orgID, Name: "cows", - Status: string(influxdb.TaskActive), + Status: string(taskmodel.TaskActive), Flux: `option task = { name: "my_task", every: 1s, @@ -66,51 +68,51 @@ from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")`, Every: "1s", } - log := influxdb.Log{Message: "howdy partner"} + log := taskmodel.Log{Message: "howdy partner"} - run := influxdb.Run{ + run := taskmodel.Run{ ID: runID, TaskID: taskID, Status: "completed", ScheduledFor: time.Now().UTC(), StartedAt: time.Now().UTC().Add(time.Second * 3), FinishedAt: time.Now().UTC().Add(time.Second * 10), - Log: []influxdb.Log{log}, + Log: []taskmodel.Log{log}, } return &mock.TaskService{ - FindTaskByIDFn: func(context.Context, influxdb.ID) (*influxdb.Task, error) { + FindTaskByIDFn: func(context.Context, platform.ID) (*taskmodel.Task, error) { return &task, nil }, - FindTasksFn: func(context.Context, influxdb.TaskFilter) ([]*influxdb.Task, int, error) { - return []*influxdb.Task{&task}, 1, nil + FindTasksFn: func(context.Context, taskmodel.TaskFilter) ([]*taskmodel.Task, int, error) { + return []*taskmodel.Task{&task}, 1, nil }, - CreateTaskFn: func(_ context.Context, tc influxdb.TaskCreate) (*influxdb.Task, error) { + CreateTaskFn: func(_ context.Context, tc taskmodel.TaskCreate) (*taskmodel.Task, error) { taskCopy := task return &taskCopy, nil }, - UpdateTaskFn: func(context.Context, influxdb.ID, influxdb.TaskUpdate) (*influxdb.Task, error) { + UpdateTaskFn: func(context.Context, platform.ID, taskmodel.TaskUpdate) (*taskmodel.Task, error) { return &task, nil }, - DeleteTaskFn: func(context.Context, influxdb.ID) error { + DeleteTaskFn: func(context.Context, platform.ID) error { return nil }, - FindLogsFn: func(context.Context, influxdb.LogFilter) ([]*influxdb.Log, int, error) { - return []*influxdb.Log{&log}, 1, nil + FindLogsFn: func(context.Context, taskmodel.LogFilter) ([]*taskmodel.Log, int, error) { + return []*taskmodel.Log{&log}, 1, nil }, - FindRunsFn: func(context.Context, influxdb.RunFilter) ([]*influxdb.Run, int, error) { - return []*influxdb.Run{&run}, 1, nil + FindRunsFn: func(context.Context, taskmodel.RunFilter) ([]*taskmodel.Run, int, error) { + return []*taskmodel.Run{&run}, 1, nil }, - FindRunByIDFn: func(context.Context, influxdb.ID, influxdb.ID) (*influxdb.Run, error) { + FindRunByIDFn: func(context.Context, platform.ID, platform.ID) (*taskmodel.Run, error) { 
return &run, nil }, - CancelRunFn: func(context.Context, influxdb.ID, influxdb.ID) error { + CancelRunFn: func(context.Context, platform.ID, platform.ID) error { return nil }, - RetryRunFn: func(context.Context, influxdb.ID, influxdb.ID) (*influxdb.Run, error) { + RetryRunFn: func(context.Context, platform.ID, platform.ID) (*taskmodel.Run, error) { return &run, nil }, - ForceRunFn: func(context.Context, influxdb.ID, int64) (*influxdb.Run, error) { + ForceRunFn: func(context.Context, platform.ID, int64) (*taskmodel.Run, error) { return &run, nil }, } @@ -118,19 +120,19 @@ from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")`, func TestValidations(t *testing.T) { var ( - taskID = influxdb.ID(0x7456) - runID = influxdb.ID(0x402) + taskID = platform.ID(0x7456) + runID = platform.ID(0x402) otherOrg = &influxdb.Organization{Name: "other_org"} ) svc, onboard := setup(t) r, err := onboard.OnboardInitialUser(context.Background(), &influxdb.OnboardingRequest{ - User: "Setec Astronomy", - Password: "too many secrets", - Org: "thing", - Bucket: "holder", - RetentionPeriod: 1, + User: "Setec Astronomy", + Password: "too many secrets", + Org: "thing", + Bucket: "holder", + RetentionPeriodSeconds: 1, }) if err != nil { t.Fatal(err) @@ -195,13 +197,13 @@ func TestValidations(t *testing.T) { tests := []struct { name string - check func(context.Context, influxdb.TaskService) error + check func(context.Context, taskmodel.TaskService) error auth *influxdb.Authorization }{ { name: "create failure", - check: func(ctx context.Context, svc influxdb.TaskService) error { - _, err := svc.CreateTask(ctx, influxdb.TaskCreate{ + check: func(ctx context.Context, svc taskmodel.TaskService) error { + _, err := svc.CreateTask(ctx, taskmodel.TaskCreate{ OrganizationID: r.Org.ID, Flux: `option task = { name: "my_task", @@ -219,8 +221,8 @@ from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")`, { name: "create success", auth: r.Auth, - check: func(ctx context.Context, svc influxdb.TaskService) error { - _, err := svc.CreateTask(ctx, influxdb.TaskCreate{ + check: func(ctx context.Context, svc taskmodel.TaskService) error { + _, err := svc.CreateTask(ctx, taskmodel.TaskCreate{ OrganizationID: r.Org.ID, OwnerID: r.Auth.GetUserID(), Flux: `option task = { @@ -235,7 +237,7 @@ from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")`, { name: "FindTaskByID missing auth", auth: &influxdb.Authorization{Permissions: []influxdb.Permission{}}, - check: func(ctx context.Context, svc influxdb.TaskService) error { + check: func(ctx context.Context, svc taskmodel.TaskService) error { _, err := svc.FindTaskByID(ctx, taskID) if err == nil { return errors.New("returned without error without permission") @@ -246,7 +248,7 @@ from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")`, { name: "FindTaskByID with org auth", auth: &influxdb.Authorization{Status: "active", Permissions: orgReadAllTaskPermissions}, - check: func(ctx context.Context, svc influxdb.TaskService) error { + check: func(ctx context.Context, svc taskmodel.TaskService) error { _, err := svc.FindTaskByID(ctx, taskID) return err }, @@ -254,7 +256,7 @@ from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")`, { name: "FindTaskByID with task auth", auth: &influxdb.Authorization{Status: "active", Permissions: orgReadTaskPermissions}, - check: func(ctx context.Context, svc influxdb.TaskService) error { + check: func(ctx context.Context, svc taskmodel.TaskService) error { _, 
err := svc.FindTaskByID(ctx, taskID) return err }, @@ -262,8 +264,8 @@ from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")`, { name: "FindTasks with bad auth", auth: &influxdb.Authorization{Status: "active", Permissions: wrongOrgReadAllTaskPermissions}, - check: func(ctx context.Context, svc influxdb.TaskService) error { - ts, _, err := svc.FindTasks(ctx, influxdb.TaskFilter{ + check: func(ctx context.Context, svc taskmodel.TaskService) error { + ts, _, err := svc.FindTasks(ctx, taskmodel.TaskFilter{ OrganizationID: &orgID, }) if err == nil && len(ts) > 0 { @@ -275,8 +277,8 @@ from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")`, { name: "FindTasks with org auth", auth: &influxdb.Authorization{Status: "active", Permissions: orgReadAllTaskPermissions}, - check: func(ctx context.Context, svc influxdb.TaskService) error { - _, _, err := svc.FindTasks(ctx, influxdb.TaskFilter{ + check: func(ctx context.Context, svc taskmodel.TaskService) error { + _, _, err := svc.FindTasks(ctx, taskmodel.TaskFilter{ OrganizationID: &orgID, }) return err @@ -285,8 +287,8 @@ from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")`, { name: "FindTasks with task auth", auth: &influxdb.Authorization{Status: "active", Permissions: orgReadTaskPermissions}, - check: func(ctx context.Context, svc influxdb.TaskService) error { - _, _, err := svc.FindTasks(ctx, influxdb.TaskFilter{ + check: func(ctx context.Context, svc taskmodel.TaskService) error { + _, _, err := svc.FindTasks(ctx, taskmodel.TaskFilter{ OrganizationID: &orgID, }) return err @@ -295,21 +297,21 @@ from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")`, { name: "FindTasks without org filter", auth: &influxdb.Authorization{Status: "active", Permissions: orgReadAllTaskPermissions}, - check: func(ctx context.Context, svc influxdb.TaskService) error { - _, _, err := svc.FindTasks(ctx, influxdb.TaskFilter{}) + check: func(ctx context.Context, svc taskmodel.TaskService) error { + _, _, err := svc.FindTasks(ctx, taskmodel.TaskFilter{}) return err }, }, { name: "UpdateTask with readonly auth", auth: &influxdb.Authorization{Status: "active", Permissions: orgReadAllTaskPermissions}, - check: func(ctx context.Context, svc influxdb.TaskService) error { + check: func(ctx context.Context, svc taskmodel.TaskService) error { flux := `option task = { name: "my_task", every: 1s, } from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")` - _, err := svc.UpdateTask(ctx, taskID, influxdb.TaskUpdate{ + _, err := svc.UpdateTask(ctx, taskID, taskmodel.TaskUpdate{ Flux: &flux, }) if err == nil { @@ -321,13 +323,13 @@ from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")` { name: "UpdateTask with org auth", auth: &influxdb.Authorization{Status: "active", Permissions: orgWriteAllTaskBucketPermissions}, - check: func(ctx context.Context, svc influxdb.TaskService) error { + check: func(ctx context.Context, svc taskmodel.TaskService) error { flux := `option task = { name: "my_task", every: 1s, } from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")` - _, err := svc.UpdateTask(ctx, taskID, influxdb.TaskUpdate{ + _, err := svc.UpdateTask(ctx, taskID, taskmodel.TaskUpdate{ Flux: &flux, }) return err @@ -336,13 +338,13 @@ from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")` { name: "UpdateTask with task auth", auth: &influxdb.Authorization{Status: "active", Permissions: 
orgWriteTaskBucketPermissions}, - check: func(ctx context.Context, svc influxdb.TaskService) error { + check: func(ctx context.Context, svc taskmodel.TaskService) error { flux := `option task = { name: "my_task", every: 1s, } from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")` - _, err := svc.UpdateTask(ctx, taskID, influxdb.TaskUpdate{ + _, err := svc.UpdateTask(ctx, taskID, taskmodel.TaskUpdate{ Flux: &flux, }) return err @@ -351,7 +353,7 @@ from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")` { name: "DeleteTask missing auth", auth: &influxdb.Authorization{Permissions: []influxdb.Permission{}}, - check: func(ctx context.Context, svc influxdb.TaskService) error { + check: func(ctx context.Context, svc taskmodel.TaskService) error { err := svc.DeleteTask(ctx, taskID) if err == nil { return errors.New("returned without error without permission") @@ -362,7 +364,7 @@ from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")` { name: "DeleteTask readonly auth", auth: &influxdb.Authorization{Permissions: orgReadAllTaskPermissions}, - check: func(ctx context.Context, svc influxdb.TaskService) error { + check: func(ctx context.Context, svc taskmodel.TaskService) error { err := svc.DeleteTask(ctx, taskID) if err == nil { return errors.New("returned without error without permission") @@ -373,7 +375,7 @@ from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")` { name: "DeleteTask with org auth", auth: &influxdb.Authorization{Status: "active", Permissions: orgWriteAllTaskPermissions}, - check: func(ctx context.Context, svc influxdb.TaskService) error { + check: func(ctx context.Context, svc taskmodel.TaskService) error { err := svc.DeleteTask(ctx, taskID) return err }, @@ -381,7 +383,7 @@ from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")` { name: "DeleteTask with task auth", auth: &influxdb.Authorization{Status: "active", Permissions: orgWriteTaskPermissions}, - check: func(ctx context.Context, svc influxdb.TaskService) error { + check: func(ctx context.Context, svc taskmodel.TaskService) error { err := svc.DeleteTask(ctx, taskID) return err }, @@ -389,8 +391,8 @@ from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")` { name: "FindLogs with bad auth", auth: &influxdb.Authorization{Status: "active", Permissions: wrongOrgReadAllTaskPermissions}, - check: func(ctx context.Context, svc influxdb.TaskService) error { - _, _, err := svc.FindLogs(ctx, influxdb.LogFilter{ + check: func(ctx context.Context, svc taskmodel.TaskService) error { + _, _, err := svc.FindLogs(ctx, taskmodel.LogFilter{ Task: taskID, }) if err == nil { @@ -402,8 +404,8 @@ from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")` { name: "FindLogs with org auth", auth: &influxdb.Authorization{Status: "active", Permissions: orgReadAllTaskPermissions}, - check: func(ctx context.Context, svc influxdb.TaskService) error { - _, _, err := svc.FindLogs(ctx, influxdb.LogFilter{ + check: func(ctx context.Context, svc taskmodel.TaskService) error { + _, _, err := svc.FindLogs(ctx, taskmodel.LogFilter{ Task: taskID, }) return err @@ -412,8 +414,8 @@ from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")` { name: "FindLogs with task auth", auth: &influxdb.Authorization{Status: "active", Permissions: orgReadTaskPermissions}, - check: func(ctx context.Context, svc influxdb.TaskService) error { - _, _, err := svc.FindLogs(ctx, influxdb.LogFilter{ + check: 
func(ctx context.Context, svc taskmodel.TaskService) error { + _, _, err := svc.FindLogs(ctx, taskmodel.LogFilter{ Task: taskID, }) return err @@ -422,8 +424,8 @@ from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")` { name: "FindRuns with bad auth", auth: &influxdb.Authorization{Status: "active", Permissions: wrongOrgReadAllTaskPermissions}, - check: func(ctx context.Context, svc influxdb.TaskService) error { - _, _, err := svc.FindRuns(ctx, influxdb.RunFilter{ + check: func(ctx context.Context, svc taskmodel.TaskService) error { + _, _, err := svc.FindRuns(ctx, taskmodel.RunFilter{ Task: taskID, }) if err == nil { @@ -435,8 +437,8 @@ from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")` { name: "FindRuns with org auth", auth: &influxdb.Authorization{Status: "active", Permissions: orgReadAllTaskPermissions}, - check: func(ctx context.Context, svc influxdb.TaskService) error { - _, _, err := svc.FindRuns(ctx, influxdb.RunFilter{ + check: func(ctx context.Context, svc taskmodel.TaskService) error { + _, _, err := svc.FindRuns(ctx, taskmodel.RunFilter{ Task: taskID, }) return err @@ -445,8 +447,8 @@ from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")` { name: "FindRuns with task auth", auth: &influxdb.Authorization{Status: "active", Permissions: orgReadTaskPermissions}, - check: func(ctx context.Context, svc influxdb.TaskService) error { - _, _, err := svc.FindRuns(ctx, influxdb.RunFilter{ + check: func(ctx context.Context, svc taskmodel.TaskService) error { + _, _, err := svc.FindRuns(ctx, taskmodel.RunFilter{ Task: taskID, }) return err @@ -455,7 +457,7 @@ from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")` { name: "FindRunByID missing auth", auth: &influxdb.Authorization{Permissions: []influxdb.Permission{}}, - check: func(ctx context.Context, svc influxdb.TaskService) error { + check: func(ctx context.Context, svc taskmodel.TaskService) error { _, err := svc.FindRunByID(ctx, taskID, 10) if err == nil { return errors.New("returned without error without permission") @@ -466,7 +468,7 @@ from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")` { name: "FindRunByID with org auth", auth: &influxdb.Authorization{Status: "active", Permissions: orgReadAllTaskPermissions}, - check: func(ctx context.Context, svc influxdb.TaskService) error { + check: func(ctx context.Context, svc taskmodel.TaskService) error { _, err := svc.FindRunByID(ctx, taskID, 10) return err }, @@ -474,7 +476,7 @@ from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")` { name: "FindRunByID with task auth", auth: &influxdb.Authorization{Status: "active", Permissions: orgReadTaskPermissions}, - check: func(ctx context.Context, svc influxdb.TaskService) error { + check: func(ctx context.Context, svc taskmodel.TaskService) error { _, err := svc.FindRunByID(ctx, taskID, 10) return err }, @@ -482,7 +484,7 @@ from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")` { name: "CancelRun with bad auth", auth: &influxdb.Authorization{Status: "active", Permissions: wrongOrgReadAllTaskPermissions}, - check: func(ctx context.Context, svc influxdb.TaskService) error { + check: func(ctx context.Context, svc taskmodel.TaskService) error { err := svc.CancelRun(ctx, taskID, 10) if err == nil { return errors.New("returned no error with a invalid auth") @@ -493,7 +495,7 @@ from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")` { name: "CancelRun with org 
auth", auth: &influxdb.Authorization{Status: "active", Permissions: orgWriteAllTaskPermissions}, - check: func(ctx context.Context, svc influxdb.TaskService) error { + check: func(ctx context.Context, svc taskmodel.TaskService) error { err := svc.CancelRun(ctx, taskID, 10) return err }, @@ -501,7 +503,7 @@ from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")` { name: "CancelRun with task auth", auth: &influxdb.Authorization{Status: "active", Permissions: orgWriteTaskPermissions}, - check: func(ctx context.Context, svc influxdb.TaskService) error { + check: func(ctx context.Context, svc taskmodel.TaskService) error { err := svc.CancelRun(ctx, taskID, 10) return err }, @@ -509,7 +511,7 @@ from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")` { name: "RetryRun with bad auth", auth: &influxdb.Authorization{Status: "active", Permissions: wrongOrgReadAllTaskPermissions}, - check: func(ctx context.Context, svc influxdb.TaskService) error { + check: func(ctx context.Context, svc taskmodel.TaskService) error { _, err := svc.RetryRun(ctx, taskID, 10) if err == nil { return errors.New("returned no error with a invalid auth") @@ -520,7 +522,7 @@ from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")` { name: "RetryRun with org auth", auth: &influxdb.Authorization{Status: "active", Permissions: orgWriteAllTaskPermissions}, - check: func(ctx context.Context, svc influxdb.TaskService) error { + check: func(ctx context.Context, svc taskmodel.TaskService) error { _, err := svc.RetryRun(ctx, taskID, 10) return err }, @@ -528,7 +530,7 @@ from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")` { name: "RetryRun with task auth", auth: &influxdb.Authorization{Status: "active", Permissions: orgWriteTaskPermissions}, - check: func(ctx context.Context, svc influxdb.TaskService) error { + check: func(ctx context.Context, svc taskmodel.TaskService) error { _, err := svc.RetryRun(ctx, taskID, 10) return err }, @@ -536,7 +538,7 @@ from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")` { name: "ForceRun with bad auth", auth: &influxdb.Authorization{Status: "active", Permissions: wrongOrgReadAllTaskPermissions}, - check: func(ctx context.Context, svc influxdb.TaskService) error { + check: func(ctx context.Context, svc taskmodel.TaskService) error { _, err := svc.ForceRun(ctx, taskID, 10000) if err == nil { return errors.New("returned no error with a invalid auth") @@ -547,7 +549,7 @@ from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")` { name: "ForceRun with org auth", auth: &influxdb.Authorization{Status: "active", Permissions: orgWriteAllTaskPermissions}, - check: func(ctx context.Context, svc influxdb.TaskService) error { + check: func(ctx context.Context, svc taskmodel.TaskService) error { _, err := svc.ForceRun(ctx, taskID, 10000) return err }, @@ -555,7 +557,7 @@ from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")` { name: "ForceRun with task auth", auth: &influxdb.Authorization{Status: "active", Permissions: orgWriteTaskPermissions}, - check: func(ctx context.Context, svc influxdb.TaskService) error { + check: func(ctx context.Context, svc taskmodel.TaskService) error { _, err := svc.ForceRun(ctx, taskID, 10000) return err }, diff --git a/authorizer/telegraf.go b/authorizer/telegraf.go index 958692641b7..614c7eabd95 100644 --- a/authorizer/telegraf.go +++ b/authorizer/telegraf.go @@ -4,6 +4,7 @@ import ( "context" 
"github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/kit/platform" ) var _ influxdb.TelegrafConfigStore = (*TelegrafConfigService)(nil) @@ -24,7 +25,7 @@ func NewTelegrafConfigService(s influxdb.TelegrafConfigStore, urm influxdb.UserR } // FindTelegrafConfigByID checks to see if the authorizer on context has read access to the id provided. -func (s *TelegrafConfigService) FindTelegrafConfigByID(ctx context.Context, id influxdb.ID) (*influxdb.TelegrafConfig, error) { +func (s *TelegrafConfigService) FindTelegrafConfigByID(ctx context.Context, id platform.ID) (*influxdb.TelegrafConfig, error) { tc, err := s.s.FindTelegrafConfigByID(ctx, id) if err != nil { return nil, err @@ -47,7 +48,7 @@ func (s *TelegrafConfigService) FindTelegrafConfigs(ctx context.Context, filter } // CreateTelegrafConfig checks to see if the authorizer on context has write access to the global telegraf config resource. -func (s *TelegrafConfigService) CreateTelegrafConfig(ctx context.Context, tc *influxdb.TelegrafConfig, userID influxdb.ID) error { +func (s *TelegrafConfigService) CreateTelegrafConfig(ctx context.Context, tc *influxdb.TelegrafConfig, userID platform.ID) error { if _, _, err := AuthorizeCreate(ctx, influxdb.TelegrafsResourceType, tc.OrgID); err != nil { return err } @@ -55,7 +56,7 @@ func (s *TelegrafConfigService) CreateTelegrafConfig(ctx context.Context, tc *in } // UpdateTelegrafConfig checks to see if the authorizer on context has write access to the telegraf config provided. -func (s *TelegrafConfigService) UpdateTelegrafConfig(ctx context.Context, id influxdb.ID, upd *influxdb.TelegrafConfig, userID influxdb.ID) (*influxdb.TelegrafConfig, error) { +func (s *TelegrafConfigService) UpdateTelegrafConfig(ctx context.Context, id platform.ID, upd *influxdb.TelegrafConfig, userID platform.ID) (*influxdb.TelegrafConfig, error) { tc, err := s.FindTelegrafConfigByID(ctx, id) if err != nil { return nil, err @@ -67,7 +68,7 @@ func (s *TelegrafConfigService) UpdateTelegrafConfig(ctx context.Context, id inf } // DeleteTelegrafConfig checks to see if the authorizer on context has write access to the telegraf config provided. 
-func (s *TelegrafConfigService) DeleteTelegrafConfig(ctx context.Context, id influxdb.ID) error { +func (s *TelegrafConfigService) DeleteTelegrafConfig(ctx context.Context, id platform.ID) error { tc, err := s.FindTelegrafConfigByID(ctx, id) if err != nil { return err diff --git a/authorizer/telegraf_test.go b/authorizer/telegraf_test.go index 64a00581fc8..85581e19d82 100644 --- a/authorizer/telegraf_test.go +++ b/authorizer/telegraf_test.go @@ -10,6 +10,8 @@ import ( "github.com/influxdata/influxdb/v2" "github.com/influxdata/influxdb/v2/authorizer" influxdbcontext "github.com/influxdata/influxdb/v2/context" + "github.com/influxdata/influxdb/v2/kit/platform" + "github.com/influxdata/influxdb/v2/kit/platform/errors" "github.com/influxdata/influxdb/v2/mock" influxdbtesting "github.com/influxdata/influxdb/v2/testing" ) @@ -33,7 +35,7 @@ func TestTelegrafConfigStore_FindTelegrafConfigByID(t *testing.T) { } type args struct { permission influxdb.Permission - id influxdb.ID + id platform.ID } type wants struct { err error @@ -49,7 +51,7 @@ func TestTelegrafConfigStore_FindTelegrafConfigByID(t *testing.T) { name: "authorized to access id", fields: fields{ TelegrafConfigStore: &mock.TelegrafConfigStore{ - FindTelegrafConfigByIDF: func(ctx context.Context, id influxdb.ID) (*influxdb.TelegrafConfig, error) { + FindTelegrafConfigByIDF: func(ctx context.Context, id platform.ID) (*influxdb.TelegrafConfig, error) { return &influxdb.TelegrafConfig{ ID: id, OrgID: 10, @@ -75,7 +77,7 @@ func TestTelegrafConfigStore_FindTelegrafConfigByID(t *testing.T) { name: "unauthorized to access id", fields: fields{ TelegrafConfigStore: &mock.TelegrafConfigStore{ - FindTelegrafConfigByIDF: func(ctx context.Context, id influxdb.ID) (*influxdb.TelegrafConfig, error) { + FindTelegrafConfigByIDF: func(ctx context.Context, id platform.ID) (*influxdb.TelegrafConfig, error) { return &influxdb.TelegrafConfig{ ID: id, OrgID: 10, @@ -94,9 +96,9 @@ func TestTelegrafConfigStore_FindTelegrafConfigByID(t *testing.T) { id: 1, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "read:orgs/000000000000000a/telegrafs/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -248,7 +250,7 @@ func TestTelegrafConfigStore_UpdateTelegrafConfig(t *testing.T) { TelegrafConfigStore influxdb.TelegrafConfigStore } type args struct { - id influxdb.ID + id platform.ID permissions []influxdb.Permission } type wants struct { @@ -265,13 +267,13 @@ func TestTelegrafConfigStore_UpdateTelegrafConfig(t *testing.T) { name: "authorized to update telegraf", fields: fields{ TelegrafConfigStore: &mock.TelegrafConfigStore{ - FindTelegrafConfigByIDF: func(ctc context.Context, id influxdb.ID) (*influxdb.TelegrafConfig, error) { + FindTelegrafConfigByIDF: func(ctc context.Context, id platform.ID) (*influxdb.TelegrafConfig, error) { return &influxdb.TelegrafConfig{ ID: 1, OrgID: 10, }, nil }, - UpdateTelegrafConfigF: func(ctx context.Context, id influxdb.ID, upd *influxdb.TelegrafConfig, userID influxdb.ID) (*influxdb.TelegrafConfig, error) { + UpdateTelegrafConfigF: func(ctx context.Context, id platform.ID, upd *influxdb.TelegrafConfig, userID platform.ID) (*influxdb.TelegrafConfig, error) { return &influxdb.TelegrafConfig{ ID: 1, OrgID: 10, @@ -306,13 +308,13 @@ func TestTelegrafConfigStore_UpdateTelegrafConfig(t *testing.T) { name: "unauthorized to update telegraf", fields: fields{ TelegrafConfigStore: &mock.TelegrafConfigStore{ - 
FindTelegrafConfigByIDF: func(ctc context.Context, id influxdb.ID) (*influxdb.TelegrafConfig, error) { + FindTelegrafConfigByIDF: func(ctc context.Context, id platform.ID) (*influxdb.TelegrafConfig, error) { return &influxdb.TelegrafConfig{ ID: 1, OrgID: 10, }, nil }, - UpdateTelegrafConfigF: func(ctx context.Context, id influxdb.ID, upd *influxdb.TelegrafConfig, userID influxdb.ID) (*influxdb.TelegrafConfig, error) { + UpdateTelegrafConfigF: func(ctx context.Context, id platform.ID, upd *influxdb.TelegrafConfig, userID platform.ID) (*influxdb.TelegrafConfig, error) { return &influxdb.TelegrafConfig{ ID: 1, OrgID: 10, @@ -333,9 +335,9 @@ func TestTelegrafConfigStore_UpdateTelegrafConfig(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/000000000000000a/telegrafs/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -348,7 +350,7 @@ func TestTelegrafConfigStore_UpdateTelegrafConfig(t *testing.T) { ctx := context.Background() ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - _, err := s.UpdateTelegrafConfig(ctx, tt.args.id, &influxdb.TelegrafConfig{}, influxdb.ID(1)) + _, err := s.UpdateTelegrafConfig(ctx, tt.args.id, &influxdb.TelegrafConfig{}, platform.ID(1)) influxdbtesting.ErrorsEqual(t, err, tt.wants.err) }) } @@ -359,7 +361,7 @@ func TestTelegrafConfigStore_DeleteTelegrafConfig(t *testing.T) { TelegrafConfigStore influxdb.TelegrafConfigStore } type args struct { - id influxdb.ID + id platform.ID permissions []influxdb.Permission } type wants struct { @@ -376,13 +378,13 @@ func TestTelegrafConfigStore_DeleteTelegrafConfig(t *testing.T) { name: "authorized to delete telegraf", fields: fields{ TelegrafConfigStore: &mock.TelegrafConfigStore{ - FindTelegrafConfigByIDF: func(ctc context.Context, id influxdb.ID) (*influxdb.TelegrafConfig, error) { + FindTelegrafConfigByIDF: func(ctc context.Context, id platform.ID) (*influxdb.TelegrafConfig, error) { return &influxdb.TelegrafConfig{ ID: 1, OrgID: 10, }, nil }, - DeleteTelegrafConfigF: func(ctx context.Context, id influxdb.ID) error { + DeleteTelegrafConfigF: func(ctx context.Context, id platform.ID) error { return nil }, }, @@ -414,13 +416,13 @@ func TestTelegrafConfigStore_DeleteTelegrafConfig(t *testing.T) { name: "unauthorized to delete telegraf", fields: fields{ TelegrafConfigStore: &mock.TelegrafConfigStore{ - FindTelegrafConfigByIDF: func(ctc context.Context, id influxdb.ID) (*influxdb.TelegrafConfig, error) { + FindTelegrafConfigByIDF: func(ctc context.Context, id platform.ID) (*influxdb.TelegrafConfig, error) { return &influxdb.TelegrafConfig{ ID: 1, OrgID: 10, }, nil }, - DeleteTelegrafConfigF: func(ctx context.Context, id influxdb.ID) error { + DeleteTelegrafConfigF: func(ctx context.Context, id platform.ID) error { return nil }, }, @@ -438,9 +440,9 @@ func TestTelegrafConfigStore_DeleteTelegrafConfig(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/000000000000000a/telegrafs/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -465,7 +467,7 @@ func TestTelegrafConfigStore_CreateTelegrafConfig(t *testing.T) { } type args struct { permission influxdb.Permission - orgID influxdb.ID + orgID platform.ID } type wants struct { err error @@ -481,7 +483,7 @@ func TestTelegrafConfigStore_CreateTelegrafConfig(t *testing.T) { name: "authorized to create telegraf", fields: fields{ 
TelegrafConfigStore: &mock.TelegrafConfigStore{ - CreateTelegrafConfigF: func(ctx context.Context, tc *influxdb.TelegrafConfig, userID influxdb.ID) error { + CreateTelegrafConfigF: func(ctx context.Context, tc *influxdb.TelegrafConfig, userID platform.ID) error { return nil }, }, @@ -504,7 +506,7 @@ func TestTelegrafConfigStore_CreateTelegrafConfig(t *testing.T) { name: "unauthorized to create telegraf", fields: fields{ TelegrafConfigStore: &mock.TelegrafConfigStore{ - CreateTelegrafConfigF: func(ctx context.Context, tc *influxdb.TelegrafConfig, userID influxdb.ID) error { + CreateTelegrafConfigF: func(ctx context.Context, tc *influxdb.TelegrafConfig, userID platform.ID) error { return nil }, }, @@ -520,9 +522,9 @@ func TestTelegrafConfigStore_CreateTelegrafConfig(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/000000000000000a/telegrafs is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -535,7 +537,7 @@ func TestTelegrafConfigStore_CreateTelegrafConfig(t *testing.T) { ctx := context.Background() ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - err := s.CreateTelegrafConfig(ctx, &influxdb.TelegrafConfig{OrgID: tt.args.orgID}, influxdb.ID(1)) + err := s.CreateTelegrafConfig(ctx, &influxdb.TelegrafConfig{OrgID: tt.args.orgID}, platform.ID(1)) influxdbtesting.ErrorsEqual(t, err, tt.wants.err) }) } diff --git a/authorizer/urm.go b/authorizer/urm.go index b57097a3f9e..b59b0d75ad3 100644 --- a/authorizer/urm.go +++ b/authorizer/urm.go @@ -4,10 +4,11 @@ import ( "context" "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/kit/platform" ) type OrgIDResolver interface { - FindResourceOrganizationID(ctx context.Context, rt influxdb.ResourceType, id influxdb.ID) (influxdb.ID, error) + FindResourceOrganizationID(ctx context.Context, rt influxdb.ResourceType, id platform.ID) (platform.ID, error) } type URMService struct { @@ -41,7 +42,7 @@ func (s *URMService) CreateUserResourceMapping(ctx context.Context, m *influxdb. 
return s.s.CreateUserResourceMapping(ctx, m) } -func (s *URMService) DeleteUserResourceMapping(ctx context.Context, resourceID influxdb.ID, userID influxdb.ID) error { +func (s *URMService) DeleteUserResourceMapping(ctx context.Context, resourceID platform.ID, userID platform.ID) error { f := influxdb.UserResourceMappingFilter{ResourceID: resourceID, UserID: userID} urms, _, err := s.s.FindUserResourceMappings(ctx, f) if err != nil { diff --git a/authorizer/urm_test.go b/authorizer/urm_test.go index e1dd7de79e6..8d2152fde3e 100644 --- a/authorizer/urm_test.go +++ b/authorizer/urm_test.go @@ -8,15 +8,17 @@ import ( "github.com/influxdata/influxdb/v2" "github.com/influxdata/influxdb/v2/authorizer" influxdbcontext "github.com/influxdata/influxdb/v2/context" + "github.com/influxdata/influxdb/v2/kit/platform" + "github.com/influxdata/influxdb/v2/kit/platform/errors" "github.com/influxdata/influxdb/v2/mock" influxdbtesting "github.com/influxdata/influxdb/v2/testing" ) type OrgService struct { - OrgID influxdb.ID + OrgID platform.ID } -func (s *OrgService) FindResourceOrganizationID(ctx context.Context, rt influxdb.ResourceType, id influxdb.ID) (influxdb.ID, error) { +func (s *OrgService) FindResourceOrganizationID(ctx context.Context, rt influxdb.ResourceType, id platform.ID) (platform.ID, error) { return s.OrgID, nil } @@ -169,7 +171,7 @@ func TestURMService_WriteUserResourceMapping(t *testing.T) { CreateMappingFn: func(ctx context.Context, m *influxdb.UserResourceMapping) error { return nil }, - DeleteMappingFn: func(ctx context.Context, rid, uid influxdb.ID) error { + DeleteMappingFn: func(ctx context.Context, rid, uid platform.ID) error { return nil }, FindMappingsFn: func(ctx context.Context, filter influxdb.UserResourceMappingFilter) ([]*influxdb.UserResourceMapping, int, error) { @@ -204,7 +206,7 @@ func TestURMService_WriteUserResourceMapping(t *testing.T) { CreateMappingFn: func(ctx context.Context, m *influxdb.UserResourceMapping) error { return nil }, - DeleteMappingFn: func(ctx context.Context, rid, uid influxdb.ID) error { + DeleteMappingFn: func(ctx context.Context, rid, uid platform.ID) error { return nil }, FindMappingsFn: func(ctx context.Context, filter influxdb.UserResourceMappingFilter) ([]*influxdb.UserResourceMapping, int, error) { @@ -228,9 +230,9 @@ func TestURMService_WriteUserResourceMapping(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/000000000000000a/buckets/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, diff --git a/authorizer/user.go b/authorizer/user.go index de3ba56cfe6..68702212551 100644 --- a/authorizer/user.go +++ b/authorizer/user.go @@ -4,6 +4,8 @@ import ( "context" "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/kit/platform" + "github.com/influxdata/influxdb/v2/kit/platform/errors" ) var _ influxdb.UserService = (*UserService)(nil) @@ -22,7 +24,7 @@ func NewUserService(s influxdb.UserService) *UserService { } // FindUserByID checks to see if the authorizer on context has read access to the id provided. 
-func (s *UserService) FindUserByID(ctx context.Context, id influxdb.ID) (*influxdb.User, error) { +func (s *UserService) FindUserByID(ctx context.Context, id platform.ID) (*influxdb.User, error) { if _, _, err := AuthorizeReadResource(ctx, influxdb.UsersResourceType, id); err != nil { return nil, err } @@ -61,7 +63,7 @@ func (s *UserService) CreateUser(ctx context.Context, o *influxdb.User) error { } // UpdateUser checks to see if the authorizer on context has write access to the user provided. -func (s *UserService) UpdateUser(ctx context.Context, id influxdb.ID, upd influxdb.UserUpdate) (*influxdb.User, error) { +func (s *UserService) UpdateUser(ctx context.Context, id platform.ID, upd influxdb.UserUpdate) (*influxdb.User, error) { if _, _, err := AuthorizeWriteResource(ctx, influxdb.UsersResourceType, id); err != nil { return nil, err } @@ -69,16 +71,16 @@ func (s *UserService) UpdateUser(ctx context.Context, id influxdb.ID, upd influx } // DeleteUser checks to see if the authorizer on context has write access to the user provided. -func (s *UserService) DeleteUser(ctx context.Context, id influxdb.ID) error { +func (s *UserService) DeleteUser(ctx context.Context, id platform.ID) error { if _, _, err := AuthorizeWriteResource(ctx, influxdb.UsersResourceType, id); err != nil { return err } return s.s.DeleteUser(ctx, id) } -func (s *UserService) FindPermissionForUser(ctx context.Context, uid influxdb.ID) (influxdb.PermissionSet, error) { - return nil, &influxdb.Error{ - Code: influxdb.EInternal, +func (s *UserService) FindPermissionForUser(ctx context.Context, uid platform.ID) (influxdb.PermissionSet, error) { + return nil, &errors.Error{ + Code: errors.EInternal, Msg: "not implemented", } } diff --git a/authorizer/user_test.go b/authorizer/user_test.go index 0f7e125816c..f960adfa916 100644 --- a/authorizer/user_test.go +++ b/authorizer/user_test.go @@ -10,6 +10,8 @@ import ( "github.com/influxdata/influxdb/v2" "github.com/influxdata/influxdb/v2/authorizer" influxdbcontext "github.com/influxdata/influxdb/v2/context" + "github.com/influxdata/influxdb/v2/kit/platform" + "github.com/influxdata/influxdb/v2/kit/platform/errors" "github.com/influxdata/influxdb/v2/mock" influxdbtesting "github.com/influxdata/influxdb/v2/testing" ) @@ -33,7 +35,7 @@ func TestUserService_FindUserByID(t *testing.T) { } type args struct { permission influxdb.Permission - id influxdb.ID + id platform.ID } type wants struct { err error @@ -49,7 +51,7 @@ func TestUserService_FindUserByID(t *testing.T) { name: "authorized to access id", fields: fields{ UserService: &mock.UserService{ - FindUserByIDFn: func(ctx context.Context, id influxdb.ID) (*influxdb.User, error) { + FindUserByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.User, error) { return &influxdb.User{ ID: id, }, nil @@ -74,7 +76,7 @@ func TestUserService_FindUserByID(t *testing.T) { name: "unauthorized to access id", fields: fields{ UserService: &mock.UserService{ - FindUserByIDFn: func(ctx context.Context, id influxdb.ID) (*influxdb.User, error) { + FindUserByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.User, error) { return &influxdb.User{ ID: id, }, nil @@ -92,9 +94,9 @@ func TestUserService_FindUserByID(t *testing.T) { id: 1, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "read:users/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -175,9 +177,9 @@ func TestUserService_FindUser(t *testing.T) { 
}, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "read:users/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -315,7 +317,7 @@ func TestUserService_UpdateUser(t *testing.T) { UserService influxdb.UserService } type args struct { - id influxdb.ID + id platform.ID permission influxdb.Permission } type wants struct { @@ -332,7 +334,7 @@ func TestUserService_UpdateUser(t *testing.T) { name: "authorized to update user", fields: fields{ UserService: &mock.UserService{ - UpdateUserFn: func(ctx context.Context, id influxdb.ID, upd influxdb.UserUpdate) (*influxdb.User, error) { + UpdateUserFn: func(ctx context.Context, id platform.ID, upd influxdb.UserUpdate) (*influxdb.User, error) { return &influxdb.User{ ID: 1, }, nil @@ -357,7 +359,7 @@ func TestUserService_UpdateUser(t *testing.T) { name: "unauthorized to update user", fields: fields{ UserService: &mock.UserService{ - UpdateUserFn: func(ctx context.Context, id influxdb.ID, upd influxdb.UserUpdate) (*influxdb.User, error) { + UpdateUserFn: func(ctx context.Context, id platform.ID, upd influxdb.UserUpdate) (*influxdb.User, error) { return &influxdb.User{ ID: 1, }, nil @@ -375,9 +377,9 @@ func TestUserService_UpdateUser(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:users/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -401,7 +403,7 @@ func TestUserService_DeleteUser(t *testing.T) { UserService influxdb.UserService } type args struct { - id influxdb.ID + id platform.ID permission influxdb.Permission } type wants struct { @@ -418,7 +420,7 @@ func TestUserService_DeleteUser(t *testing.T) { name: "authorized to delete user", fields: fields{ UserService: &mock.UserService{ - DeleteUserFn: func(ctx context.Context, id influxdb.ID) error { + DeleteUserFn: func(ctx context.Context, id platform.ID) error { return nil }, }, @@ -441,7 +443,7 @@ func TestUserService_DeleteUser(t *testing.T) { name: "unauthorized to delete user", fields: fields{ UserService: &mock.UserService{ - DeleteUserFn: func(ctx context.Context, id influxdb.ID) error { + DeleteUserFn: func(ctx context.Context, id platform.ID) error { return nil }, }, @@ -457,9 +459,9 @@ func TestUserService_DeleteUser(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:users/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -535,9 +537,9 @@ func TestUserService_CreateUser(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:users is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, diff --git a/authorizer/variable.go b/authorizer/variable.go index 15b5cf37875..b8d2608b2b0 100644 --- a/authorizer/variable.go +++ b/authorizer/variable.go @@ -4,6 +4,7 @@ import ( "context" "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/kit/platform" ) var _ influxdb.VariableService = (*VariableService)(nil) @@ -22,7 +23,7 @@ func NewVariableService(s influxdb.VariableService) *VariableService { } // FindVariableByID checks to see if the authorizer on context has read access to the id provided. 
-func (s *VariableService) FindVariableByID(ctx context.Context, id influxdb.ID) (*influxdb.Variable, error) { +func (s *VariableService) FindVariableByID(ctx context.Context, id platform.ID) (*influxdb.Variable, error) { v, err := s.s.FindVariableByID(ctx, id) if err != nil { return nil, err @@ -53,7 +54,7 @@ func (s *VariableService) CreateVariable(ctx context.Context, v *influxdb.Variab } // UpdateVariable checks to see if the authorizer on context has write access to the variable provided. -func (s *VariableService) UpdateVariable(ctx context.Context, id influxdb.ID, upd *influxdb.VariableUpdate) (*influxdb.Variable, error) { +func (s *VariableService) UpdateVariable(ctx context.Context, id platform.ID, upd *influxdb.VariableUpdate) (*influxdb.Variable, error) { v, err := s.FindVariableByID(ctx, id) if err != nil { return nil, err @@ -77,7 +78,7 @@ func (s *VariableService) ReplaceVariable(ctx context.Context, m *influxdb.Varia } // DeleteVariable checks to see if the authorizer on context has write access to the variable provided. -func (s *VariableService) DeleteVariable(ctx context.Context, id influxdb.ID) error { +func (s *VariableService) DeleteVariable(ctx context.Context, id platform.ID) error { v, err := s.FindVariableByID(ctx, id) if err != nil { return err diff --git a/authorizer/variable_test.go b/authorizer/variable_test.go index c5cf261f019..dbde4ee8497 100644 --- a/authorizer/variable_test.go +++ b/authorizer/variable_test.go @@ -10,6 +10,8 @@ import ( "github.com/influxdata/influxdb/v2" "github.com/influxdata/influxdb/v2/authorizer" influxdbcontext "github.com/influxdata/influxdb/v2/context" + "github.com/influxdata/influxdb/v2/kit/platform" + "github.com/influxdata/influxdb/v2/kit/platform/errors" "github.com/influxdata/influxdb/v2/mock" influxdbtesting "github.com/influxdata/influxdb/v2/testing" ) @@ -33,7 +35,7 @@ func TestVariableService_FindVariableByID(t *testing.T) { } type args struct { permission influxdb.Permission - id influxdb.ID + id platform.ID } type wants struct { err error @@ -49,7 +51,7 @@ func TestVariableService_FindVariableByID(t *testing.T) { name: "authorized to access id", fields: fields{ VariableService: &mock.VariableService{ - FindVariableByIDF: func(ctx context.Context, id influxdb.ID) (*influxdb.Variable, error) { + FindVariableByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Variable, error) { return &influxdb.Variable{ ID: id, OrganizationID: 10, @@ -75,7 +77,7 @@ func TestVariableService_FindVariableByID(t *testing.T) { name: "unauthorized to access id", fields: fields{ VariableService: &mock.VariableService{ - FindVariableByIDF: func(ctx context.Context, id influxdb.ID) (*influxdb.Variable, error) { + FindVariableByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Variable, error) { return &influxdb.Variable{ ID: id, OrganizationID: 10, @@ -94,9 +96,9 @@ func TestVariableService_FindVariableByID(t *testing.T) { id: 1, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "read:orgs/000000000000000a/variables/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -248,7 +250,7 @@ func TestVariableService_UpdateVariable(t *testing.T) { VariableService influxdb.VariableService } type args struct { - id influxdb.ID + id platform.ID permissions []influxdb.Permission } type wants struct { @@ -265,13 +267,13 @@ func TestVariableService_UpdateVariable(t *testing.T) { name: "authorized to update variable", 
fields: fields{ VariableService: &mock.VariableService{ - FindVariableByIDF: func(ctx context.Context, id influxdb.ID) (*influxdb.Variable, error) { + FindVariableByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Variable, error) { return &influxdb.Variable{ ID: 1, OrganizationID: 10, }, nil }, - UpdateVariableF: func(ctx context.Context, id influxdb.ID, upd *influxdb.VariableUpdate) (*influxdb.Variable, error) { + UpdateVariableF: func(ctx context.Context, id platform.ID, upd *influxdb.VariableUpdate) (*influxdb.Variable, error) { return &influxdb.Variable{ ID: 1, OrganizationID: 10, @@ -306,13 +308,13 @@ func TestVariableService_UpdateVariable(t *testing.T) { name: "unauthorized to update variable", fields: fields{ VariableService: &mock.VariableService{ - FindVariableByIDF: func(ctx context.Context, id influxdb.ID) (*influxdb.Variable, error) { + FindVariableByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Variable, error) { return &influxdb.Variable{ ID: 1, OrganizationID: 10, }, nil }, - UpdateVariableF: func(ctx context.Context, id influxdb.ID, upd *influxdb.VariableUpdate) (*influxdb.Variable, error) { + UpdateVariableF: func(ctx context.Context, id platform.ID, upd *influxdb.VariableUpdate) (*influxdb.Variable, error) { return &influxdb.Variable{ ID: 1, OrganizationID: 10, @@ -333,9 +335,9 @@ func TestVariableService_UpdateVariable(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/000000000000000a/variables/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -376,7 +378,7 @@ func TestVariableService_ReplaceVariable(t *testing.T) { name: "authorized to replace variable", fields: fields{ VariableService: &mock.VariableService{ - FindVariableByIDF: func(ctx context.Context, id influxdb.ID) (*influxdb.Variable, error) { + FindVariableByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Variable, error) { return &influxdb.Variable{ ID: 1, OrganizationID: 10, @@ -418,7 +420,7 @@ func TestVariableService_ReplaceVariable(t *testing.T) { name: "unauthorized to replace variable", fields: fields{ VariableService: &mock.VariableService{ - FindVariableByIDF: func(ctx context.Context, id influxdb.ID) (*influxdb.Variable, error) { + FindVariableByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Variable, error) { return &influxdb.Variable{ ID: 1, OrganizationID: 10, @@ -445,9 +447,9 @@ func TestVariableService_ReplaceVariable(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/000000000000000a/variables/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -471,7 +473,7 @@ func TestVariableService_DeleteVariable(t *testing.T) { VariableService influxdb.VariableService } type args struct { - id influxdb.ID + id platform.ID permissions []influxdb.Permission } type wants struct { @@ -488,13 +490,13 @@ func TestVariableService_DeleteVariable(t *testing.T) { name: "authorized to delete variable", fields: fields{ VariableService: &mock.VariableService{ - FindVariableByIDF: func(ctx context.Context, id influxdb.ID) (*influxdb.Variable, error) { + FindVariableByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Variable, error) { return &influxdb.Variable{ ID: 1, OrganizationID: 10, }, nil }, - DeleteVariableF: func(ctx context.Context, id influxdb.ID) error { + DeleteVariableF: func(ctx context.Context, id platform.ID) error { return nil }, }, @@ -526,13 
+528,13 @@ func TestVariableService_DeleteVariable(t *testing.T) { name: "unauthorized to delete variable", fields: fields{ VariableService: &mock.VariableService{ - FindVariableByIDF: func(ctx context.Context, id influxdb.ID) (*influxdb.Variable, error) { + FindVariableByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Variable, error) { return &influxdb.Variable{ ID: 1, OrganizationID: 10, }, nil }, - DeleteVariableF: func(ctx context.Context, id influxdb.ID) error { + DeleteVariableF: func(ctx context.Context, id platform.ID) error { return nil }, }, @@ -550,9 +552,9 @@ func TestVariableService_DeleteVariable(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/000000000000000a/variables/0000000000000001 is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, @@ -577,7 +579,7 @@ func TestVariableService_CreateVariable(t *testing.T) { } type args struct { permission influxdb.Permission - orgID influxdb.ID + orgID platform.ID } type wants struct { err error @@ -632,9 +634,9 @@ func TestVariableService_CreateVariable(t *testing.T) { }, }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Msg: "write:orgs/000000000000000a/variables is unauthorized", - Code: influxdb.EUnauthorized, + Code: errors.EUnauthorized, }, }, }, diff --git a/authz.go b/authz.go index c70cb200f13..a4607cb3072 100644 --- a/authz.go +++ b/authz.go @@ -3,8 +3,10 @@ package influxdb import ( "errors" "fmt" - "os" - "path/filepath" + "path" + + "github.com/influxdata/influxdb/v2/kit/platform" + errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors" ) var ( @@ -22,10 +24,10 @@ type Authorizer interface { PermissionSet() (PermissionSet, error) // ID returns an identifier used for auditing. - Identifier() ID + Identifier() platform.ID // GetUserID returns the user id. - GetUserID() ID + GetUserID() platform.ID // Kind metadata for auditing. Kind() string @@ -74,22 +76,22 @@ type ResourceType string // Resource is an authorizable resource. type Resource struct { Type ResourceType `json:"type"` - ID *ID `json:"id,omitempty"` - OrgID *ID `json:"orgID,omitempty"` + ID *platform.ID `json:"id,omitempty"` + OrgID *platform.ID `json:"orgID,omitempty"` } // String stringifies a resource func (r Resource) String() string { if r.OrgID != nil && r.ID != nil { - return filepath.Join(string(OrgsResourceType), r.OrgID.String(), string(r.Type), r.ID.String()) + return path.Join(string(OrgsResourceType), r.OrgID.String(), string(r.Type), r.ID.String()) } if r.OrgID != nil { - return filepath.Join(string(OrgsResourceType), r.OrgID.String(), string(r.Type)) + return path.Join(string(OrgsResourceType), r.OrgID.String(), string(r.Type)) } if r.ID != nil { - return filepath.Join(string(r.Type), r.ID.String()) + return path.Join(string(r.Type), r.ID.String()) } return string(r.Type) @@ -132,6 +134,16 @@ const ( ChecksResourceType = ResourceType("checks") // 16 // DBRPType gives permission to one or more DBRPs. DBRPResourceType = ResourceType("dbrp") // 17 + // NotebooksResourceType gives permission to one or more notebooks. + NotebooksResourceType = ResourceType("notebooks") // 18 + // AnnotationsResourceType gives permission to one or more annotations. + AnnotationsResourceType = ResourceType("annotations") // 19 + // RemotesResourceType gives permission to one or more remote connections. + RemotesResourceType = ResourceType("remotes") // 20 + // ReplicationsResourceType gives permission to one or more replications. 
+ ReplicationsResourceType = ResourceType("replications") // 21 + // InstanceResourceType is a special permission that allows ownership of the entire instance (creating orgs/operator tokens/etc) + InstanceResourceType = ResourceType("instance") // 22 ) // AllResourceTypes is the list of all known resource types. @@ -154,26 +166,14 @@ var AllResourceTypes = []ResourceType{ NotificationEndpointResourceType, // 15 ChecksResourceType, // 16 DBRPResourceType, // 17 + NotebooksResourceType, // 18 + AnnotationsResourceType, // 19 + RemotesResourceType, // 20 + ReplicationsResourceType, // 21 + InstanceResourceType, // 22 // NOTE: when modifying this list, please update the swagger for components.schemas.Permission resource enum. } -// OrgResourceTypes is the list of all known resource types that belong to an organization. -var OrgResourceTypes = []ResourceType{ - BucketsResourceType, // 1 - DashboardsResourceType, // 2 - SourcesResourceType, // 4 - TasksResourceType, // 5 - TelegrafsResourceType, // 6 - UsersResourceType, // 7 - VariablesResourceType, // 8 - SecretsResourceType, // 10 - DocumentsResourceType, // 13 - NotificationRuleResourceType, // 14 - NotificationEndpointResourceType, // 15 - ChecksResourceType, // 16 - DBRPResourceType, // 17 -} - // Valid checks if the resource type is a member of the ResourceType enum. func (r Resource) Valid() (err error) { return r.Type.Valid() @@ -200,6 +200,11 @@ func (t ResourceType) Valid() (err error) { case NotificationEndpointResourceType: // 15 case ChecksResourceType: // 16 case DBRPResourceType: // 17 + case NotebooksResourceType: // 18 + case AnnotationsResourceType: // 19 + case RemotesResourceType: // 20 + case ReplicationsResourceType: // 21 + case InstanceResourceType: // 22 default: err = ErrInvalidResourceType } @@ -219,17 +224,8 @@ type Permission struct { Resource Resource `json:"resource"` } -var newMatchBehavior bool - -func init() { - _, newMatchBehavior = os.LookupEnv("MATCHER_BEHAVIOR") -} - // Matches returns whether or not one permission matches the other. 
func (p Permission) Matches(perm Permission) bool { - if newMatchBehavior { - return p.matchesV2(perm) - } return p.matchesV1(perm) } @@ -238,6 +234,10 @@ func (p Permission) matchesV1(perm Permission) bool { return false } + if p.Resource.Type == InstanceResourceType { + return true + } + if p.Resource.Type != perm.Resource.Type { return false } @@ -276,53 +276,6 @@ func (p Permission) matchesV1(perm Permission) bool { return false } -func (p Permission) matchesV2(perm Permission) bool { - if p.Action != perm.Action { - return false - } - - if p.Resource.Type != perm.Resource.Type { - return false - } - - if p.Resource.OrgID == nil && p.Resource.ID == nil { - return true - } - - if p.Resource.OrgID != nil && perm.Resource.OrgID != nil && p.Resource.ID != nil && perm.Resource.ID != nil { - if *p.Resource.OrgID != *perm.Resource.OrgID && *p.Resource.ID == *perm.Resource.ID { - fmt.Printf("v2: old match used: p.Resource.OrgID=%s perm.Resource.OrgID=%s p.Resource.ID=%s", - *p.Resource.OrgID, *perm.Resource.OrgID, *p.Resource.ID) - } - } - - if p.Resource.OrgID != nil { - if perm.Resource.OrgID != nil { - if *p.Resource.OrgID == *perm.Resource.OrgID { - if p.Resource.ID == nil { - return true - } - if perm.Resource.ID != nil { - return *p.Resource.ID == *perm.Resource.ID - } - } - return false - } - } - - if p.Resource.ID != nil { - pID := *p.Resource.ID - if perm.Resource.ID != nil { - permID := *perm.Resource.ID - if pID == permID { - return true - } - } - } - - return false -} - func (p Permission) String() string { return fmt.Sprintf("%s:%s", p.Action, p.Resource) } @@ -330,33 +283,33 @@ func (p Permission) String() string { // Valid checks if there the resource and action provided is known. func (p *Permission) Valid() error { if err := p.Resource.Valid(); err != nil { - return &Error{ - Code: EInvalid, + return &errors2.Error{ + Code: errors2.EInvalid, Err: err, Msg: "invalid resource type for permission", } } if err := p.Action.Valid(); err != nil { - return &Error{ - Code: EInvalid, + return &errors2.Error{ + Code: errors2.EInvalid, Err: err, Msg: "invalid action type for permission", } } if p.Resource.OrgID != nil && !p.Resource.OrgID.Valid() { - return &Error{ - Code: EInvalid, - Err: ErrInvalidID, + return &errors2.Error{ + Code: errors2.EInvalid, + Err: platform.ErrInvalidID, Msg: "invalid org id for permission", } } if p.Resource.ID != nil && !p.Resource.ID.Valid() { - return &Error{ - Code: EInvalid, - Err: ErrInvalidID, + return &errors2.Error{ + Code: errors2.EInvalid, + Err: platform.ErrInvalidID, Msg: "invalid id for permission", } } @@ -365,7 +318,7 @@ func (p *Permission) Valid() error { } // NewPermission returns a permission with provided arguments. -func NewPermission(a Action, rt ResourceType, orgID ID) (*Permission, error) { +func NewPermission(a Action, rt ResourceType, orgID platform.ID) (*Permission, error) { p := &Permission{ Action: a, Resource: Resource{ @@ -378,7 +331,7 @@ func NewPermission(a Action, rt ResourceType, orgID ID) (*Permission, error) { } // NewResourcePermission returns a permission with provided arguments. -func NewResourcePermission(a Action, rt ResourceType, rid ID) (*Permission, error) { +func NewResourcePermission(a Action, rt ResourceType, rid platform.ID) (*Permission, error) { p := &Permission{ Action: a, Resource: Resource{ @@ -402,7 +355,7 @@ func NewGlobalPermission(a Action, rt ResourceType) (*Permission, error) { } // NewPermissionAtID creates a permission with the provided arguments. 
-func NewPermissionAtID(id ID, a Action, rt ResourceType, orgID ID) (*Permission, error) { +func NewPermissionAtID(id platform.ID, a Action, rt ResourceType, orgID platform.ID) (*Permission, error) { p := &Permission{ Action: a, Resource: Resource{ @@ -419,6 +372,11 @@ func NewPermissionAtID(id ID, a Action, rt ResourceType, orgID ID) (*Permission, func OperPermissions() []Permission { ps := []Permission{} for _, r := range AllResourceTypes { + // For now, we are only allowing instance permissions when logged in through session auth + // That is handled in user resource mapping + if r == InstanceResourceType { + continue + } for _, a := range actions { ps = append(ps, Permission{Action: a, Resource: Resource{Type: r}}) } @@ -432,15 +390,25 @@ func OperPermissions() []Permission { func ReadAllPermissions() []Permission { ps := make([]Permission, len(AllResourceTypes)) for i, t := range AllResourceTypes { + // For now, we are only allowing instance permissions when logged in through session auth + // That is handled in user resource mapping + if t == InstanceResourceType { + continue + } ps[i] = Permission{Action: ReadAction, Resource: Resource{Type: t}} } return ps } // OwnerPermissions are the default permissions for those who own a resource. -func OwnerPermissions(orgID ID) []Permission { +func OwnerPermissions(orgID platform.ID) []Permission { ps := []Permission{} for _, r := range AllResourceTypes { + // For now, we are only allowing instance permissions when logged in through session auth + // That is handled in user resource mapping + if r == InstanceResourceType { + continue + } for _, a := range actions { if r == OrgsResourceType { ps = append(ps, Permission{Action: a, Resource: Resource{Type: r, ID: &orgID}}) @@ -453,7 +421,7 @@ func OwnerPermissions(orgID ID) []Permission { } // MePermissions is the permission to read/write myself. -func MePermissions(userID ID) []Permission { +func MePermissions(userID platform.ID) []Permission { ps := []Permission{} for _, a := range actions { ps = append(ps, Permission{Action: a, Resource: Resource{Type: UsersResourceType, ID: &userID}}) @@ -463,9 +431,14 @@ func MePermissions(userID ID) []Permission { } // MemberPermissions are the default permissions for those who can see a resource. -func MemberPermissions(orgID ID) []Permission { +func MemberPermissions(orgID platform.ID) []Permission { ps := []Permission{} for _, r := range AllResourceTypes { + // For now, we are only allowing instance permissions when logged in through session auth + // That is handled in user resource mapping + if r == InstanceResourceType { + continue + } if r == OrgsResourceType { ps = append(ps, Permission{Action: ReadAction, Resource: Resource{Type: r, ID: &orgID}}) continue @@ -477,6 +450,6 @@ func MemberPermissions(orgID ID) []Permission { } // MemberPermissions are the default permissions for those who can see a resource. 
-func MemberBucketPermission(bucketID ID) Permission { +func MemberBucketPermission(bucketID platform.ID) Permission { return Permission{Action: ReadAction, Resource: Resource{Type: BucketsResourceType, ID: &bucketID}} } diff --git a/authz_test.go b/authz_test.go index 93c1327c3bb..f92a9ea37f4 100644 --- a/authz_test.go +++ b/authz_test.go @@ -4,6 +4,7 @@ import ( "testing" platform "github.com/influxdata/influxdb/v2" + platform2 "github.com/influxdata/influxdb/v2/kit/platform" influxdbtesting "github.com/influxdata/influxdb/v2/testing" ) @@ -279,7 +280,7 @@ func TestPermission_Valid(t *testing.T) { Action: platform.WriteAction, Resource: platform.Resource{ Type: platform.BucketsResourceType, - ID: func() *platform.ID { id := platform.InvalidID(); return &id }(), + ID: func() *platform2.ID { id := platform2.InvalidID(); return &id }(), OrgID: influxdbtesting.IDPtr(1), }, }, @@ -324,6 +325,8 @@ func TestPermissionAllResources_Valid(t *testing.T) { platform.BucketsResourceType, platform.DashboardsResourceType, platform.SourcesResourceType, + platform.NotebooksResourceType, + platform.AnnotationsResourceType, } for _, rt := range resources { @@ -431,7 +434,7 @@ func TestPermission_String(t *testing.T) { } } -func validID() *platform.ID { - id := platform.ID(100) +func validID() *platform2.ID { + id := platform2.ID(100) return &id } diff --git a/backup.go b/backup.go index 71f3b08ab6f..c5f4af6b708 100644 --- a/backup.go +++ b/backup.go @@ -4,6 +4,8 @@ import ( "context" "io" "time" + + "github.com/influxdata/influxdb/v2/kit/platform" ) const ( @@ -17,6 +19,31 @@ type BackupService interface { // BackupShard downloads a backup file for a single shard. BackupShard(ctx context.Context, w io.Writer, shardID uint64, since time.Time) error + + // RLockKVStore locks the database. + RLockKVStore() + + // RUnlockKVStore unlocks the database. + RUnlockKVStore() +} + +// SqlBackupRestoreService represents the backup and restore functions for the sqlite database. +type SqlBackupRestoreService interface { + // BackupSqlStore creates a live backup copy of the sqlite database. + BackupSqlStore(ctx context.Context, w io.Writer) error + + // RestoreSqlStore restores & replaces the sqlite database. + RestoreSqlStore(ctx context.Context, r io.Reader) error + + // RLockSqlStore takes a read lock on the database + RLockSqlStore() + + // RUnlockSqlStore releases a previously-taken read lock on the database. + RUnlockSqlStore() +} + +type BucketManifestWriter interface { + WriteManifest(ctx context.Context, w io.Writer) error } // RestoreService represents the data restore functions of InfluxDB. @@ -24,21 +51,66 @@ type RestoreService interface { // RestoreKVStore restores & replaces metadata database. RestoreKVStore(ctx context.Context, r io.Reader) error - // RestoreKVStore restores the metadata database. - RestoreBucket(ctx context.Context, id ID, rpiData []byte) (shardIDMap map[uint64]uint64, err error) + // RestoreBucket restores storage metadata for a bucket. + // TODO(danmoran): As far as I can tell, dbInfo is typed as a []byte because typing it as + // a meta.DatabaseInfo introduces a circular dependency between the root package and `meta`. + // We should refactor to make this signature easier to use. It might be easier to wait + // until we're ready to delete the 2.0.x restore APIs before refactoring. + RestoreBucket(ctx context.Context, id platform.ID, dbInfo []byte) (shardIDMap map[uint64]uint64, err error) // RestoreShard uploads a backup file for a single shard. 
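Together, the new read-lock hooks on the KV and sqlite stores let a backup handler pin both databases while it streams a consistent snapshot. A sketch of that coordination, assuming `BackupService` also exposes a `BackupKVStore(ctx, w)` method that this excerpt does not show:

```go
package main

import (
	"context"
	"io"

	influxdb "github.com/influxdata/influxdb/v2"
)

// backupAll streams backups of the bolt KV store and the sqlite store while
// holding read locks on both, so the two snapshots stay mutually consistent.
// BackupKVStore is assumed here; the excerpt above only shows BackupShard.
func backupAll(
	ctx context.Context,
	kv influxdb.BackupService,
	sql influxdb.SqlBackupRestoreService,
	kvOut, sqlOut io.Writer,
) error {
	kv.RLockKVStore()
	defer kv.RUnlockKVStore()

	sql.RLockSqlStore()
	defer sql.RUnlockSqlStore()

	if err := kv.BackupKVStore(ctx, kvOut); err != nil {
		return err
	}
	return sql.BackupSqlStore(ctx, sqlOut)
}
```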
RestoreShard(ctx context.Context, shardID uint64, r io.Reader) error } +// BucketMetadataManifest contains the information about a bucket for backup purposes. +// It is composed of various nested structs below. +type BucketMetadataManifest struct { + OrganizationID platform.ID `json:"organizationID"` + OrganizationName string `json:"organizationName"` + BucketID platform.ID `json:"bucketID"` + BucketName string `json:"bucketName"` + Description *string `json:"description,omitempty"` + DefaultRetentionPolicy string `json:"defaultRetentionPolicy"` + RetentionPolicies []RetentionPolicyManifest `json:"retentionPolicies"` +} + +type RetentionPolicyManifest struct { + Name string `json:"name"` + ReplicaN int `json:"replicaN"` + Duration time.Duration `json:"duration"` + ShardGroupDuration time.Duration `json:"shardGroupDuration"` + ShardGroups []ShardGroupManifest `json:"shardGroups"` + Subscriptions []SubscriptionManifest `json:"subscriptions"` +} + +type ShardGroupManifest struct { + ID uint64 `json:"id"` + StartTime time.Time `json:"startTime"` + EndTime time.Time `json:"endTime"` + DeletedAt *time.Time `json:"deletedAt,omitempty"` // use pointer to time.Time so that omitempty works + TruncatedAt *time.Time `json:"truncatedAt,omitempty"` // use pointer to time.Time so that omitempty works + Shards []ShardManifest `json:"shards"` +} + +type ShardManifest struct { + ID uint64 `json:"id"` + ShardOwners []ShardOwner `json:"shardOwners"` +} + +type ShardOwner struct { + NodeID uint64 `json:"nodeID"` +} + +type SubscriptionManifest struct { + Name string `json:"name"` + Mode string `json:"mode"` + Destinations []string `json:"destinations"` +} + // Manifest lists the KV and shard file information contained in the backup. type Manifest struct { KV ManifestKVEntry `json:"kv"` Files []ManifestEntry `json:"files"` - - // These fields are only set if filtering options are set on the CLI. - OrganizationID string `json:"organizationID,omitempty"` - BucketID string `json:"bucketID,omitempty"` } // ManifestEntry contains the data information for a backed up shard. @@ -59,6 +131,17 @@ type ManifestKVEntry struct { Size int64 `json:"size"` } +type RestoredBucketMappings struct { + ID platform.ID `json:"id"` + Name string `json:"name"` + ShardMappings []RestoredShardMapping `json:"shardMappings"` +} + +type RestoredShardMapping struct { + OldId uint64 `json:"oldId"` + NewId uint64 `json:"newId"` +} + // Size returns the size of the manifest. func (m *Manifest) Size() int64 { n := m.KV.Size diff --git a/backup/backup.go b/backup/backup.go new file mode 100644 index 00000000000..94cc4a9b58a --- /dev/null +++ b/backup/backup.go @@ -0,0 +1,143 @@ +package backup + +import ( + "context" + "encoding/json" + "io" + + "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/tenant" + "github.com/influxdata/influxdb/v2/v1/services/meta" +) + +type BucketManifestWriter struct { + ts *tenant.Service + mc *meta.Client +} + +func NewBucketManifestWriter(ts *tenant.Service, mc *meta.Client) BucketManifestWriter { + return BucketManifestWriter{ + ts: ts, + mc: mc, + } +} + +// WriteManifest writes a bucket manifest describing all of the buckets that exist in the database. +// It is intended to be used to write to an HTTP response after appropriate measures have been taken +// to ensure that the request is authorized. 
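Because the manifest types above are plain JSON-tagged structs, the output shape is easy to pin down. A small sketch that encodes a one-bucket manifest with made-up values, showing how the pointer fields drop out of the JSON when unset:

```go
package main

import (
	"encoding/json"
	"os"
	"time"

	influxdb "github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/kit/platform"
)

func main() {
	m := influxdb.BucketMetadataManifest{
		OrganizationID:         platform.ID(1),
		OrganizationName:       "my-org",
		BucketID:               platform.ID(2),
		BucketName:             "telegraf",
		DefaultRetentionPolicy: "autogen",
		RetentionPolicies: []influxdb.RetentionPolicyManifest{{
			Name:               "autogen",
			ReplicaN:           1,
			Duration:           0, // infinite retention
			ShardGroupDuration: 7 * 24 * time.Hour,
			ShardGroups:        []influxdb.ShardGroupManifest{},
			Subscriptions:      []influxdb.SubscriptionManifest{},
		}},
	}

	// Description is a *string with omitempty, and DeletedAt/TruncatedAt on
	// shard groups are *time.Time for the same reason: a nil pointer omits
	// the key entirely instead of emitting null.
	_ = json.NewEncoder(os.Stdout).Encode(&m)
}
```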
+func (b BucketManifestWriter) WriteManifest(ctx context.Context, w io.Writer) error { + bkts, _, err := b.ts.FindBuckets(ctx, influxdb.BucketFilter{}) + if err != nil { + return err + } + + l := make([]influxdb.BucketMetadataManifest, 0, len(bkts)) + + for _, bkt := range bkts { + org, err := b.ts.OrganizationService.FindOrganizationByID(ctx, bkt.OrgID) + if err != nil { + return err + } + + dbInfo := b.mc.Database(bkt.ID.String()) + + var description *string + if bkt.Description != "" { + description = &bkt.Description + } + + l = append(l, influxdb.BucketMetadataManifest{ + OrganizationID: bkt.OrgID, + OrganizationName: org.Name, + BucketID: bkt.ID, + BucketName: bkt.Name, + Description: description, + DefaultRetentionPolicy: dbInfo.DefaultRetentionPolicy, + RetentionPolicies: retentionPolicyToManifest(dbInfo.RetentionPolicies), + }) + } + + return json.NewEncoder(w).Encode(&l) +} + +// retentionPolicyToManifest and the various similar functions that follow are for converting +// from the structs in the meta package to the manifest structs +func retentionPolicyToManifest(meta []meta.RetentionPolicyInfo) []influxdb.RetentionPolicyManifest { + r := make([]influxdb.RetentionPolicyManifest, 0, len(meta)) + + for _, m := range meta { + r = append(r, influxdb.RetentionPolicyManifest{ + Name: m.Name, + ReplicaN: m.ReplicaN, + Duration: m.Duration, + ShardGroupDuration: m.ShardGroupDuration, + ShardGroups: shardGroupToManifest(m.ShardGroups), + Subscriptions: subscriptionInfosToManifest(m.Subscriptions), + }) + } + + return r +} + +func subscriptionInfosToManifest(subInfos []meta.SubscriptionInfo) []influxdb.SubscriptionManifest { + r := make([]influxdb.SubscriptionManifest, 0, len(subInfos)) + + for _, s := range subInfos { + r = append(r, influxdb.SubscriptionManifest(s)) + } + + return r +} + +func shardGroupToManifest(shardGroups []meta.ShardGroupInfo) []influxdb.ShardGroupManifest { + r := make([]influxdb.ShardGroupManifest, 0, len(shardGroups)) + + for _, s := range shardGroups { + deletedAt := &s.DeletedAt + truncatedAt := &s.TruncatedAt + + // set deletedAt and truncatedAt to nil rather than their zero values so that the fields + // can be properly omitted from the JSON response if they are empty + if deletedAt.IsZero() { + deletedAt = nil + } + + if truncatedAt.IsZero() { + truncatedAt = nil + } + + r = append(r, influxdb.ShardGroupManifest{ + ID: s.ID, + StartTime: s.StartTime, + EndTime: s.EndTime, + DeletedAt: deletedAt, + TruncatedAt: truncatedAt, + Shards: shardInfosToManifest(s.Shards), + }) + } + + return r +} + +func shardInfosToManifest(shards []meta.ShardInfo) []influxdb.ShardManifest { + r := make([]influxdb.ShardManifest, 0, len(shards)) + + for _, s := range shards { + r = append(r, influxdb.ShardManifest{ + ID: s.ID, + ShardOwners: shardOwnersToManifest(s.Owners), + }) + } + + return r +} + +func shardOwnersToManifest(shardOwners []meta.ShardOwner) []influxdb.ShardOwner { + r := make([]influxdb.ShardOwner, 0, len(shardOwners)) + + for _, s := range shardOwners { + r = append(r, influxdb.ShardOwner(s)) + } + + return r +} diff --git a/bolt/bbolt.go b/bolt/bbolt.go index 8b6f6ebf0d4..9d6c8bdace3 100644 --- a/bolt/bbolt.go +++ b/bolt/bbolt.go @@ -8,6 +8,7 @@ import ( "time" platform "github.com/influxdata/influxdb/v2" + platform2 "github.com/influxdata/influxdb/v2/kit/platform" "github.com/influxdata/influxdb/v2/rand" "github.com/influxdata/influxdb/v2/snowflake" bolt "go.etcd.io/bbolt" @@ -22,9 +23,11 @@ type Client struct { db *bolt.DB log 
*zap.Logger - IDGenerator platform.IDGenerator + IDGenerator platform2.IDGenerator TokenGenerator platform.TokenGenerator platform.TimeGenerator + + pluginsCollector *pluginMetricsCollector } // NewClient returns an instance of a Client. @@ -34,6 +37,8 @@ func NewClient(log *zap.Logger) *Client { IDGenerator: snowflake.NewIDGenerator(), TokenGenerator: rand.NewTokenGenerator(64), TimeGenerator: platform.RealTimeGenerator{}, + // Refresh telegraf plugin metrics every hour. + pluginsCollector: NewPluginMetricsCollector(time.Minute * 59), } } @@ -56,7 +61,16 @@ func (c *Client) Open(ctx context.Context) error { // Open database file. db, err := bolt.Open(c.Path, 0600, &bolt.Options{Timeout: 1 * time.Second}) if err != nil { - return fmt.Errorf("unable to open boltdb; is there a chronograf already running? %v", err) + // Hack to give a slightly nicer error message for a known failure mode when bolt calls + // mmap on a file system that doesn't support the MAP_SHARED option. + // + // See: https://github.com/boltdb/bolt/issues/272 + // See: https://stackoverflow.com/a/18421071 + if err.Error() == "invalid argument" { + return fmt.Errorf("unable to open boltdb: mmap of %q may not support the MAP_SHARED option", c.Path) + } + + return fmt.Errorf("unable to open boltdb: %w", err) } c.db = db @@ -64,6 +78,8 @@ func (c *Client) Open(ctx context.Context) error { return err } + c.pluginsCollector.Open(c.db) + c.log.Info("Resources opened", zap.String("path", c.Path)) return nil } @@ -86,6 +102,8 @@ func (c *Client) initialize(ctx context.Context) error { scraperBucket, telegrafBucket, telegrafPluginsBucket, + remoteBucket, + replicationBucket, userBucket, } for _, bktName := range bkts { @@ -103,6 +121,7 @@ func (c *Client) initialize(ctx context.Context) error { // Close the connection to the bolt database func (c *Client) Close() error { + c.pluginsCollector.Close() if c.db != nil { return c.db.Close() } diff --git a/bolt/bbolt_test.go b/bolt/bbolt_test.go index f7d2ed253c6..1ec0f7b957b 100644 --- a/bolt/bbolt_test.go +++ b/bolt/bbolt_test.go @@ -3,7 +3,6 @@ package bolt_test import ( "context" "errors" - "io/ioutil" "os" "path/filepath" "testing" @@ -27,7 +26,7 @@ func NewTestClient(t *testing.T) (*bolt.Client, func(), error) { func newTestClient(t *testing.T) (*bolt.Client, func(), error) { c := bolt.NewClient(zaptest.NewLogger(t)) - f, err := ioutil.TempFile("", "influxdata-platform-bolt-") + f, err := os.CreateTemp("", "influxdata-platform-bolt-") if err != nil { return nil, nil, errors.New("unable to open temporary boltdb file") } @@ -44,7 +43,7 @@ func newTestClient(t *testing.T) (*bolt.Client, func(), error) { } func TestClientOpen(t *testing.T) { - tempDir, err := ioutil.TempDir("", "") + tempDir, err := os.MkdirTemp("", "") if err != nil { t.Fatalf("unable to create temporary test directory %v", err) } @@ -70,7 +69,7 @@ func TestClientOpen(t *testing.T) { } func NewTestKVStore(t *testing.T) (*bolt.KVStore, func(), error) { - f, err := ioutil.TempFile("", "influxdata-platform-bolt-") + f, err := os.CreateTemp("", "influxdata-platform-bolt-") if err != nil { return nil, nil, errors.New("unable to open temporary boltdb file") } diff --git a/bolt/id.go b/bolt/id.go index 23d8aabbaaf..a6ad52f4548 100644 --- a/bolt/id.go +++ b/bolt/id.go @@ -5,7 +5,7 @@ import ( "fmt" "math/rand" - platform "github.com/influxdata/influxdb/v2" + platform2 "github.com/influxdata/influxdb/v2/kit/platform" bolt "go.etcd.io/bbolt" "go.uber.org/zap" ) @@ -16,7 +16,7 @@ var ( errIDNotFound = 
errors.New("source not found") ) -var _ platform.IDGenerator = (*Client)(nil) +var _ platform2.IDGenerator = (*Client)(nil) func (c *Client) initializeID(tx *bolt.Tx) error { if _, err := tx.CreateBucketIfNotExists(idsBucket); err != nil { @@ -38,9 +38,9 @@ func (c *Client) initializeID(tx *bolt.Tx) error { } // ID retrieves the unique ID for this influx instance. -func (c *Client) ID() platform.ID { +func (c *Client) ID() platform2.ID { // if any error occurs return a random number - id := platform.ID(rand.Int63()) + id := platform2.ID(rand.Int63()) err := c.db.View(func(tx *bolt.Tx) error { val, err := c.getID(tx) if err != nil { @@ -58,23 +58,23 @@ func (c *Client) ID() platform.ID { return id } -func (c *Client) getID(tx *bolt.Tx) (platform.ID, error) { +func (c *Client) getID(tx *bolt.Tx) (platform2.ID, error) { v := tx.Bucket(idsBucket).Get(idKey) if len(v) == 0 { - return platform.InvalidID(), errIDNotFound + return platform2.InvalidID(), errIDNotFound } return decodeID(v) } -func decodeID(val []byte) (platform.ID, error) { - if len(val) < platform.IDLength { +func decodeID(val []byte) (platform2.ID, error) { + if len(val) < platform2.IDLength { // This should not happen. - return platform.InvalidID(), fmt.Errorf("provided value is too short to contain an ID. Please report this error") + return platform2.InvalidID(), fmt.Errorf("provided value is too short to contain an ID. Please report this error") } - var id platform.ID - if err := id.Decode(val[:platform.IDLength]); err != nil { - return platform.InvalidID(), err + var id platform2.ID + if err := id.Decode(val[:platform2.IDLength]); err != nil { + return platform2.InvalidID(), err } return id, nil } diff --git a/bolt/id_test.go b/bolt/id_test.go index daf98993223..017993736b6 100644 --- a/bolt/id_test.go +++ b/bolt/id_test.go @@ -4,7 +4,7 @@ import ( "context" "testing" - platform "github.com/influxdata/influxdb/v2" + platform2 "github.com/influxdata/influxdb/v2/kit/platform" "github.com/influxdata/influxdb/v2/mock" ) @@ -15,7 +15,7 @@ func TestID(t *testing.T) { } defer closeFn() - testID := platform.ID(70000) + testID := platform2.ID(70000) c.IDGenerator = mock.NewIDGenerator(testID.String(), t) if err := c.Open(context.Background()); err != nil { diff --git a/bolt/kv.go b/bolt/kv.go index 2a0de90f879..85f99f08282 100644 --- a/bolt/kv.go +++ b/bolt/kv.go @@ -13,6 +13,8 @@ import ( "github.com/influxdata/influxdb/v2/kit/tracing" "github.com/influxdata/influxdb/v2/kv" + "github.com/influxdata/influxdb/v2/kv/migration" + "github.com/influxdata/influxdb/v2/kv/migration/all" "github.com/influxdata/influxdb/v2/pkg/fs" bolt "go.etcd.io/bbolt" "go.uber.org/zap" @@ -105,6 +107,14 @@ func (s *KVStore) Close() error { return nil } +func (s *KVStore) RLock() { + s.mu.RLock() +} + +func (s *KVStore) RUnlock() { + s.mu.RUnlock() +} + // DB returns a reference to the current Bolt database. func (s *KVStore) DB() *bolt.DB { s.mu.RLock() @@ -221,6 +231,11 @@ func (s *KVStore) Restore(ctx context.Context, r io.Reader) error { return err } + // Run the migrations on the restored database prior to swapping it in. + if err := s.migrateRestored(ctx); err != nil { + return err + } + // Swap and reopen under lock. s.mu.Lock() defer s.mu.Unlock() @@ -243,6 +258,33 @@ func (s *KVStore) Restore(ctx context.Context, r io.Reader) error { return nil } +// migrateRestored opens the database at the temporary path and applies the +// migrations to it. 
The database at the temporary path is closed after the +// migrations are complete. This should be used as part of the restore +// operation, prior to swapping the restored database with the active database. +func (s *KVStore) migrateRestored(ctx context.Context) error { + restoredClient := NewClient(s.log.With(zap.String("service", "restored bolt"))) + restoredClient.Path = s.tempPath() + if err := restoredClient.Open(ctx); err != nil { + return err + } + defer restoredClient.Close() + + restoredKV := NewKVStore(s.log.With(zap.String("service", "restored kvstore-bolt")), s.tempPath()) + restoredKV.WithDB(restoredClient.DB()) + + migrator, err := migration.NewMigrator( + s.log.With(zap.String("service", "bolt restore migrations")), + restoredKV, + all.Migrations[:]..., + ) + if err != nil { + return err + } + + return migrator.Up(ctx) +} + // Tx is a light wrapper around a boltdb transaction. It implements kv.Tx. type Tx struct { tx *bolt.Tx diff --git a/bolt/metrics.go b/bolt/metrics.go index 535827e4661..d47314cdd78 100644 --- a/bolt/metrics.go +++ b/bolt/metrics.go @@ -2,6 +2,7 @@ package bolt import ( "encoding/json" + "sync" "time" "github.com/prometheus/client_golang/prometheus" @@ -20,6 +21,8 @@ var ( scraperBucket = []byte("scraperv2") telegrafBucket = []byte("telegrafv1") telegrafPluginsBucket = []byte("telegrafPluginsv1") + remoteBucket = []byte("remotesv2") + replicationBucket = []byte("replicationsv2") userBucket = []byte("usersv1") ) @@ -64,6 +67,16 @@ var ( "Number of individual telegraf plugins configured", []string{"plugin"}, nil) + remoteDesc = prometheus.NewDesc( + "influxdb_remotes_total", + "Number of total remote connections configured on the server", + nil, nil) + + replicationDesc = prometheus.NewDesc( + "influxdb_replications_total", + "Number of total replication configurations on the server", + nil, nil) + boltWritesDesc = prometheus.NewDesc( "boltdb_writes_total", "Total number of boltdb writes", @@ -84,38 +97,110 @@ func (c *Client) Describe(ch chan<- *prometheus.Desc) { ch <- dashboardsDesc ch <- scrapersDesc ch <- telegrafsDesc - ch <- telegrafPluginsDesc + ch <- remoteDesc + ch <- replicationDesc ch <- boltWritesDesc ch <- boltReadsDesc + + c.pluginsCollector.Describe(ch) } -type instaTicker struct { - tick chan struct{} - timeCh <-chan time.Time +type pluginMetricsCollector struct { + ticker *time.Ticker + tickerDone chan struct{} + + // cacheMu protects cache + cacheMu sync.RWMutex + cache map[string]float64 } -var ( - // ticker is this influx' timer for when to renew the cache of configured plugin metrics. - ticker *instaTicker - // telegrafPlugins is a cache of this influx' metrics of configured plugins. - telegrafPlugins = map[string]float64{} -) +func (c *pluginMetricsCollector) Open(db *bolt.DB) { + go c.pollTelegrafStats(db) +} + +func (c *pluginMetricsCollector) pollTelegrafStats(db *bolt.DB) { + for { + select { + case <-c.tickerDone: + return + case <-c.ticker.C: + c.refreshTelegrafStats(db) + } + } +} -// Initialize a simple channel that will instantly "tick", -// backed by a time.Ticker's channel. -func init() { - ticker = &instaTicker{ - tick: make(chan struct{}, 1), - timeCh: time.NewTicker(time.Minute * 59).C, +func (c *pluginMetricsCollector) refreshTelegrafStats(db *bolt.DB) { + c.cacheMu.Lock() + defer c.cacheMu.Unlock() + + // Check if stats-polling got canceled between the point of receiving + // a tick and grabbing the lock. 
+ select { + case <-c.tickerDone: + return + default: } - ticker.tick <- struct{}{} + // Clear plugins from last check. + c.cache = map[string]float64{} - go func() { - for range ticker.timeCh { - ticker.tick <- struct{}{} + // Loop through all registered plugins. + _ = db.View(func(tx *bolt.Tx) error { + rawPlugins := [][]byte{} + if err := tx.Bucket(telegrafPluginsBucket).ForEach(func(k, v []byte) error { + rawPlugins = append(rawPlugins, v) + return nil + }); err != nil { + return err } - }() + + for _, v := range rawPlugins { + pStats := map[string]float64{} + if err := json.Unmarshal(v, &pStats); err != nil { + return err + } + + for k, v := range pStats { + c.cache[k] += v + } + } + + return nil + }) +} + +func (c *pluginMetricsCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- telegrafPluginsDesc +} + +func (c *pluginMetricsCollector) Collect(ch chan<- prometheus.Metric) { + c.cacheMu.RLock() + defer c.cacheMu.RUnlock() + + for k, v := range c.cache { + ch <- prometheus.MustNewConstMetric( + telegrafPluginsDesc, + prometheus.GaugeValue, + v, + k, // Adds a label for plugin type.name. + ) + } +} + +func (c *pluginMetricsCollector) Close() { + // Wait for any already-running cache-refresh procedures to complete. + c.cacheMu.Lock() + defer c.cacheMu.Unlock() + + close(c.tickerDone) +} + +func NewPluginMetricsCollector(tickDuration time.Duration) *pluginMetricsCollector { + return &pluginMetricsCollector{ + ticker: time.NewTicker(tickDuration), + tickerDone: make(chan struct{}), + cache: make(map[string]float64), + } } // Collect returns the current state of all metrics of the collector. @@ -138,44 +223,18 @@ func (c *Client) Collect(ch chan<- prometheus.Metric) { orgs, buckets, users, tokens := 0, 0, 0, 0 dashboards, scrapers, telegrafs := 0, 0, 0 + remotes, replications := 0, 0 _ = c.db.View(func(tx *bolt.Tx) error { buckets = tx.Bucket(bucketBucket).Stats().KeyN dashboards = tx.Bucket(dashboardBucket).Stats().KeyN orgs = tx.Bucket(organizationBucket).Stats().KeyN scrapers = tx.Bucket(scraperBucket).Stats().KeyN telegrafs = tx.Bucket(telegrafBucket).Stats().KeyN + remotes = tx.Bucket(remoteBucket).Stats().KeyN + replications = tx.Bucket(replicationBucket).Stats().KeyN tokens = tx.Bucket(authorizationBucket).Stats().KeyN users = tx.Bucket(userBucket).Stats().KeyN - - // Only process and store telegraf configs once per hour. - select { - case <-ticker.tick: - // Clear plugins from last check. - telegrafPlugins = map[string]float64{} - rawPlugins := [][]byte{} - - // Loop through all reported number of plugins in the least intrusive way - // (vs a global map and locking every time a config is updated). - tx.Bucket(telegrafPluginsBucket).ForEach(func(k, v []byte) error { - rawPlugins = append(rawPlugins, v) - return nil - }) - - for _, v := range rawPlugins { - pStats := map[string]float64{} - if err := json.Unmarshal(v, &pStats); err != nil { - return err - } - - for k, v := range pStats { - telegrafPlugins[k] += v - } - } - - return nil - default: - return nil - } + return nil }) ch <- prometheus.MustNewConstMetric( @@ -220,12 +279,17 @@ func (c *Client) Collect(ch chan<- prometheus.Metric) { float64(telegrafs), ) - for k, v := range telegrafPlugins { - ch <- prometheus.MustNewConstMetric( - telegrafPluginsDesc, - prometheus.GaugeValue, - v, - k, // Adds a label for plugin type.name. 
- ) - } + ch <- prometheus.MustNewConstMetric( + remoteDesc, + prometheus.CounterValue, + float64(remotes), + ) + + ch <- prometheus.MustNewConstMetric( + replicationDesc, + prometheus.CounterValue, + float64(replications), + ) + + c.pluginsCollector.Collect(ch) } diff --git a/bolt/metrics_test.go b/bolt/metrics_test.go index d72bdbd82f7..fc834279f1d 100644 --- a/bolt/metrics_test.go +++ b/bolt/metrics_test.go @@ -1,14 +1,23 @@ package bolt_test import ( + "context" "testing" + "time" + "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/bolt" "github.com/influxdata/influxdb/v2/kit/prom" "github.com/influxdata/influxdb/v2/kit/prom/promtest" + "github.com/influxdata/influxdb/v2/kv/migration/all" + telegrafservice "github.com/influxdata/influxdb/v2/telegraf/service" + "github.com/stretchr/testify/require" "go.uber.org/zap/zaptest" ) func TestInitialMetrics(t *testing.T) { + t.Parallel() + client, teardown, err := NewTestClient(t) if err != nil { t.Fatalf("unable to setup bolt client: %v", err) @@ -29,6 +38,8 @@ func TestInitialMetrics(t *testing.T) { "influxdb_users_total": 0, "influxdb_tokens_total": 0, "influxdb_dashboards_total": 0, + "influxdb_remotes_total": 0, + "influxdb_replications_total": 0, "boltdb_reads_total": 0, } for name, count := range metrics { @@ -38,3 +49,83 @@ func TestInitialMetrics(t *testing.T) { } } } + +func TestPluginMetrics(t *testing.T) { + t.Parallel() + + // Set up a BoltDB, and register a telegraf config. + client, teardown, err := NewTestClient(t) + require.NoError(t, err) + defer teardown() + + ctx := context.Background() + log := zaptest.NewLogger(t) + kvStore := bolt.NewKVStore(log, client.Path) + kvStore.WithDB(client.DB()) + require.NoError(t, all.Up(ctx, log, kvStore)) + + tsvc := telegrafservice.New(kvStore) + tconf := influxdb.TelegrafConfig{ + Name: "test", + Config: "[[inputs.cpu]]\n[[outputs.influxdb_v2]]", + OrgID: 1, + } + require.NoError(t, tsvc.CreateTelegrafConfig(ctx, &tconf, 1)) + + // Run a plugin metrics collector with a quicker tick interval than the default. + pluginCollector := bolt.NewPluginMetricsCollector(time.Millisecond) + pluginCollector.Open(client.DB()) + defer pluginCollector.Close() + + reg := prom.NewRegistry(zaptest.NewLogger(t)) + reg.MustRegister(pluginCollector) + + // Run a periodic gather in the background. + gatherTick := time.NewTicker(time.Millisecond) + doneCh := make(chan struct{}) + defer close(doneCh) + + go func() { + for { + select { + case <-doneCh: + return + case <-gatherTick.C: + _, err := reg.Gather() + require.NoError(t, err) + } + } + }() + + // Run a few gathers to see if any race conditions are flushed out. + time.Sleep(250 * time.Millisecond) + + // Gather plugin metrics and ensure they're correct. + metrics, err := reg.Gather() + require.NoError(t, err) + inCpu := promtest.MustFindMetric(t, metrics, "influxdb_telegraf_plugins_count", map[string]string{"plugin": "inputs.cpu"}) + outInfluxDb := promtest.MustFindMetric(t, metrics, "influxdb_telegraf_plugins_count", map[string]string{"plugin": "outputs.influxdb_v2"}) + require.Equal(t, 1, int(inCpu.GetGauge().GetValue())) + require.Equal(t, 1, int(outInfluxDb.GetGauge().GetValue())) + + // Register some more plugins. + tconf = influxdb.TelegrafConfig{ + Name: "test", + Config: "[[inputs.mem]]\n[[outputs.influxdb_v2]]", + OrgID: 1, + } + require.NoError(t, tsvc.CreateTelegrafConfig(ctx, &tconf, 2)) + + // Let a few more background gathers run. 
+	time.Sleep(250 * time.Millisecond)
+
+	// Gather again, and ensure plugin metrics have been updated.
+	metrics, err = reg.Gather()
+	require.NoError(t, err)
+	inCpu = promtest.MustFindMetric(t, metrics, "influxdb_telegraf_plugins_count", map[string]string{"plugin": "inputs.cpu"})
+	inMem := promtest.MustFindMetric(t, metrics, "influxdb_telegraf_plugins_count", map[string]string{"plugin": "inputs.mem"})
+	outInfluxDb = promtest.MustFindMetric(t, metrics, "influxdb_telegraf_plugins_count", map[string]string{"plugin": "outputs.influxdb_v2"})
+	require.Equal(t, 1, int(inCpu.GetGauge().GetValue()))
+	require.Equal(t, 1, int(inMem.GetGauge().GetValue()))
+	require.Equal(t, 2, int(outInfluxDb.GetGauge().GetValue()))
+}
diff --git a/bucket.go b/bucket.go
index 9dbc70552a6..095a9034f8e 100644
--- a/bucket.go
+++ b/bucket.go
@@ -5,6 +5,9 @@ import (
 	"fmt"
 	"strings"
 	"time"
+
+	"github.com/influxdata/influxdb/v2/kit/platform"
+	"github.com/influxdata/influxdb/v2/kit/platform/errors"
 )
 
 const (
@@ -29,13 +32,14 @@ const InfiniteRetention = 0
 // Bucket is a bucket. 🎉
 type Bucket struct {
-	ID                  ID            `json:"id,omitempty"`
-	OrgID               ID            `json:"orgID,omitempty"`
+	ID                  platform.ID   `json:"id,omitempty"`
+	OrgID               platform.ID   `json:"orgID,omitempty"`
 	Type                BucketType    `json:"type"`
 	Name                string        `json:"name"`
 	Description         string        `json:"description"`
 	RetentionPolicyName string        `json:"rp,omitempty"` // This is to support v1 sources
 	RetentionPeriod     time.Duration `json:"retentionPeriod"`
+	ShardGroupDuration  time.Duration `json:"shardGroupDuration"`
 	CRUDLog
 }
@@ -78,7 +82,7 @@ var (
 // BucketService represents a service for managing bucket data.
 type BucketService interface {
 	// FindBucketByID returns a single bucket by ID.
-	FindBucketByID(ctx context.Context, id ID) (*Bucket, error)
+	FindBucketByID(ctx context.Context, id platform.ID) (*Bucket, error)
 
 	// FindBucket returns the first bucket that matches filter.
 	FindBucket(ctx context.Context, filter BucketFilter) (*Bucket, error)
@@ -92,26 +96,27 @@ type BucketService interface {
 	// UpdateBucket updates a single bucket with changeset.
 	// Returns the new bucket state after update.
-	UpdateBucket(ctx context.Context, id ID, upd BucketUpdate) (*Bucket, error)
+	UpdateBucket(ctx context.Context, id platform.ID, upd BucketUpdate) (*Bucket, error)
 
 	// DeleteBucket removes a bucket by ID.
-	DeleteBucket(ctx context.Context, id ID) error
-	FindBucketByName(ctx context.Context, orgID ID, name string) (*Bucket, error)
+	DeleteBucket(ctx context.Context, id platform.ID) error
+	FindBucketByName(ctx context.Context, orgID platform.ID, name string) (*Bucket, error)
 }
 
 // BucketUpdate represents updates to a bucket.
 // Only fields which are set are updated.
 type BucketUpdate struct {
-	Name            *string        `json:"name,omitempty"`
-	Description     *string        `json:"description,omitempty"`
-	RetentionPeriod *time.Duration `json:"retentionPeriod,omitempty"`
+	Name               *string
+	Description        *string
+	RetentionPeriod    *time.Duration
+	ShardGroupDuration *time.Duration
 }
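With `ShardGroupDuration` now sitting alongside `RetentionPeriod` on both `Bucket` and `BucketUpdate`, callers can tune the two together; only the fields set on the update are applied. A sketch of such a call (the helper name is hypothetical, the service wiring assumed):

```go
package main

import (
	"context"
	"time"

	influxdb "github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/kit/platform"
)

// shrinkBucket drops a bucket to 24h of retention with 1h shard groups.
// Fields left nil on BucketUpdate are untouched by UpdateBucket.
func shrinkBucket(ctx context.Context, svc influxdb.BucketService, id platform.ID) (*influxdb.Bucket, error) {
	retention := 24 * time.Hour
	shardDur := time.Hour
	return svc.UpdateBucket(ctx, id, influxdb.BucketUpdate{
		RetentionPeriod:    &retention,
		ShardGroupDuration: &shardDur,
	})
}
```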
 // BucketFilter represents a set of filters that restrict the returned results.
 type BucketFilter struct {
-	ID             *ID
+	ID             *platform.ID
 	Name           *string
-	OrganizationID *ID
+	OrganizationID *platform.ID
 	Org            *string
 }
@@ -157,9 +162,9 @@ func (f BucketFilter) String() string {
 	return "[" + strings.Join(parts, ", ") + "]"
 }
 
-func ErrInternalBucketServiceError(op string, err error) *Error {
-	return &Error{
-		Code: EInternal,
+func ErrInternalBucketServiceError(op string, err error) *errors.Error {
+	return &errors.Error{
+		Code: errors.EInternal,
 		Msg:  fmt.Sprintf("unexpected error in buckets; Err: %v", err),
 		Op:   op,
 		Err:  err,
diff --git a/check.go b/check.go
index 24fcd95ec99..63c21aa9c09 100644
--- a/check.go
+++ b/check.go
@@ -3,6 +3,10 @@ package influxdb
 import (
 	"context"
 	"encoding/json"
+
+	"github.com/influxdata/influxdb/v2/kit/platform"
+	"github.com/influxdata/influxdb/v2/kit/platform/errors"
+	"github.com/influxdata/influxdb/v2/query/fluxlang"
 )
 
 // consts for checks config.
@@ -13,25 +17,25 @@ const (
 // Check represents the information required to generate a periodic check task.
 type Check interface {
-	Valid(lang FluxLanguageService) error
+	Valid(lang fluxlang.FluxLanguageService) error
 	Type() string
 	ClearPrivateData()
-	SetTaskID(ID)
-	GetTaskID() ID
-	GetOwnerID() ID
-	SetOwnerID(ID)
-	GenerateFlux(lang FluxLanguageService) (string, error)
+	SetTaskID(platform.ID)
+	GetTaskID() platform.ID
+	GetOwnerID() platform.ID
+	SetOwnerID(platform.ID)
+	GenerateFlux(lang fluxlang.FluxLanguageService) (string, error)
 	json.Marshaler
 
 	CRUDLogSetter
-	SetID(id ID)
-	SetOrgID(id ID)
+	SetID(id platform.ID)
+	SetOrgID(id platform.ID)
 	SetName(name string)
 	SetDescription(description string)
 
-	GetID() ID
+	GetID() platform.ID
 	GetCRUDLog() CRUDLog
-	GetOrgID() ID
+	GetOrgID() platform.ID
 	GetName() string
 	GetDescription() string
 }
@@ -49,7 +53,7 @@ var (
 // CheckService represents a service for managing checks.
 type CheckService interface {
 	// FindCheckByID returns a single check by ID.
-	FindCheckByID(ctx context.Context, id ID) (Check, error)
+	FindCheckByID(ctx context.Context, id platform.ID) (Check, error)
 
 	// FindCheck returns the first check that matches filter.
 	FindCheck(ctx context.Context, filter CheckFilter) (Check, error)
@@ -59,18 +63,18 @@ type CheckService interface {
 	FindChecks(ctx context.Context, filter CheckFilter, opt ...FindOptions) ([]Check, int, error)
 
 	// CreateCheck creates a new check and sets c.ID with the new identifier.
-	CreateCheck(ctx context.Context, c CheckCreate, userID ID) error
+	CreateCheck(ctx context.Context, c CheckCreate, userID platform.ID) error
 
 	// UpdateCheck updates the whole check.
 	// Returns the new check state after update.
-	UpdateCheck(ctx context.Context, id ID, c CheckCreate) (Check, error)
+	UpdateCheck(ctx context.Context, id platform.ID, c CheckCreate) (Check, error)
 
 	// PatchCheck updates a single check with changeset.
 	// Returns the new check state after update.
-	PatchCheck(ctx context.Context, id ID, upd CheckUpdate) (Check, error)
+	PatchCheck(ctx context.Context, id platform.ID, upd CheckUpdate) (Check, error)
 
 	// DeleteCheck will delete the check by id.
-	DeleteCheck(ctx context.Context, id ID) error
+	DeleteCheck(ctx context.Context, id platform.ID) error
 }
 
 // CheckUpdate are properties that can be updated on a check
@@ -89,15 +93,15 @@ type CheckCreate struct {
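`CheckUpdate` treats a set-but-empty string as invalid rather than as a request to clear the field, as the `Valid` method below shows. A usage sketch, assuming the `Name` pointer field that `Valid` dereferences:

```go
package main

import (
	"fmt"

	influxdb "github.com/influxdata/influxdb/v2"
)

func main() {
	empty := ""
	upd := influxdb.CheckUpdate{Name: &empty}

	// A non-nil but empty name fails validation; leaving Name nil
	// (field not set) would pass.
	if err := upd.Valid(); err != nil {
		fmt.Println(err) // the error explains that the check name can't be empty
	}
}
```

 // Valid returns an error if the update is invalid.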
func (n *CheckUpdate) Valid() error { if n.Name != nil && *n.Name == "" { - return &Error{ - Code: EInvalid, + return &errors.Error{ + Code: errors.EInvalid, Msg: "Check Name can't be empty", } } if n.Description != nil && *n.Description == "" { - return &Error{ - Code: EInvalid, + return &errors.Error{ + Code: errors.EInvalid, Msg: "Check Description can't be empty", } } @@ -113,9 +117,9 @@ func (n *CheckUpdate) Valid() error { // CheckFilter represents a set of filters that restrict the returned results. type CheckFilter struct { - ID *ID + ID *platform.ID Name *string - OrgID *ID + OrgID *platform.ID Org *string UserResourceMappingFilter } diff --git a/checks/service.go b/checks/service.go index 98ad8964594..5277f911440 100644 --- a/checks/service.go +++ b/checks/service.go @@ -5,11 +5,14 @@ import ( "fmt" "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/kit/platform" + "github.com/influxdata/influxdb/v2/kit/platform/errors" "github.com/influxdata/influxdb/v2/kit/tracing" "github.com/influxdata/influxdb/v2/kv" "github.com/influxdata/influxdb/v2/notification/check" "github.com/influxdata/influxdb/v2/query/fluxlang" "github.com/influxdata/influxdb/v2/snowflake" + "github.com/influxdata/influxdb/v2/task/taskmodel" "go.uber.org/zap" ) @@ -23,16 +26,16 @@ type Service struct { log *zap.Logger orgs influxdb.OrganizationService - tasks influxdb.TaskService + tasks taskmodel.TaskService timeGenerator influxdb.TimeGenerator - idGenerator influxdb.IDGenerator + idGenerator platform.IDGenerator checkStore *kv.IndexStore } // NewService constructs and configures a new checks.Service -func NewService(logger *zap.Logger, store kv.Store, orgs influxdb.OrganizationService, tasks influxdb.TaskService) *Service { +func NewService(logger *zap.Logger, store kv.Store, orgs influxdb.OrganizationService, tasks taskmodel.TaskService) *Service { return &Service{ kv: store, log: logger, @@ -83,7 +86,7 @@ func newCheckStore() *kv.IndexStore { } // FindCheckByID retrieves a check by id. -func (s *Service) FindCheckByID(ctx context.Context, id influxdb.ID) (influxdb.Check, error) { +func (s *Service) FindCheckByID(ctx context.Context, id platform.ID) (influxdb.Check, error) { span, ctx := tracing.StartSpanFromContext(ctx) defer span.Finish() @@ -103,7 +106,7 @@ func (s *Service) FindCheckByID(ctx context.Context, id influxdb.ID) (influxdb.C return c, nil } -func (s *Service) findCheckByID(ctx context.Context, tx kv.Tx, id influxdb.ID) (influxdb.Check, error) { +func (s *Service) findCheckByID(ctx context.Context, tx kv.Tx, id platform.ID) (influxdb.Check, error) { chkVal, err := s.checkStore.FindEnt(ctx, tx, kv.Entity{PK: kv.EncID(id)}) if err != nil { return nil, err @@ -111,7 +114,7 @@ func (s *Service) findCheckByID(ctx context.Context, tx kv.Tx, id influxdb.ID) ( return chkVal.(influxdb.Check), nil } -func (s *Service) findCheckByName(ctx context.Context, tx kv.Tx, orgID influxdb.ID, name string) (influxdb.Check, error) { +func (s *Service) findCheckByName(ctx context.Context, tx kv.Tx, orgID platform.ID, name string) (influxdb.Check, error) { span, ctx := tracing.StartSpanFromContext(ctx) defer span.Finish() @@ -119,8 +122,8 @@ func (s *Service) findCheckByName(ctx context.Context, tx kv.Tx, orgID influxdb. 
UniqueKey: kv.Encode(kv.EncID(orgID), kv.EncString(name)), }) if kv.IsNotFound(err) { - return nil, &influxdb.Error{ - Code: influxdb.ENotFound, + return nil, &errors.Error{ + Code: errors.ENotFound, Err: err, } } @@ -186,8 +189,8 @@ func (s *Service) FindCheck(ctx context.Context, filter influxdb.CheckFilter) (i } if c == nil { - return nil, &influxdb.Error{ - Code: influxdb.ENotFound, + return nil, &errors.Error{ + Code: errors.ENotFound, Msg: "check not found", } } @@ -227,7 +230,7 @@ func (s *Service) FindChecks(ctx context.Context, filter influxdb.CheckFilter, o if filter.Org != nil { o, err := s.orgs.FindOrganization(ctx, influxdb.OrganizationFilter{Name: filter.Org}) if err != nil { - return nil, 0, &influxdb.Error{Err: err} + return nil, 0, &errors.Error{Err: err} } filter.OrgID = &o.ID @@ -270,7 +273,7 @@ func (s *Service) FindChecks(ctx context.Context, filter influxdb.CheckFilter, o } // CreateCheck creates a influxdb check and sets ID. -func (s *Service) CreateCheck(ctx context.Context, c influxdb.CheckCreate, userID influxdb.ID) (err error) { +func (s *Service) CreateCheck(ctx context.Context, c influxdb.CheckCreate, userID platform.ID) (err error) { span, ctx := tracing.StartSpanFromContext(ctx) defer span.Finish() @@ -280,8 +283,8 @@ func (s *Service) CreateCheck(ctx context.Context, c influxdb.CheckCreate, userI if c.GetOrgID().Valid() { if _, err := s.orgs.FindOrganizationByID(ctx, c.GetOrgID()); err != nil { - return &influxdb.Error{ - Code: influxdb.ENotFound, + return &errors.Error{ + Code: errors.ENotFound, Op: influxdb.OpCreateCheck, Err: err, } @@ -301,8 +304,8 @@ func (s *Service) CreateCheck(ctx context.Context, c influxdb.CheckCreate, userI // create task initially in inactive state t, err := s.createCheckTask(ctx, c) if err != nil { - return &influxdb.Error{ - Code: influxdb.EInvalid, + return &errors.Error{ + Code: errors.EInvalid, Msg: "Could not create task from check", Err: err, } @@ -327,7 +330,7 @@ func (s *Service) CreateCheck(ctx context.Context, c influxdb.CheckCreate, userI // update task to be in matching state to check if influxdb.Status(t.Status) != c.Status { - _, err = s.tasks.UpdateTask(ctx, t.ID, influxdb.TaskUpdate{ + _, err = s.tasks.UpdateTask(ctx, t.ID, taskmodel.TaskUpdate{ Status: strPtr(string(c.Status)), }) } @@ -335,13 +338,13 @@ func (s *Service) CreateCheck(ctx context.Context, c influxdb.CheckCreate, userI return err } -func (s *Service) createCheckTask(ctx context.Context, c influxdb.CheckCreate) (*influxdb.Task, error) { +func (s *Service) createCheckTask(ctx context.Context, c influxdb.CheckCreate) (*taskmodel.Task, error) { script, err := c.GenerateFlux(fluxlang.DefaultService) if err != nil { return nil, err } - tc := influxdb.TaskCreate{ + tc := taskmodel.TaskCreate{ Type: c.Type(), Flux: script, OwnerID: c.GetOwnerID(), @@ -378,7 +381,7 @@ func (s *Service) putCheck(ctx context.Context, tx kv.Tx, c influxdb.Check, opts } // PatchCheck updates a check according the parameters set on upd. -func (s *Service) PatchCheck(ctx context.Context, id influxdb.ID, upd influxdb.CheckUpdate) (influxdb.Check, error) { +func (s *Service) PatchCheck(ctx context.Context, id platform.ID, upd influxdb.CheckUpdate) (influxdb.Check, error) { span, ctx := tracing.StartSpanFromContext(ctx) defer span.Finish() @@ -408,7 +411,7 @@ func (s *Service) PatchCheck(ctx context.Context, id influxdb.ID, upd influxdb.C } // UpdateCheck updates the check. 
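The status-synchronization pattern above, pushing a check's status onto its backing task through a `taskmodel.TaskUpdate`, reduces to a small helper. A sketch under the types shown in this diff (the function name is hypothetical):

```go
package main

import (
	"context"

	influxdb "github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/kit/platform"
	"github.com/influxdata/influxdb/v2/task/taskmodel"
)

// syncTaskStatus flips the task backing a check to the check's status,
// e.g. after a check is created inactive and then enabled.
func syncTaskStatus(ctx context.Context, tasks taskmodel.TaskService, taskID platform.ID, status influxdb.Status) error {
	s := string(status)
	_, err := tasks.UpdateTask(ctx, taskID, taskmodel.TaskUpdate{Status: &s})
	return err
}
```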
-func (s *Service) UpdateCheck(ctx context.Context, id influxdb.ID, chk influxdb.CheckCreate) (influxdb.Check, error) { +func (s *Service) UpdateCheck(ctx context.Context, id platform.ID, chk influxdb.CheckCreate) (influxdb.Check, error) { span, ctx := tracing.StartSpanFromContext(ctx) defer span.Finish() @@ -437,7 +440,7 @@ func (s *Service) updateCheckTask(ctx context.Context, chk influxdb.CheckCreate) return err } - tu := influxdb.TaskUpdate{ + tu := taskmodel.TaskUpdate{ Flux: &flux, Description: strPtr(chk.GetDescription()), } @@ -453,8 +456,8 @@ func (s *Service) updateCheckTask(ctx context.Context, chk influxdb.CheckCreate) return err } -func (s *Service) patchCheckTask(ctx context.Context, taskID influxdb.ID, upd influxdb.CheckUpdate) error { - tu := influxdb.TaskUpdate{ +func (s *Service) patchCheckTask(ctx context.Context, taskID platform.ID, upd influxdb.CheckUpdate) error { + tu := taskmodel.TaskUpdate{ Description: upd.Description, } @@ -469,7 +472,7 @@ func (s *Service) patchCheckTask(ctx context.Context, taskID influxdb.ID, upd in return nil } -func (s *Service) updateCheck(ctx context.Context, tx kv.Tx, id influxdb.ID, chk influxdb.CheckCreate) (influxdb.Check, error) { +func (s *Service) updateCheck(ctx context.Context, tx kv.Tx, id platform.ID, chk influxdb.CheckCreate) (influxdb.Check, error) { span, ctx := tracing.StartSpanFromContext(ctx) defer span.Finish() @@ -483,8 +486,8 @@ func (s *Service) updateCheck(ctx context.Context, tx kv.Tx, id influxdb.ID, chk if chk.GetName() != current.GetName() { c0, err := s.findCheckByName(ctx, tx, current.GetOrgID(), chk.GetName()) if err == nil && c0.GetID() != id { - return nil, &influxdb.Error{ - Code: influxdb.EConflict, + return nil, &errors.Error{ + Code: errors.EConflict, Msg: "check name is not unique", } } @@ -497,7 +500,7 @@ func (s *Service) updateCheck(ctx context.Context, tx kv.Tx, id influxdb.ID, chk } } - // ID and OrganizationID can not be updated + // ID, OrganizationID, and OwnerID can not be updated. chk.SetID(current.GetID()) chk.SetOrgID(current.GetOrgID()) chk.SetOwnerID(current.GetOwnerID()) @@ -542,7 +545,7 @@ func (s *Service) patchCheck(ctx context.Context, tx kv.Tx, check influxdb.Check } // DeleteCheck deletes a check and prunes it from the index. 
-func (s *Service) DeleteCheck(ctx context.Context, id influxdb.ID) error { +func (s *Service) DeleteCheck(ctx context.Context, id platform.ID) error { ch, err := s.FindCheckByID(ctx, id) if err != nil { return err diff --git a/checks/service_external_test.go b/checks/service_external_test.go index e7c82c3bb9c..a6abc97845a 100644 --- a/checks/service_external_test.go +++ b/checks/service_external_test.go @@ -12,9 +12,13 @@ import ( "github.com/google/go-cmp/cmp/cmpopts" "github.com/influxdata/flux/ast" "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/kit/platform" + "github.com/influxdata/influxdb/v2/kit/platform/errors" "github.com/influxdata/influxdb/v2/mock" "github.com/influxdata/influxdb/v2/notification" "github.com/influxdata/influxdb/v2/notification/check" + "github.com/influxdata/influxdb/v2/task/taskmodel" + itesting "github.com/influxdata/influxdb/v2/testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -157,32 +161,37 @@ var taskCmpOptions = cmp.Options{ }), // skip comparing permissions cmpopts.IgnoreFields( - influxdb.Task{}, + taskmodel.Task{}, "LatestCompleted", "LatestScheduled", "CreatedAt", "UpdatedAt", ), - cmp.Transformer("Sort", func(in []*influxdb.Task) []*influxdb.Task { - out := append([]*influxdb.Task{}, in...) // Copy input to avoid mutating it + cmp.Transformer("Sort", func(in []*taskmodel.Task) []*taskmodel.Task { + out := append([]*taskmodel.Task{}, in...) // Copy input to avoid mutating it sort.Slice(out, func(i, j int) bool { return out[i].ID > out[j].ID }) return out }), + cmp.Transformer("FormatFlux", func(in taskmodel.Task) taskmodel.Task { + newTask := in + newTask.Flux = itesting.FormatFluxString(&testing.T{}, newTask.Flux) + return newTask + }), } // CheckFields will include the IDGenerator, and checks type CheckFields struct { - IDGenerator influxdb.IDGenerator + IDGenerator platform.IDGenerator TimeGenerator influxdb.TimeGenerator - TaskService influxdb.TaskService + TaskService taskmodel.TaskService Checks []influxdb.Check Organizations []*influxdb.Organization - Tasks []influxdb.TaskCreate + Tasks []taskmodel.TaskCreate } -type checkServiceFactory func(CheckFields, *testing.T) (influxdb.CheckService, influxdb.TaskService, string, func()) +type checkServiceFactory func(CheckFields, *testing.T) (influxdb.CheckService, taskmodel.TaskService, string, func()) type checkServiceF func( init checkServiceFactory, @@ -240,13 +249,13 @@ func CreateCheck( t *testing.T, ) { type args struct { - userID influxdb.ID + userID platform.ID check influxdb.Check } type wants struct { - err *influxdb.Error + err *errors.Error checks []influxdb.Check - tasks []*influxdb.Task + tasks []*taskmodel.Task } tests := []struct { @@ -305,7 +314,7 @@ func CreateCheck( }, }, wants: wants{ - tasks: []*influxdb.Task{ + tasks: []*taskmodel.Task{ { ID: MustIDBase16("020f755c3c082000"), Name: "name1", @@ -314,7 +323,7 @@ func CreateCheck( Organization: "theorg", OwnerID: MustIDBase16("020f755c3c082001"), Status: "active", - Flux: "package main\nimport \"influxdata/influxdb/monitor\"\nimport \"experimental\"\nimport \"influxdata/influxdb/v1\"\n\ndata = from(bucket: \"telegraf\")\n\t|> range(start: -1h)\n\t|> filter(fn: (r) =>\n\t\t(r._field == \"usage_user\"))\n\noption task = {name: \"name1\", every: 1m}\n\ncheck = {\n\t_check_id: \"020f755c3c082000\",\n\t_check_name: \"name1\",\n\t_type: \"deadman\",\n\ttags: {k1: \"v1\", k2: 
\"v2\"},\n}\ncrit = (r) =>\n\t(r[\"dead\"])\nmessageFn = (r) =>\n\t(\"msg1\")\n\ndata\n\t|> v1[\"fieldsAsCols\"]()\n\t|> monitor[\"deadman\"](t: experimental[\"subDuration\"](from: now(), d: 21s))\n\t|> monitor[\"check\"](data: check, messageFn: messageFn, crit: crit)", + Flux: "import \"influxdata/influxdb/monitor\"\nimport \"experimental\"\nimport \"influxdata/influxdb/v1\"\n\ndata = from(bucket: \"telegraf\") |> range(start: -1h) |> filter(fn: (r) => r._field == \"usage_user\")\n\noption task = {name: \"name1\", every: 1m}\n\ncheck = {_check_id: \"020f755c3c082000\", _check_name: \"name1\", _type: \"deadman\", tags: {k1: \"v1\", k2: \"v2\"}}\ncrit = (r) => r[\"dead\"]\nmessageFn = (r) => \"msg1\"\n\ndata\n |> v1[\"fieldsAsCols\"]()\n |> monitor[\"deadman\"](t: experimental[\"subDuration\"](from: now(), d: 21s))\n |> monitor[\"check\"](data: check, messageFn: messageFn, crit: crit)\n", Every: "1m", }, }, @@ -369,7 +378,7 @@ func CreateCheck( name: "basic create check", fields: CheckFields{ IDGenerator: &mock.IDGenerator{ - IDFn: func() influxdb.ID { + IDFn: func() platform.ID { return MustIDBase16(checkTwoID) }, }, @@ -434,7 +443,7 @@ func CreateCheck( deadman1, threshold1, }, - tasks: []*influxdb.Task{ + tasks: []*taskmodel.Task{ { ID: MustIDBase16("020f755c3c082001"), Name: "name2", @@ -444,7 +453,29 @@ func CreateCheck( OwnerID: MustIDBase16("020f755c3c082005"), Status: "active", Every: "1m", - Flux: "package main\nimport \"influxdata/influxdb/monitor\"\nimport \"influxdata/influxdb/v1\"\n\ndata = from(bucket: \"telegraf\")\n\t|> range(start: -1m)\n\t|> filter(fn: (r) =>\n\t\t(r._field == \"usage_user\"))\n\noption task = {name: \"name2\", every: 1m}\n\ncheck = {\n\t_check_id: \"020f755c3c082001\",\n\t_check_name: \"name2\",\n\t_type: \"threshold\",\n\ttags: {k11: \"v11\"},\n}\nok = (r) =>\n\t(r[\"usage_user\"] < 1000.0)\nwarn = (r) =>\n\t(r[\"usage_user\"] > 2000.0)\ninfo = (r) =>\n\t(r[\"usage_user\"] < 1900.0 and r[\"usage_user\"] > 1500.0)\nmessageFn = (r) =>\n\t(\"msg2\")\n\ndata\n\t|> v1[\"fieldsAsCols\"]()\n\t|> monitor[\"check\"](\n\t\tdata: check,\n\t\tmessageFn: messageFn,\n\t\tok: ok,\n\t\twarn: warn,\n\t\tinfo: info,\n\t)", + Flux: `import "influxdata/influxdb/monitor" +import "influxdata/influxdb/v1" + +data = from(bucket: "telegraf") |> range(start: -1m) |> filter(fn: (r) => r._field == "usage_user") + +option task = {name: "name2", every: 1m} + +check = {_check_id: "020f755c3c082001", _check_name: "name2", _type: "threshold", tags: {k11: "v11"}} +ok = (r) => r["usage_user"] < 1000.0 +warn = (r) => r["usage_user"] > 2000.0 +info = (r) => r["usage_user"] < 1900.0 and r["usage_user"] > 1500.0 +messageFn = (r) => "msg2" + +data + |> v1["fieldsAsCols"]() + |> monitor["check"]( + data: check, + messageFn: messageFn, + ok: ok, + warn: warn, + info: info, + ) +`, }, }, }, @@ -453,7 +484,7 @@ func CreateCheck( name: "names should be unique within an organization", fields: CheckFields{ IDGenerator: &mock.IDGenerator{ - IDFn: func() influxdb.ID { + IDFn: func() platform.ID { return MustIDBase16(checkTwoID) }, }, @@ -508,8 +539,8 @@ func CreateCheck( checks: []influxdb.Check{ deadman1, }, - err: &influxdb.Error{ - Code: influxdb.EConflict, + err: &errors.Error{ + Code: errors.EConflict, Op: influxdb.OpCreateCheck, Msg: "check is not unique", }, @@ -519,7 +550,7 @@ func CreateCheck( name: "names should not be unique across organizations", fields: CheckFields{ IDGenerator: &mock.IDGenerator{ - IDFn: func() influxdb.ID { + IDFn: func() platform.ID { return MustIDBase16(checkTwoID) }, 
}, @@ -571,7 +602,7 @@ func CreateCheck( }, }, wants: wants{ - tasks: []*influxdb.Task{ + tasks: []*taskmodel.Task{ { ID: MustIDBase16("020f755c3c082001"), Name: "name1", @@ -581,7 +612,7 @@ func CreateCheck( OwnerID: MustIDBase16("020f755c3c082001"), Status: "active", Every: "1m", - Flux: "package main\nimport \"influxdata/influxdb/monitor\"\nimport \"influxdata/influxdb/v1\"\n\ndata = from(bucket: \"telegraf\")\n\t|> range(start: -1m)\n\t|> filter(fn: (r) =>\n\t\t(r._field == \"usage_user\"))\n\noption task = {name: \"name1\", every: 1m}\n\ncheck = {\n\t_check_id: \"020f755c3c082001\",\n\t_check_name: \"name1\",\n\t_type: \"threshold\",\n\ttags: {k11: \"v11\", k22: \"v22\"},\n}\nmessageFn = (r) =>\n\t(\"msg2\")\n\ndata\n\t|> v1[\"fieldsAsCols\"]()\n\t|> monitor[\"check\"](data: check, messageFn: messageFn)", + Flux: "import \"influxdata/influxdb/monitor\"\nimport \"influxdata/influxdb/v1\"\n\ndata = from(bucket: \"telegraf\") |> range(start: -1m) |> filter(fn: (r) => r._field == \"usage_user\")\n\noption task = {name: \"name1\", every: 1m}\n\ncheck = {_check_id: \"020f755c3c082001\", _check_name: \"name1\", _type: \"threshold\", tags: {k11: \"v11\", k22: \"v22\"}}\nmessageFn = (r) => \"msg2\"\n\ndata |> v1[\"fieldsAsCols\"]() |> monitor[\"check\"](data: check, messageFn: messageFn)\n", }, }, checks: []influxdb.Check{ @@ -670,8 +701,8 @@ func CreateCheck( }, wants: wants{ checks: []influxdb.Check{}, - err: &influxdb.Error{ - Code: influxdb.ENotFound, + err: &errors.Error{ + Code: errors.ENotFound, Msg: "organization not found", Op: influxdb.OpCreateCheck, }, @@ -698,7 +729,7 @@ func CreateCheck( t.Errorf("checks are different -got/+want\ndiff %s", diff) } - foundTasks, _, err := tasks.FindTasks(ctx, influxdb.TaskFilter{}) + foundTasks, _, err := tasks.FindTasks(ctx, taskmodel.TaskFilter{}) if err != nil { t.Fatal(err) } @@ -716,10 +747,10 @@ func FindCheckByID( t *testing.T, ) { type args struct { - id influxdb.ID + id platform.ID } type wants struct { - err *influxdb.Error + err *errors.Error check influxdb.Check } @@ -768,8 +799,8 @@ func FindCheckByID( id: MustIDBase16(threeID), }, wants: wants{ - err: &influxdb.Error{ - Code: influxdb.ENotFound, + err: &errors.Error{ + Code: errors.ENotFound, Op: influxdb.OpFindCheckByID, Msg: "check not found", }, @@ -799,11 +830,11 @@ func FindChecks( t *testing.T, ) { type args struct { - ID influxdb.ID + ID platform.ID name string organization string - OrgID influxdb.ID - userID influxdb.ID + OrgID platform.ID + userID platform.ID findOptions influxdb.FindOptions } @@ -1034,10 +1065,10 @@ func DeleteCheck( ) { type args struct { ID string - userID influxdb.ID + userID platform.ID } type wants struct { - err *influxdb.Error + err *errors.Error checks []influxdb.Check } @@ -1057,7 +1088,7 @@ func DeleteCheck( ID: MustIDBase16(orgOneID), }, }, - Tasks: []influxdb.TaskCreate{ + Tasks: []taskmodel.TaskCreate{ { Flux: `option task = { every: 10s, name: "foo" } data = from(bucket: "telegraf") |> range(start: -1m)`, @@ -1090,7 +1121,7 @@ data = from(bucket: "telegraf") |> range(start: -1m)`, ID: MustIDBase16(orgOneID), }, }, - Tasks: []influxdb.TaskCreate{ + Tasks: []taskmodel.TaskCreate{ { Flux: `option task = { every: 10s, name: "foo" } data = from(bucket: "telegraf") |> range(start: -1m)`, @@ -1108,10 +1139,10 @@ data = from(bucket: "telegraf") |> range(start: -1m)`, userID: MustIDBase16(sixID), }, wants: wants{ - err: &influxdb.Error{ + err: &errors.Error{ Op: influxdb.OpDeleteCheck, Msg: "check not found", - Code: influxdb.ENotFound, + Code: 
errors.ENotFound,
 				},
 			checks: []influxdb.Check{
 				deadman1,
@@ -1148,12 +1179,12 @@ func FindCheck(
 ) {
 	type args struct {
 		name  string
-		OrgID influxdb.ID
+		OrgID platform.ID
 	}
 
 	type wants struct {
 		check influxdb.Check
-		err   *influxdb.Error
+		err   *errors.Error
 	}
 
 	tests := []struct {
@@ -1204,8 +1235,8 @@ func FindCheck(
 				OrgID: MustIDBase16(orgOneID),
 			},
 			wants: wants{
-				err: &influxdb.Error{
-					Code: influxdb.ENotFound,
+				err: &errors.Error{
+					Code: errors.ENotFound,
 					Op:   influxdb.OpFindCheck,
 					Msg:  "check not found",
 				},
@@ -1227,8 +1258,8 @@
-				err: &influxdb.Error{
-					Code: influxdb.ENotFound,
+				err: &errors.Error{
+					Code: errors.ENotFound,
 					Op:   influxdb.OpFindCheck,
 					Msg:  "check not found",
 				},
@@ -1265,7 +1296,7 @@ func UpdateCheck(
 	t *testing.T,
 ) {
 	type args struct {
-		id    influxdb.ID
+		id    platform.ID
 		check influxdb.Check
 	}
 	type wants struct {
@@ -1290,7 +1321,7 @@
 					ID: MustIDBase16(orgOneID),
 				},
 			},
-			Tasks: []influxdb.TaskCreate{
+			Tasks: []taskmodel.TaskCreate{
 				{
 					Flux: `option task = { every: 10s, name: "foo" }
data = from(bucket: "telegraf") |> range(start: -1m)`,
@@ -1495,8 +1526,8 @@ data = from(bucket: "telegraf") |> range(start: -1m)`,
 				},
 			},
 			wants: wants{
-				err: &influxdb.Error{
-					Code: influxdb.EConflict,
+				err: &errors.Error{
+					Code: errors.EConflict,
 					Msg:  "check name is not unique",
 				},
 			},
@@ -1527,11 +1558,11 @@ func PatchCheck(
 	t *testing.T,
 ) {
 	type args struct {
-		id  influxdb.ID
+		id  platform.ID
 		upd influxdb.CheckUpdate
 	}
 	type wants struct {
-		err   *influxdb.Error
+		err   *errors.Error
 		check influxdb.Check
 	}
@@ -1548,7 +1579,7 @@ func PatchCheck(
 			fields: CheckFields{
 				IDGenerator:   mock.NewIDGenerator("0000000000000001", t),
 				TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2007, 5, 4, 1, 2, 3, 0, time.UTC)},
-				Tasks: []influxdb.TaskCreate{
+				Tasks: []taskmodel.TaskCreate{
 					{
 						Flux: `option task = { every: 10s, name: "foo" }
data = from(bucket: "telegraf") |> range(start: -1m)`,
@@ -1667,8 +1698,8 @@ data = from(bucket: "telegraf") |> range(start: -1m)`,
 				},
 			},
 			wants: wants{
-				err: &influxdb.Error{
-					Code: influxdb.EConflict,
+				err: &errors.Error{
+					Code: errors.EConflict,
 					Msg:  "check entity update conflicts with an existing entity",
 				},
 			},
@@ -1692,8 +1723,8 @@ data = from(bucket: "telegraf") |> range(start: -1m)`,
 }
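`MustIDBase16` below is a thin panic-on-error wrapper over ID parsing, which after this refactor lives in `kit/platform`. The underlying round-trip, sketched:

```go
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/v2/kit/platform"
)

func main() {
	// IDs are 16 hex characters; IDFromString rejects anything else.
	id, err := platform.IDFromString("020f755c3c082000")
	if err != nil {
		panic(err)
	}
	fmt.Println(id.String()) // 020f755c3c082000
}
```

 // MustIDBase16 is a helper to ensure a correct ID is built during testing.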
-func MustIDBase16(s string) influxdb.ID { - id, err := influxdb.IDFromString(s) +func MustIDBase16(s string) platform.ID { + id, err := platform.IDFromString(s) if err != nil { panic(err) } @@ -1720,18 +1751,18 @@ func ErrorsEqual(t *testing.T, actual, expected error) { t.Errorf("expected error %s but received nil", expected.Error()) } - if influxdb.ErrorCode(expected) != influxdb.ErrorCode(actual) { + if errors.ErrorCode(expected) != errors.ErrorCode(actual) { t.Logf("\nexpected: %v\nactual: %v\n\n", expected, actual) - t.Errorf("expected error code %q but received %q", influxdb.ErrorCode(expected), influxdb.ErrorCode(actual)) + t.Errorf("expected error code %q but received %q", errors.ErrorCode(expected), errors.ErrorCode(actual)) } - if influxdb.ErrorMessage(expected) != influxdb.ErrorMessage(actual) { + if errors.ErrorMessage(expected) != errors.ErrorMessage(actual) { t.Logf("\nexpected: %v\nactual: %v\n\n", expected, actual) - t.Errorf("expected error message %q but received %q", influxdb.ErrorMessage(expected), influxdb.ErrorMessage(actual)) + t.Errorf("expected error message %q but received %q", errors.ErrorMessage(expected), errors.ErrorMessage(actual)) } } -func influxErrsEqual(t *testing.T, expected *influxdb.Error, actual error) { +func influxErrsEqual(t *testing.T, expected *errors.Error, actual error) { t.Helper() if expected != nil { @@ -1746,7 +1777,7 @@ func influxErrsEqual(t *testing.T, expected *influxdb.Error, actual error) { require.NoError(t, actual) return } - iErr, ok := actual.(*influxdb.Error) + iErr, ok := actual.(*errors.Error) require.True(t, ok) assert.Equal(t, expected.Code, iErr.Code) assert.Truef(t, strings.HasPrefix(iErr.Error(), expected.Error()), "expected: %s got err: %s", expected.Error(), actual.Error()) diff --git a/checks/service_test.go b/checks/service_test.go index 26ade32fb55..ffff6327e76 100644 --- a/checks/service_test.go +++ b/checks/service_test.go @@ -11,6 +11,7 @@ import ( "github.com/influxdata/influxdb/v2/kv/migration/all" "github.com/influxdata/influxdb/v2/mock" "github.com/influxdata/influxdb/v2/query/fluxlang" + "github.com/influxdata/influxdb/v2/task/taskmodel" "github.com/influxdata/influxdb/v2/tenant" "go.uber.org/zap/zaptest" ) @@ -31,7 +32,7 @@ func TestCheckService(t *testing.T) { CheckService(initCheckService, t) } -func initCheckService(f CheckFields, t *testing.T) (influxdb.CheckService, influxdb.TaskService, string, func()) { +func initCheckService(f CheckFields, t *testing.T) (influxdb.CheckService, taskmodel.TaskService, string, func()) { store, closeKVStore := NewKVTestStore(t) logger := zaptest.NewLogger(t) diff --git a/chronograf/.bumpversion.cfg b/chronograf/.bumpversion.cfg deleted file mode 100644 index 1036c3b5e24..00000000000 --- a/chronograf/.bumpversion.cfg +++ /dev/null @@ -1,14 +0,0 @@ -[bumpversion] -current_version = 1.5.0.0 -files = README.md server/swagger.json server/swagger_v2.yml -parse = (?P\d+)\.(?P\d+)\.(?P\d+)\.(?P\d+) -serialize = {major}.{minor}.{patch}.{release} - -[bumpversion:part:release] - -[bumpversion:file:ui/package.json] -search = "version": "{current_version}" -parse = (?P\d+)\.(?P\d+)\.(?P\d+)-(?P\d+) -serialize = {major}.{minor}.{patch}-{release} -replace = "version": "{new_version}" - diff --git a/chronograf/.kapacitor/alerts.go b/chronograf/.kapacitor/alerts.go deleted file mode 100644 index b4f8e47000b..00000000000 --- a/chronograf/.kapacitor/alerts.go +++ /dev/null @@ -1,67 +0,0 @@ -package kapacitor - -import ( - "bytes" - "encoding/json" - 
"regexp" - "strings" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/kapacitor/pipeline" - "github.com/influxdata/kapacitor/pipeline/tick" -) - -// AlertServices generates alert chaining methods to be attached to an alert from all rule Services -func AlertServices(rule chronograf.AlertRule) (string, error) { - node, err := addAlertNodes(rule.AlertNodes) - if err != nil { - return "", err - } - - if err := ValidateAlert(node); err != nil { - return "", err - } - return node, nil -} - -func addAlertNodes(handlers chronograf.AlertNodes) (string, error) { - octets, err := json.Marshal(&handlers) - if err != nil { - return "", err - } - - stream := &pipeline.StreamNode{} - pipe := pipeline.CreatePipelineSources(stream) - from := stream.From() - node := from.Alert() - if err = json.Unmarshal(octets, node); err != nil { - return "", err - } - - aster := tick.AST{} - err = aster.Build(pipe) - if err != nil { - return "", err - } - - var buf bytes.Buffer - aster.Program.Format(&buf, "", false) - rawTick := buf.String() - return toOldSchema(rawTick), nil -} - -var ( - removeID = regexp.MustCompile(`(?m)\s*\.id\(.*\)$`) // Remove to use ID variable - removeMessage = regexp.MustCompile(`(?m)\s*\.message\(.*\)$`) // Remove to use message variable - removeDetails = regexp.MustCompile(`(?m)\s*\.details\(.*\)$`) // Remove to use details variable - removeHistory = regexp.MustCompile(`(?m)\s*\.history\(21\)$`) // Remove default history -) - -func toOldSchema(rawTick string) string { - rawTick = strings.Replace(rawTick, "stream\n |from()\n |alert()", "", -1) - rawTick = removeID.ReplaceAllString(rawTick, "") - rawTick = removeMessage.ReplaceAllString(rawTick, "") - rawTick = removeDetails.ReplaceAllString(rawTick, "") - rawTick = removeHistory.ReplaceAllString(rawTick, "") - return rawTick -} diff --git a/chronograf/.kapacitor/alerts_test.go b/chronograf/.kapacitor/alerts_test.go deleted file mode 100644 index ff703d36414..00000000000 --- a/chronograf/.kapacitor/alerts_test.go +++ /dev/null @@ -1,228 +0,0 @@ -package kapacitor - -import ( - "testing" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -func TestAlertServices(t *testing.T) { - tests := []struct { - name string - rule chronograf.AlertRule - want chronograf.TICKScript - wantErr bool - }{ - { - name: "Test several valid services", - rule: chronograf.AlertRule{ - AlertNodes: chronograf.AlertNodes{ - Slack: []*chronograf.Slack{{}}, - VictorOps: []*chronograf.VictorOps{{}}, - Email: []*chronograf.Email{{}}, - }, - }, - want: `alert() - .email() - .victorOps() - .slack() -`, - }, - { - name: "Test single valid service", - rule: chronograf.AlertRule{ - AlertNodes: chronograf.AlertNodes{ - Slack: []*chronograf.Slack{{}}, - }, - }, - want: `alert() - .slack() -`, - }, - { - name: "Test pushoverservice", - rule: chronograf.AlertRule{ - AlertNodes: chronograf.AlertNodes{ - Pushover: []*chronograf.Pushover{ - { - Device: "asdf", - Title: "asdf", - Sound: "asdf", - URL: "http://moo.org", - URLTitle: "influxdata", - }, - }, - }, - }, - want: `alert() - .pushover() - .device('asdf') - .title('asdf') - .uRL('http://moo.org') - .uRLTitle('influxdata') - .sound('asdf') -`, - }, - { - name: "Test single valid service and property", - rule: chronograf.AlertRule{ - AlertNodes: chronograf.AlertNodes{ - Slack: []*chronograf.Slack{ - { - Channel: "#general", - }, - }, - }, - }, - want: `alert() - .slack() - .channel('#general') -`, - }, - { - name: "Test tcp", - rule: chronograf.AlertRule{ - AlertNodes: 
chronograf.AlertNodes{ - TCPs: []*chronograf.TCP{ - { - Address: "myaddress:22", - }, - }, - }, - }, - want: `alert() - .tcp('myaddress:22') -`, - }, - { - name: "Test log", - rule: chronograf.AlertRule{ - AlertNodes: chronograf.AlertNodes{ - Log: []*chronograf.Log{ - { - FilePath: "/tmp/alerts.log", - }, - }, - }, - }, - want: `alert() - .log('/tmp/alerts.log') -`, - }, - { - name: "Test http as post", - rule: chronograf.AlertRule{ - AlertNodes: chronograf.AlertNodes{ - Posts: []*chronograf.Post{ - { - URL: "http://myaddress", - }, - }, - }, - }, - want: `alert() - .post('http://myaddress') -`, - }, - { - name: "Test post with headers", - rule: chronograf.AlertRule{ - AlertNodes: chronograf.AlertNodes{ - Posts: []*chronograf.Post{ - { - URL: "http://myaddress", - Headers: map[string]string{"key": "value"}, - }, - }, - }, - }, - want: `alert() - .post('http://myaddress') - .header('key', 'value') -`, - }, - } - for _, tt := range tests { - got, err := AlertServices(tt.rule) - if (err != nil) != tt.wantErr { - t.Errorf("%q. AlertServices() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - if tt.wantErr { - continue - } - formatted, err := formatTick("alert()" + got) - if err != nil { - t.Errorf("%q. formatTick() error = %v", tt.name, err) - continue - } - if formatted != tt.want { - t.Errorf("%q. AlertServices() = %v, want %v", tt.name, formatted, tt.want) - } - } -} - -func Test_addAlertNodes(t *testing.T) { - tests := []struct { - name string - handlers chronograf.AlertNodes - want string - wantErr bool - }{ - { - name: "test email alerts", - handlers: chronograf.AlertNodes{ - IsStateChangesOnly: true, - Email: []*chronograf.Email{ - { - To: []string{ - "me@me.com", "you@you.com", - }, - }, - }, - }, - want: ` - .stateChangesOnly() - .email() - .to('me@me.com') - .to('you@you.com') -`, - }, - { - name: "test pushover alerts", - handlers: chronograf.AlertNodes{ - IsStateChangesOnly: true, - Pushover: []*chronograf.Pushover{ - { - Device: "asdf", - Title: "asdf", - Sound: "asdf", - URL: "http://moo.org", - URLTitle: "influxdata", - }, - }, - }, - want: ` - .stateChangesOnly() - .pushover() - .device('asdf') - .title('asdf') - .uRL('http://moo.org') - .uRLTitle('influxdata') - .sound('asdf') -`, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := addAlertNodes(tt.handlers) - if (err != nil) != tt.wantErr { - t.Errorf("addAlertNodes() error = %v, wantErr %v", err, tt.wantErr) - return - } - if got != tt.want { - t.Errorf("addAlertNodes() =\n%v\n, want\n%v", got, tt.want) - } - }) - } -} diff --git a/chronograf/.kapacitor/ast.go b/chronograf/.kapacitor/ast.go deleted file mode 100644 index ecac0931bcc..00000000000 --- a/chronograf/.kapacitor/ast.go +++ /dev/null @@ -1,502 +0,0 @@ -package kapacitor - -import ( - "encoding/json" - "regexp" - "strconv" - "strings" - "time" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/kapacitor/pipeline" - "github.com/influxdata/kapacitor/tick" - "github.com/influxdata/kapacitor/tick/ast" - "github.com/influxdata/kapacitor/tick/stateful" -) - -func varString(kapaVar string, vars map[string]tick.Var) (string, bool) { - var ok bool - v, ok := vars[kapaVar] - if !ok { - return "", ok - } - strVar, ok := v.Value.(string) - return strVar, ok -} - -func varValue(kapaVar string, vars map[string]tick.Var) (string, bool) { - var ok bool - v, ok := vars[kapaVar] - if !ok { - return "", ok - } - switch val := v.Value.(type) { - case string: - return 
val, true - case float64: - return strconv.FormatFloat(val, 'f', -1, 32), true - case int64: - return strconv.FormatInt(val, 10), true - case bool: - return strconv.FormatBool(val), true - case time.Time: - return val.String(), true - case *regexp.Regexp: - return val.String(), true - default: - return "", false - } -} - -func varDuration(kapaVar string, vars map[string]tick.Var) (string, bool) { - var ok bool - v, ok := vars[kapaVar] - if !ok { - return "", ok - } - durVar, ok := v.Value.(time.Duration) - if !ok { - return "", ok - } - return durVar.String(), true -} - -func varStringList(kapaVar string, vars map[string]tick.Var) ([]string, bool) { - v, ok := vars[kapaVar] - if !ok { - return nil, ok - } - list, ok := v.Value.([]tick.Var) - if !ok { - return nil, ok - } - - strs := make([]string, len(list)) - for i, l := range list { - s, ok := l.Value.(string) - if !ok { - return nil, ok - } - strs[i] = s - } - return strs, ok -} - -// WhereFilter filters the stream data in a TICKScript -type WhereFilter struct { - TagValues map[string][]string // Tags are filtered by an array of values - Operator string // Operator is == or != -} - -func varWhereFilter(vars map[string]tick.Var) (WhereFilter, bool) { - // All chronograf TICKScripts have whereFilters. - v, ok := vars["whereFilter"] - if !ok { - return WhereFilter{}, ok - } - filter := WhereFilter{} - filter.TagValues = make(map[string][]string) - - // All chronograf TICKScript's whereFilter use a lambda function. - value, ok := v.Value.(*ast.LambdaNode) - if !ok { - return WhereFilter{}, ok - } - - lambda := value.ExpressionString() - // Chronograf TICKScripts use lambda: TRUE as a pass-throug where clause - // if the script does not have a where clause set. - if lambda == "TRUE" { - return WhereFilter{}, true - } - - opSet := map[string]struct{}{} // All ops must be the same b/c queryConfig - // Otherwise the lambda function will be several "tag" op 'value' expressions. - var re = regexp.MustCompile(`(?U)"(.*)"\s+(==|!=)\s+'(.*)'`) - for _, match := range re.FindAllStringSubmatch(lambda, -1) { - tag, op, value := match[1], match[2], match[3] - opSet[op] = struct{}{} - values, ok := filter.TagValues[tag] - if !ok { - values = make([]string, 0) - } - values = append(values, value) - filter.TagValues[tag] = values - } - - // An obscure piece of the queryConfig is that the operator in ALL binary - // expressions just be the same. 
So, there must only be one operator - // in our opSet - if len(opSet) != 1 { - return WhereFilter{}, false - } - for op := range opSet { - if op != "==" && op != "!=" { - return WhereFilter{}, false - } - filter.Operator = op - } - return filter, true -} - -// CommonVars includes all the variables of a chronograf TICKScript -type CommonVars struct { - DB string - RP string - Measurement string - Name string - Message string - TriggerType string - GroupBy []string - Filter WhereFilter - Period string - Every string - Detail string -} - -// ThresholdVars represents the critical value where an alert occurs -type ThresholdVars struct { - Crit string -} - -// RangeVars represents the critical range where an alert occurs -type RangeVars struct { - Lower string - Upper string -} - -// RelativeVars represents the critical range and time in the past an alert occurs -type RelativeVars struct { - Shift string - Crit string -} - -// DeadmanVars represents a deadman alert -type DeadmanVars struct{} - -func extractCommonVars(vars map[string]tick.Var) (CommonVars, error) { - res := CommonVars{} - // All these variables must exist to be a chronograf TICKScript - // If any of these don't exist, then this isn't a tickscript we can process - var ok bool - res.DB, ok = varString("db", vars) - if !ok { - return CommonVars{}, ErrNotChronoTickscript - } - res.RP, ok = varString("rp", vars) - if !ok { - return CommonVars{}, ErrNotChronoTickscript - } - res.Measurement, ok = varString("measurement", vars) - if !ok { - return CommonVars{}, ErrNotChronoTickscript - } - res.Name, ok = varString("name", vars) - if !ok { - return CommonVars{}, ErrNotChronoTickscript - } - res.Message, ok = varString("message", vars) - if !ok { - return CommonVars{}, ErrNotChronoTickscript - } - res.TriggerType, ok = varString("triggerType", vars) - if !ok { - return CommonVars{}, ErrNotChronoTickscript - } - - // All chronograf TICKScripts have groupBy. Possible to be empty list though. - groups, ok := varStringList("groupBy", vars) - if !ok { - return CommonVars{}, ErrNotChronoTickscript - } - res.GroupBy = groups - - // All chronograf TICKScripts must have a whereFitler. Could be empty. - res.Filter, ok = varWhereFilter(vars) - if !ok { - return CommonVars{}, ErrNotChronoTickscript - } - - // Some chronograf TICKScripts have details associated with the alert. - // Typically, this is the body of an email alert. 
- if detail, ok := varString("details", vars); ok { - res.Detail = detail - } - - // Relative and Threshold alerts may have an every variables - if every, ok := varDuration("every", vars); ok { - res.Every = every - } - - // All alert types may have a period variables - if period, ok := varDuration("period", vars); ok { - res.Period = period - } - return res, nil -} - -func extractAlertVars(vars map[string]tick.Var) (interface{}, error) { - // Depending on the type of the alert the variables set will be different - alertType, ok := varString("triggerType", vars) - if !ok { - return nil, ErrNotChronoTickscript - } - - switch alertType { - case Deadman: - return &DeadmanVars{}, nil - case Threshold: - if crit, ok := varValue("crit", vars); ok { - return &ThresholdVars{ - Crit: crit, - }, nil - } - r := &RangeVars{} - // Threshold Range alerts must have both an upper and lower bound - if r.Lower, ok = varValue("lower", vars); !ok { - return nil, ErrNotChronoTickscript - } - if r.Upper, ok = varValue("upper", vars); !ok { - return nil, ErrNotChronoTickscript - } - return r, nil - case Relative: - // Relative alerts must have a time shift and critical value - r := &RelativeVars{} - if r.Shift, ok = varDuration("shift", vars); !ok { - return nil, ErrNotChronoTickscript - } - if r.Crit, ok = varValue("crit", vars); !ok { - return nil, ErrNotChronoTickscript - } - return r, nil - default: - return nil, ErrNotChronoTickscript - } -} - -// FieldFunc represents the field used as the alert value and its optional aggregate function -type FieldFunc struct { - Field string - Func string -} - -func extractFieldFunc(script chronograf.TICKScript) FieldFunc { - // If the TICKScript is relative or threshold alert with an aggregate - // then the aggregate function and field is in the form |func('field').as('value') - var re = regexp.MustCompile(`(?Um)\|(\w+)\('(.*)'\)\s*\.as\('value'\)`) - for _, match := range re.FindAllStringSubmatch(string(script), -1) { - fn, field := match[1], match[2] - return FieldFunc{ - Field: field, - Func: fn, - } - } - - // If the alert does not have an aggregate then the the value function will - // be this form: |eval(lambda: "%s").as('value') - re = regexp.MustCompile(`(?Um)\|eval\(lambda: "(.*)"\)\s*\.as\('value'\)`) - for _, match := range re.FindAllStringSubmatch(string(script), -1) { - field := match[1] - return FieldFunc{ - Field: field, - } - } - // Otherwise, if this could be a deadman alert and not have a FieldFunc - return FieldFunc{} -} - -// CritCondition represents the operators that determine when the alert should go critical -type CritCondition struct { - Operators []string -} - -func extractCrit(script chronograf.TICKScript) CritCondition { - // Threshold and relative alerts have the form .crit(lambda: "value" op crit) - // Threshold range alerts have the form .crit(lambda: "value" op lower op "value" op upper) - var re = regexp.MustCompile(`(?Um)\.crit\(lambda:\s+"value"\s+(.*)\s+crit\)`) - for _, match := range re.FindAllStringSubmatch(string(script), -1) { - op := match[1] - return CritCondition{ - Operators: []string{ - op, - }, - } - } - re = regexp.MustCompile(`(?Um)\.crit\(lambda:\s+"value"\s+(.*)\s+lower\s+(.*)\s+"value"\s+(.*)\s+upper\)`) - for _, match := range re.FindAllStringSubmatch(string(script), -1) { - lower, compound, upper := match[1], match[2], match[3] - return CritCondition{ - Operators: []string{ - lower, - compound, - upper, - }, - } - } - - // It's possible to not have a critical condition if this is - // a deadman alert - return 
CritCondition{} -} - -// alertType reads the TICKscript and returns the specific -// alerting type. If it is unable to determine it will -// return ErrNotChronoTickscript -func alertType(script chronograf.TICKScript) (string, error) { - t := string(script) - if strings.Contains(t, `var triggerType = 'threshold'`) { - if strings.Contains(t, `var crit = `) { - return Threshold, nil - } else if strings.Contains(t, `var lower = `) && strings.Contains(t, `var upper = `) { - return ThresholdRange, nil - } - return "", ErrNotChronoTickscript - } else if strings.Contains(t, `var triggerType = 'relative'`) { - if strings.Contains(t, `eval(lambda: float("current.value" - "past.value"))`) { - return ChangeAmount, nil - } else if strings.Contains(t, `|eval(lambda: abs(float("current.value" - "past.value")) / float("past.value") * 100.0)`) { - return ChangePercent, nil - } - return "", ErrNotChronoTickscript - } else if strings.Contains(t, `var triggerType = 'deadman'`) { - return Deadman, nil - } - return "", ErrNotChronoTickscript -} - -// Reverse converts tickscript to an AlertRule -func Reverse(script chronograf.TICKScript) (chronograf.AlertRule, error) { - rule := chronograf.AlertRule{ - Query: &chronograf.QueryConfig{}, - } - t, err := alertType(script) - if err != nil { - return rule, err - } - - scope := stateful.NewScope() - template, err := pipeline.CreateTemplatePipeline(string(script), pipeline.StreamEdge, scope, &deadman{}) - if err != nil { - return chronograf.AlertRule{}, err - } - vars := template.Vars() - - commonVars, err := extractCommonVars(vars) - if err != nil { - return rule, err - } - alertVars, err := extractAlertVars(vars) - if err != nil { - return rule, err - } - fieldFunc := extractFieldFunc(script) - critCond := extractCrit(script) - - switch t { - case Threshold, ChangeAmount, ChangePercent: - if len(critCond.Operators) != 1 { - return rule, ErrNotChronoTickscript - } - case ThresholdRange: - if len(critCond.Operators) != 3 { - return rule, ErrNotChronoTickscript - } - } - - rule.Name = commonVars.Name - rule.Trigger = commonVars.TriggerType - rule.Message = commonVars.Message - rule.Details = commonVars.Detail - rule.Query.Database = commonVars.DB - rule.Query.RetentionPolicy = commonVars.RP - rule.Query.Measurement = commonVars.Measurement - rule.Query.GroupBy.Tags = commonVars.GroupBy - if commonVars.Filter.Operator == "==" { - rule.Query.AreTagsAccepted = true - } - rule.Query.Tags = commonVars.Filter.TagValues - - if t == Deadman { - rule.TriggerValues.Period = commonVars.Period - } else { - rule.Query.GroupBy.Time = commonVars.Period - rule.Every = commonVars.Every - if fieldFunc.Func != "" { - rule.Query.Fields = []chronograf.Field{ - { - Type: "func", - Value: fieldFunc.Func, - Args: []chronograf.Field{ - { - Value: fieldFunc.Field, - Type: "field", - }, - }, - }, - } - } else { - rule.Query.Fields = []chronograf.Field{ - { - Type: "field", - Value: fieldFunc.Field, - }, - } - } - } - - switch t { - case ChangeAmount, ChangePercent: - rule.TriggerValues.Change = t - rule.TriggerValues.Operator, err = chronoOperator(critCond.Operators[0]) - if err != nil { - return rule, ErrNotChronoTickscript - } - v, ok := alertVars.(*RelativeVars) - if !ok { - return rule, ErrNotChronoTickscript - } - rule.TriggerValues.Value = v.Crit - rule.TriggerValues.Shift = v.Shift - case Threshold: - rule.TriggerValues.Operator, err = chronoOperator(critCond.Operators[0]) - if err != nil { - return rule, ErrNotChronoTickscript - } - v, ok := alertVars.(*ThresholdVars) - if !ok { - return 
rule, ErrNotChronoTickscript - } - rule.TriggerValues.Value = v.Crit - case ThresholdRange: - rule.TriggerValues.Operator, err = chronoRangeOperators(critCond.Operators) - v, ok := alertVars.(*RangeVars) - if !ok { - return rule, ErrNotChronoTickscript - } - rule.TriggerValues.Value = v.Lower - rule.TriggerValues.RangeValue = v.Upper - } - - p, err := pipeline.CreatePipeline(string(script), pipeline.StreamEdge, stateful.NewScope(), &deadman{}, vars) - if err != nil { - return chronograf.AlertRule{}, err - } - - err = extractAlertNodes(p, &rule) - return rule, err -} - -func extractAlertNodes(p *pipeline.Pipeline, rule *chronograf.AlertRule) error { - return p.Walk(func(n pipeline.Node) error { - switch node := n.(type) { - case *pipeline.AlertNode: - octets, err := json.MarshalIndent(node, "", " ") - if err != nil { - return err - } - return json.Unmarshal(octets, &rule.AlertNodes) - } - return nil - }) -} diff --git a/chronograf/.kapacitor/ast_test.go b/chronograf/.kapacitor/ast_test.go deleted file mode 100644 index 3938ebcd18b..00000000000 --- a/chronograf/.kapacitor/ast_test.go +++ /dev/null @@ -1,1569 +0,0 @@ -package kapacitor - -import ( - "reflect" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/chronograf" -) - -func TestReverse(t *testing.T) { - tests := []struct { - name string - script chronograf.TICKScript - want chronograf.AlertRule - wantErr bool - }{ - { - name: "simple stream tickscript", - script: chronograf.TICKScript(` - var name = 'name' - var triggerType = 'threshold' - var every = 30s - var period = 10m - var groupBy = ['host', 'cluster_id'] - var db = 'telegraf' - var rp = 'autogen' - var measurement = 'cpu' - var message = 'message' - var details = 'details' - var crit = 90 - var idVar = name + ':{{.Group}}' - var idTag = 'alertID' - var levelTag = 'level' - var messageField = 'message' - var durationField = 'duration' - var whereFilter = lambda: ("cpu" == 'cpu_total') AND ("host" == 'acc-0eabc309-eu-west-1-data-3' OR "host" == 'prod') - - var data = stream - |from() - .database(db) - .retentionPolicy(rp) - .measurement(measurement) - |window() - .period(period) - .every(every) - .align() - |mean('usage_user') - .as('value') - var trigger = data - |alert() - .crit(lambda: "value" > crit) - .stateChangesOnly() - .message(message) - .id(idVar) - .idTag(idTag) - .levelTag(levelTag) - .messageField(messageField) - .durationField(durationField) - .slack() - .victorOps() - .email('howdy@howdy.com', 'doody@doody.com') - .log('/tmp/alerts.log') - .post('http://backin.tm') - .endpoint('myendpoint') - .header('key', 'value') - `), - - want: chronograf.AlertRule{ - Name: "name", - Trigger: "threshold", - AlertNodes: chronograf.AlertNodes{ - IsStateChangesOnly: true, - Slack: []*chronograf.Slack{ - {}, - }, - VictorOps: []*chronograf.VictorOps{ - {}, - }, - Email: []*chronograf.Email{ - { - To: []string{"howdy@howdy.com", "doody@doody.com"}, - }, - }, - Log: []*chronograf.Log{ - { - FilePath: "/tmp/alerts.log", - }, - }, - Posts: []*chronograf.Post{ - { - URL: "http://backin.tm", - Headers: map[string]string{"key": "value"}, - }, - }, - }, - TriggerValues: chronograf.TriggerValues{ - Operator: "greater than", - Value: "90", - }, - Every: "30s", - Message: "message", - Details: "details", - Query: &chronograf.QueryConfig{ - Database: "telegraf", - RetentionPolicy: "autogen", - Measurement: "cpu", - Fields: []chronograf.Field{ - { - Value: "mean", - Args: []chronograf.Field{ - { - Value: "usage_user", - Type: "field", - }, - }, 
- Type: "func", - }, - }, - GroupBy: chronograf.GroupBy{ - Time: "10m0s", - Tags: []string{"host", "cluster_id"}, - }, - Tags: map[string][]string{ - "cpu": []string{ - "cpu_total", - }, - "host": []string{ - "acc-0eabc309-eu-west-1-data-3", - "prod", - }, - }, - AreTagsAccepted: true, - }, - }, - }, - { - name: "Test Threshold", - script: `var db = 'telegraf' - - var rp = 'autogen' - - var measurement = 'cpu' - - var groupBy = ['host', 'cluster_id'] - - var whereFilter = lambda: ("cpu" == 'cpu_total') AND ("host" == 'acc-0eabc309-eu-west-1-data-3' OR "host" == 'prod') - - var period = 10m - - var every = 30s - - var name = 'name' - - var idVar = name + ':{{.Group}}' - - var message = 'message' - - var idTag = 'alertID' - - var levelTag = 'level' - - var messageField = 'message' - - var durationField = 'duration' - - var outputDB = 'chronograf' - - var outputRP = 'autogen' - - var outputMeasurement = 'alerts' - - var triggerType = 'threshold' - - var crit = 90 - - var data = stream - |from() - .database(db) - .retentionPolicy(rp) - .measurement(measurement) - .groupBy(groupBy) - .where(whereFilter) - |window() - .period(period) - .every(every) - .align() - |mean('usage_user') - .as('value') - - var trigger = data - |alert() - .crit(lambda: "value" > crit) - .stateChangesOnly() - .message(message) - .id(idVar) - .idTag(idTag) - .levelTag(levelTag) - .messageField(messageField) - .durationField(durationField) - .slack() - .victorOps() - .email() - - trigger - |influxDBOut() - .create() - .database(outputDB) - .retentionPolicy(outputRP) - .measurement(outputMeasurement) - .tag('alertName', name) - .tag('triggerType', triggerType) - - trigger - |httpOut('output')`, - want: chronograf.AlertRule{ - Query: &chronograf.QueryConfig{ - Database: "telegraf", - Measurement: "cpu", - RetentionPolicy: "autogen", - Fields: []chronograf.Field{ - { - Value: "mean", - Args: []chronograf.Field{ - { - Value: "usage_user", - Type: "field", - }, - }, - Type: "func", - }, - }, - Tags: map[string][]string{ - "cpu": []string{"cpu_total"}, - "host": []string{"acc-0eabc309-eu-west-1-data-3", "prod"}, - }, - GroupBy: chronograf.GroupBy{ - Time: "10m0s", - Tags: []string{"host", "cluster_id"}, - }, - AreTagsAccepted: true, - }, - Every: "30s", - AlertNodes: chronograf.AlertNodes{ - IsStateChangesOnly: true, - - Slack: []*chronograf.Slack{ - {}, - }, - VictorOps: []*chronograf.VictorOps{ - {}, - }, - Email: []*chronograf.Email{ - { - To: []string{}, - }, - }, - }, - Message: "message", - Trigger: "threshold", - TriggerValues: chronograf.TriggerValues{ - Operator: "greater than", - Value: "90", - }, - Name: "name", - }, - }, - { - name: "Test haproxy string comparison", - script: `var db = 'influxdb' - - var rp = 'autogen' - - var measurement = 'haproxy' - - var groupBy = ['pxname'] - - var whereFilter = lambda: TRUE - - var period = 10s - - var every = 10s - - var name = 'haproxy' - - var idVar = name + ':{{.Group}}' - - var message = 'Haproxy monitor : {{.ID}} : {{ index .Tags "server" }} : {{ index .Tags "pxname" }} is {{ .Level }} ' - - var idTag = 'alertID' - - var levelTag = 'level' - - var messageField = 'message' - - var durationField = 'duration' - - var outputDB = 'chronograf' - - var outputRP = 'autogen' - - var outputMeasurement = 'alerts' - - var triggerType = 'threshold' - - var details = 'Email template' - - var crit = 'DOWN' - - var data = stream - |from() - .database(db) - .retentionPolicy(rp) - .measurement(measurement) - .groupBy(groupBy) - .where(whereFilter) - |window() - .period(period) - 
.every(every) - .align() - |last('status') - .as('value') - - var trigger = data - |alert() - .crit(lambda: "value" == crit) - .stateChangesOnly() - .message(message) - .id(idVar) - .idTag(idTag) - .levelTag(levelTag) - .messageField(messageField) - .durationField(durationField) - .details(details) - .email() - - trigger - |influxDBOut() - .create() - .database(outputDB) - .retentionPolicy(outputRP) - .measurement(outputMeasurement) - .tag('alertName', name) - .tag('triggerType', triggerType) - - trigger - |httpOut('output') - `, - want: chronograf.AlertRule{ - Name: "haproxy", - Trigger: "threshold", - AlertNodes: chronograf.AlertNodes{ - IsStateChangesOnly: true, - Email: []*chronograf.Email{ - {To: []string{}}, - }, - }, - TriggerValues: chronograf.TriggerValues{ - Operator: "equal to", - Value: "DOWN", - }, - Every: "10s", - Message: `Haproxy monitor : {{.ID}} : {{ index .Tags "server" }} : {{ index .Tags "pxname" }} is {{ .Level }} `, - Details: "Email template", - Query: &chronograf.QueryConfig{ - Database: "influxdb", - RetentionPolicy: "autogen", - Measurement: "haproxy", - Fields: []chronograf.Field{ - { - Value: "last", - Args: []chronograf.Field{ - { - Value: "status", - Type: "field", - }, - }, - Type: "func", - }, - }, - GroupBy: chronograf.GroupBy{ - Time: "10s", - Tags: []string{"pxname"}, - }, - AreTagsAccepted: false, - }, - }, - }, - { - name: "Test haproxy", - script: `var db = 'influxdb' - - var rp = 'autogen' - - var measurement = 'haproxy' - - var groupBy = ['pxname'] - - var whereFilter = lambda: TRUE - - var period = 10s - - var every = 10s - - var name = 'haproxy' - - var idVar = name + ':{{.Group}}' - - var message = 'Haproxy monitor : {{.ID}} : {{ index .Tags "server" }} : {{ index .Tags "pxname" }} is {{ .Level }} ' - - var idTag = 'alertID' - - var levelTag = 'level' - - var messageField = 'message' - - var durationField = 'duration' - - var outputDB = 'chronograf' - - var outputRP = 'autogen' - - var outputMeasurement = 'alerts' - - var triggerType = 'threshold' - - var details = 'Email template' - - var crit = 'DOWN' - - var data = stream - |from() - .database(db) - .retentionPolicy(rp) - .measurement(measurement) - .groupBy(groupBy) - .where(whereFilter) - |window() - .period(period) - .every(every) - .align() - |last('status') - .as('value') - - var trigger = data - |alert() - .crit(lambda: "value" > crit) - .stateChangesOnly() - .message(message) - .id(idVar) - .idTag(idTag) - .levelTag(levelTag) - .messageField(messageField) - .durationField(durationField) - .details(details) - .email() - - trigger - |influxDBOut() - .create() - .database(outputDB) - .retentionPolicy(outputRP) - .measurement(outputMeasurement) - .tag('alertName', name) - .tag('triggerType', triggerType) - - trigger - |httpOut('output') - `, - want: chronograf.AlertRule{ - Name: "haproxy", - Trigger: "threshold", - AlertNodes: chronograf.AlertNodes{ - IsStateChangesOnly: true, - Email: []*chronograf.Email{ - {To: []string{}}, - }, - }, - TriggerValues: chronograf.TriggerValues{ - Operator: "greater than", - Value: "DOWN", - }, - Every: "10s", - Message: `Haproxy monitor : {{.ID}} : {{ index .Tags "server" }} : {{ index .Tags "pxname" }} is {{ .Level }} `, - Details: "Email template", - Query: &chronograf.QueryConfig{ - Database: "influxdb", - RetentionPolicy: "autogen", - Measurement: "haproxy", - Fields: []chronograf.Field{ - { - Value: "last", - Args: []chronograf.Field{ - { - Value: "status", - Type: "field", - }, - }, - Type: "func", - }, - }, - GroupBy: chronograf.GroupBy{ - Time: 
"10s", - Tags: []string{"pxname"}, - }, - AreTagsAccepted: false, - }, - }, - }, - { - name: "Test valid template alert with detail", - script: `var db = 'telegraf' - - var rp = 'autogen' - - var measurement = 'cpu' - - var groupBy = ['host', 'cluster_id'] - - var whereFilter = lambda: ("cpu" == 'cpu_total') AND ("host" == 'acc-0eabc309-eu-west-1-data-3' OR "host" == 'prod') - - var period = 10m - - var every = 30s - - var name = 'name' - - var idVar = name + ':{{.Group}}' - - var message = 'message' - - var idTag = 'alertID' - - var levelTag = 'level' - - var messageField = 'message' - - var durationField = 'duration' - - var outputDB = 'chronograf' - - var outputRP = 'autogen' - - var outputMeasurement = 'alerts' - - var triggerType = 'threshold' - - var details = 'details' - - var crit = 90 - - var data = stream - |from() - .database(db) - .retentionPolicy(rp) - .measurement(measurement) - .groupBy(groupBy) - .where(whereFilter) - |window() - .period(period) - .every(every) - .align() - |mean('usage_user') - .as('value') - - var trigger = data - |alert() - .crit(lambda: "value" > crit) - .stateChangesOnly() - .message(message) - .id(idVar) - .idTag(idTag) - .levelTag(levelTag) - .messageField(messageField) - .durationField(durationField) - .details(details) - .slack() - .victorOps() - .email() - - trigger - |influxDBOut() - .create() - .database(outputDB) - .retentionPolicy(outputRP) - .measurement(outputMeasurement) - .tag('alertName', name) - .tag('triggerType', triggerType) - - trigger - |httpOut('output') - `, - want: chronograf.AlertRule{ - Name: "name", - Trigger: "threshold", - AlertNodes: chronograf.AlertNodes{ - IsStateChangesOnly: true, - Slack: []*chronograf.Slack{ - {}, - }, - VictorOps: []*chronograf.VictorOps{ - {}, - }, - Email: []*chronograf.Email{ - {To: []string{}}, - }, - }, - TriggerValues: chronograf.TriggerValues{ - Operator: "greater than", - Value: "90", - }, - Every: "30s", - Message: "message", - Details: "details", - Query: &chronograf.QueryConfig{ - Database: "telegraf", - Measurement: "cpu", - RetentionPolicy: "autogen", - Fields: []chronograf.Field{ - { - Value: "mean", - Args: []chronograf.Field{ - { - Value: "usage_user", - Type: "field", - }, - }, - Type: "func", - }, - }, - Tags: map[string][]string{ - "host": []string{ - "acc-0eabc309-eu-west-1-data-3", - "prod", - }, - "cpu": []string{ - "cpu_total", - }, - }, - GroupBy: chronograf.GroupBy{ - Time: "10m0s", - Tags: []string{"host", "cluster_id"}, - }, - AreTagsAccepted: true, - }, - }, - }, - { - name: "Test valid threshold inside range", - script: `var db = 'telegraf' - - var rp = 'autogen' - - var measurement = 'cpu' - - var groupBy = ['host', 'cluster_id'] - - var whereFilter = lambda: ("cpu" == 'cpu_total') AND ("host" == 'acc-0eabc309-eu-west-1-data-3' OR "host" == 'prod') - - var period = 10m - - var every = 30s - - var name = 'name' - - var idVar = name + ':{{.Group}}' - - var message = 'message' - - var idTag = 'alertID' - - var levelTag = 'level' - - var messageField = 'message' - - var durationField = 'duration' - - var outputDB = 'chronograf' - - var outputRP = 'autogen' - - var outputMeasurement = 'alerts' - - var triggerType = 'threshold' - - var lower = 90 - - var upper = 100 - - var data = stream - |from() - .database(db) - .retentionPolicy(rp) - .measurement(measurement) - .groupBy(groupBy) - .where(whereFilter) - |window() - .period(period) - .every(every) - .align() - |mean('usage_user') - .as('value') - - var trigger = data - |alert() - .crit(lambda: "value" >= lower AND "value" <= 
upper) - .stateChangesOnly() - .message(message) - .id(idVar) - .idTag(idTag) - .levelTag(levelTag) - .messageField(messageField) - .durationField(durationField) - .slack() - .victorOps() - .email() - - trigger - |influxDBOut() - .create() - .database(outputDB) - .retentionPolicy(outputRP) - .measurement(outputMeasurement) - .tag('alertName', name) - .tag('triggerType', triggerType) - - trigger - |httpOut('output') - `, - want: chronograf.AlertRule{ - Name: "name", - Trigger: "threshold", - AlertNodes: chronograf.AlertNodes{ - IsStateChangesOnly: true, - Slack: []*chronograf.Slack{ - {}, - }, - VictorOps: []*chronograf.VictorOps{ - {}, - }, - Email: []*chronograf.Email{ - {To: []string{}}, - }, - }, - TriggerValues: chronograf.TriggerValues{ - Operator: "inside range", - Value: "90", - RangeValue: "100", - }, - Every: "30s", - Message: "message", - Query: &chronograf.QueryConfig{ - Database: "telegraf", - Measurement: "cpu", - RetentionPolicy: "autogen", - Fields: []chronograf.Field{ - { - Value: "mean", - Args: []chronograf.Field{ - { - Value: "usage_user", - Type: "field", - }, - }, - Type: "func", - }, - }, - Tags: map[string][]string{ - "host": []string{ - "acc-0eabc309-eu-west-1-data-3", - "prod", - }, - "cpu": []string{ - "cpu_total", - }, - }, - GroupBy: chronograf.GroupBy{ - Time: "10m0s", - Tags: []string{"host", "cluster_id"}, - }, - AreTagsAccepted: true, - }, - }, - }, - { - name: "Test valid threshold outside range", - script: `var db = 'telegraf' - - var rp = 'autogen' - - var measurement = 'cpu' - - var groupBy = ['host', 'cluster_id'] - - var whereFilter = lambda: ("cpu" == 'cpu_total') AND ("host" == 'acc-0eabc309-eu-west-1-data-3' OR "host" == 'prod') - - var period = 10m - - var every = 30s - - var name = 'name' - - var idVar = name + ':{{.Group}}' - - var message = 'message' - - var idTag = 'alertID' - - var levelTag = 'level' - - var messageField = 'message' - - var durationField = 'duration' - - var outputDB = 'chronograf' - - var outputRP = 'autogen' - - var outputMeasurement = 'alerts' - - var triggerType = 'threshold' - - var lower = 90 - - var upper = 100 - - var data = stream - |from() - .database(db) - .retentionPolicy(rp) - .measurement(measurement) - .groupBy(groupBy) - .where(whereFilter) - |window() - .period(period) - .every(every) - .align() - |mean('usage_user') - .as('value') - - var trigger = data - |alert() - .crit(lambda: "value" < lower OR "value" > upper) - .stateChangesOnly() - .message(message) - .id(idVar) - .idTag(idTag) - .levelTag(levelTag) - .messageField(messageField) - .durationField(durationField) - .slack() - .victorOps() - .email() - - trigger - |influxDBOut() - .create() - .database(outputDB) - .retentionPolicy(outputRP) - .measurement(outputMeasurement) - .tag('alertName', name) - .tag('triggerType', triggerType) - - trigger - |httpOut('output') - `, - want: chronograf.AlertRule{ - Name: "name", - Trigger: "threshold", - AlertNodes: chronograf.AlertNodes{ - IsStateChangesOnly: true, - Slack: []*chronograf.Slack{ - {}, - }, - VictorOps: []*chronograf.VictorOps{ - {}, - }, - Email: []*chronograf.Email{ - {To: []string{}}, - }, - }, - TriggerValues: chronograf.TriggerValues{ - Operator: "outside range", - Value: "90", - RangeValue: "100", - }, - Every: "30s", - Message: "message", - Query: &chronograf.QueryConfig{ - Database: "telegraf", - Measurement: "cpu", - RetentionPolicy: "autogen", - Fields: []chronograf.Field{ - { - Value: "mean", - Args: []chronograf.Field{ - { - Value: "usage_user", - Type: "field", - }, - }, - Type: "func", - 
}, - }, - Tags: map[string][]string{ - "host": []string{ - "acc-0eabc309-eu-west-1-data-3", - "prod", - }, - "cpu": []string{ - "cpu_total", - }, - }, - GroupBy: chronograf.GroupBy{ - Time: "10m0s", - Tags: []string{"host", "cluster_id"}, - }, - AreTagsAccepted: true, - }, - }, - }, - { - name: "Test threshold no aggregate", - script: `var db = 'telegraf' - - var rp = 'autogen' - - var measurement = 'cpu' - - var groupBy = ['host', 'cluster_id'] - - var whereFilter = lambda: ("cpu" == 'cpu_total') AND ("host" == 'acc-0eabc309-eu-west-1-data-3' OR "host" == 'prod') - - var name = 'name' - - var idVar = name + ':{{.Group}}' - - var message = 'message' - - var idTag = 'alertID' - - var levelTag = 'level' - - var messageField = 'message' - - var durationField = 'duration' - - var outputDB = 'chronograf' - - var outputRP = 'autogen' - - var outputMeasurement = 'alerts' - - var triggerType = 'threshold' - - var crit = 90 - - var data = stream - |from() - .database(db) - .retentionPolicy(rp) - .measurement(measurement) - .groupBy(groupBy) - .where(whereFilter) - |eval(lambda: "usage_user") - .as('value') - - var trigger = data - |alert() - .crit(lambda: "value" > crit) - .stateChangesOnly() - .message(message) - .id(idVar) - .idTag(idTag) - .levelTag(levelTag) - .messageField(messageField) - .durationField(durationField) - .slack() - .victorOps() - .email() - - trigger - |influxDBOut() - .create() - .database(outputDB) - .retentionPolicy(outputRP) - .measurement(outputMeasurement) - .tag('alertName', name) - .tag('triggerType', triggerType) - - trigger - |httpOut('output') - `, - want: chronograf.AlertRule{ - Name: "name", - Trigger: "threshold", - AlertNodes: chronograf.AlertNodes{ - IsStateChangesOnly: true, - Slack: []*chronograf.Slack{ - {}, - }, - VictorOps: []*chronograf.VictorOps{ - {}, - }, - Email: []*chronograf.Email{ - {To: []string{}}, - }, - }, - TriggerValues: chronograf.TriggerValues{ - Operator: "greater than", - Value: "90", - }, - Message: "message", - Query: &chronograf.QueryConfig{ - Database: "telegraf", - Measurement: "cpu", - RetentionPolicy: "autogen", - Fields: []chronograf.Field{ - { - Value: "usage_user", - Type: "field", - }, - }, - Tags: map[string][]string{ - "host": []string{ - "acc-0eabc309-eu-west-1-data-3", - "prod", - }, - "cpu": []string{ - "cpu_total", - }, - }, - GroupBy: chronograf.GroupBy{ - Tags: []string{"host", "cluster_id"}, - }, - AreTagsAccepted: true, - }, - }, - }, - { - name: "Test relative alert", - script: `var db = 'telegraf' - -var rp = 'autogen' - -var measurement = 'cpu' - -var groupBy = ['host', 'cluster_id'] - -var whereFilter = lambda: ("cpu" == 'cpu_total') AND ("host" == 'acc-0eabc309-eu-west-1-data-3' OR "host" == 'prod') - -var period = 10m - -var every = 30s - -var name = 'name' - -var idVar = name + ':{{.Group}}' - -var message = 'message' - -var idTag = 'alertID' - -var levelTag = 'level' - -var messageField = 'message' - -var durationField = 'duration' - -var outputDB = 'chronograf' - -var outputRP = 'autogen' - -var outputMeasurement = 'alerts' - -var triggerType = 'relative' - -var shift = 1m - -var crit = 90 - -var data = stream - |from() - .database(db) - .retentionPolicy(rp) - .measurement(measurement) - .groupBy(groupBy) - .where(whereFilter) - |window() - .period(period) - .every(every) - .align() - |mean('usage_user') - .as('value') - -var past = data - |shift(shift) - -var current = data - -var trigger = past - |join(current) - .as('past', 'current') - |eval(lambda: abs(float("current.value" - "past.value")) / 
float("past.value") * 100.0) - .keep() - .as('value') - |alert() - .crit(lambda: "value" > crit) - .stateChangesOnly() - .message(message) - .id(idVar) - .idTag(idTag) - .levelTag(levelTag) - .messageField(messageField) - .durationField(durationField) - .slack() - .victorOps() - .email() - -trigger - |influxDBOut() - .create() - .database(outputDB) - .retentionPolicy(outputRP) - .measurement(outputMeasurement) - .tag('alertName', name) - .tag('triggerType', triggerType) - -trigger - |httpOut('output') -`, - want: chronograf.AlertRule{ - Name: "name", - Trigger: "relative", - AlertNodes: chronograf.AlertNodes{ - IsStateChangesOnly: true, - Slack: []*chronograf.Slack{ - {}, - }, - VictorOps: []*chronograf.VictorOps{ - {}, - }, - Email: []*chronograf.Email{ - {To: []string{}}, - }, - }, - TriggerValues: chronograf.TriggerValues{ - Change: "% change", - Shift: "1m0s", - Operator: "greater than", - Value: "90", - }, - Every: "30s", - Message: "message", - Query: &chronograf.QueryConfig{ - Database: "telegraf", - Measurement: "cpu", - RetentionPolicy: "autogen", - Fields: []chronograf.Field{ - { - Value: "mean", - Args: []chronograf.Field{ - { - Value: "usage_user", - Type: "field", - }, - }, - Type: "func", - }, - }, - Tags: map[string][]string{ - "host": []string{ - "acc-0eabc309-eu-west-1-data-3", - "prod", - }, - "cpu": []string{ - "cpu_total", - }, - }, - GroupBy: chronograf.GroupBy{ - Time: "10m0s", - Tags: []string{"host", "cluster_id"}, - }, - AreTagsAccepted: true, - }, - }, - }, - { - name: "Test relative change", - script: `var db = 'telegraf' - -var rp = 'autogen' - -var measurement = 'cpu' - -var groupBy = ['host', 'cluster_id'] - -var whereFilter = lambda: ("cpu" == 'cpu_total') AND ("host" == 'acc-0eabc309-eu-west-1-data-3' OR "host" == 'prod') - -var period = 10m - -var every = 30s - -var name = 'name' - -var idVar = name + ':{{.Group}}' - -var message = 'message' - -var idTag = 'alertID' - -var levelTag = 'level' - -var messageField = 'message' - -var durationField = 'duration' - -var outputDB = 'chronograf' - -var outputRP = 'autogen' - -var outputMeasurement = 'alerts' - -var triggerType = 'relative' - -var shift = 1m - -var crit = 90 - -var data = stream - |from() - .database(db) - .retentionPolicy(rp) - .measurement(measurement) - .groupBy(groupBy) - .where(whereFilter) - |window() - .period(period) - .every(every) - .align() - |mean('usage_user') - .as('value') - -var past = data - |shift(shift) - -var current = data - -var trigger = past - |join(current) - .as('past', 'current') - |eval(lambda: float("current.value" - "past.value")) - .keep() - .as('value') - |alert() - .crit(lambda: "value" > crit) - .stateChangesOnly() - .message(message) - .id(idVar) - .idTag(idTag) - .levelTag(levelTag) - .messageField(messageField) - .durationField(durationField) - .slack() - .victorOps() - .email() - -trigger - |influxDBOut() - .create() - .database(outputDB) - .retentionPolicy(outputRP) - .measurement(outputMeasurement) - .tag('alertName', name) - .tag('triggerType', triggerType) - -trigger - |httpOut('output') -`, - want: chronograf.AlertRule{ - Name: "name", - Trigger: "relative", - AlertNodes: chronograf.AlertNodes{ - IsStateChangesOnly: true, - Slack: []*chronograf.Slack{ - {}, - }, - VictorOps: []*chronograf.VictorOps{ - {}, - }, - Email: []*chronograf.Email{ - {To: []string{}}, - }, - }, - TriggerValues: chronograf.TriggerValues{ - Change: "change", - Shift: "1m0s", - Operator: "greater than", - Value: "90", - }, - Every: "30s", - Message: "message", - Query: 
&chronograf.QueryConfig{ - Database: "telegraf", - Measurement: "cpu", - RetentionPolicy: "autogen", - Fields: []chronograf.Field{ - { - Value: "mean", - Args: []chronograf.Field{ - { - Value: "usage_user", - Type: "field", - }, - }, - Type: "func", - }, - }, - Tags: map[string][]string{ - "host": []string{ - "acc-0eabc309-eu-west-1-data-3", - "prod", - }, - "cpu": []string{ - "cpu_total", - }, - }, - GroupBy: chronograf.GroupBy{ - Time: "10m0s", - Tags: []string{"host", "cluster_id"}, - }, - AreTagsAccepted: true, - }, - }, - }, - { - name: "Test deadman", - script: `var db = 'telegraf' - -var rp = 'autogen' - -var measurement = 'cpu' - -var groupBy = ['host', 'cluster_id'] - -var whereFilter = lambda: ("cpu" == 'cpu_total') AND ("host" == 'acc-0eabc309-eu-west-1-data-3' OR "host" == 'prod') - -var period = 10m - -var name = 'name' - -var idVar = name + ':{{.Group}}' - -var message = 'message' - -var idTag = 'alertID' - -var levelTag = 'level' - -var messageField = 'message' - -var durationField = 'duration' - -var outputDB = 'chronograf' - -var outputRP = 'autogen' - -var outputMeasurement = 'alerts' - -var triggerType = 'deadman' - -var threshold = 0.0 - -var data = stream - |from() - .database(db) - .retentionPolicy(rp) - .measurement(measurement) - .groupBy(groupBy) - .where(whereFilter) - -var trigger = data - |deadman(threshold, period) - .stateChangesOnly() - .message(message) - .id(idVar) - .idTag(idTag) - .levelTag(levelTag) - .messageField(messageField) - .durationField(durationField) - .slack() - .victorOps() - .email() - -trigger - |eval(lambda: "emitted") - .as('value') - .keep('value', messageField, durationField) - |influxDBOut() - .create() - .database(outputDB) - .retentionPolicy(outputRP) - .measurement(outputMeasurement) - .tag('alertName', name) - .tag('triggerType', triggerType) - -trigger - |httpOut('output') -`, - want: chronograf.AlertRule{ - Name: "name", - Trigger: "deadman", - AlertNodes: chronograf.AlertNodes{ - IsStateChangesOnly: true, - Slack: []*chronograf.Slack{ - {}, - }, - VictorOps: []*chronograf.VictorOps{ - {}, - }, - Email: []*chronograf.Email{ - {To: []string{}}, - }, - }, - TriggerValues: chronograf.TriggerValues{ - Period: "10m0s", - }, - Message: "message", - Query: &chronograf.QueryConfig{ - Database: "telegraf", - Measurement: "cpu", - RetentionPolicy: "autogen", - Tags: map[string][]string{ - "host": []string{ - "acc-0eabc309-eu-west-1-data-3", - "prod", - }, - "cpu": []string{ - "cpu_total", - }, - }, - GroupBy: chronograf.GroupBy{ - Time: "", - Tags: []string{"host", "cluster_id"}, - }, - AreTagsAccepted: true, - }, - }, - }, - { - name: "Test threshold lambda", - script: `var db = '_internal' - -var rp = 'monitor' - -var measurement = 'cq' - -var groupBy = [] - -var whereFilter = lambda: TRUE - -var name = 'rule 1' - -var idVar = name + ':{{.Group}}' - -var message = '' - -var idTag = 'alertID' - -var levelTag = 'level' - -var messageField = 'message' - -var durationField = 'duration' - -var outputDB = 'chronograf' - -var outputRP = 'autogen' - -var outputMeasurement = 'alerts' - -var triggerType = 'threshold' - -var crit = 90000 - -var data = stream - |from() - .database(db) - .retentionPolicy(rp) - .measurement(measurement) - .groupBy(groupBy) - .where(whereFilter) - |eval(lambda: "queryOk") - .as('value') - -var trigger = data - |alert() - .crit(lambda: "value" > crit) - .stateChangesOnly() - .message(message) - .id(idVar) - .idTag(idTag) - .levelTag(levelTag) - .messageField(messageField) - .durationField(durationField) - -trigger - 
|influxDBOut() - .create() - .database(outputDB) - .retentionPolicy(outputRP) - .measurement(outputMeasurement) - .tag('alertName', name) - .tag('triggerType', triggerType) - -trigger - |httpOut('output') -`, - want: chronograf.AlertRule{ - Name: "rule 1", - Trigger: "threshold", - TriggerValues: chronograf.TriggerValues{ - Operator: "greater than", - Value: "90000", - }, - Every: "", - Message: "", - Details: "", - AlertNodes: chronograf.AlertNodes{ - IsStateChangesOnly: true, - }, - Query: &chronograf.QueryConfig{ - Database: "_internal", - RetentionPolicy: "monitor", - Measurement: "cq", - Fields: []chronograf.Field{ - { - Value: "queryOk", - Type: "field", - }, - }, - GroupBy: chronograf.GroupBy{ - Tags: []string{}, - }, - AreTagsAccepted: false, - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := Reverse(tt.script) - if (err != nil) != tt.wantErr { - t.Errorf("reverse error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("reverse = %s", cmp.Diff(got, tt.want)) - if tt.want.Query != nil { - if got.Query == nil { - t.Errorf("reverse = got nil QueryConfig") - } else if !cmp.Equal(*got.Query, *tt.want.Query) { - t.Errorf("reverse = QueryConfig not equal %s", cmp.Diff(*got.Query, *tt.want.Query)) - } - } - } - }) - } -} diff --git a/chronograf/.kapacitor/client.go b/chronograf/.kapacitor/client.go deleted file mode 100644 index 19ea2aeee10..00000000000 --- a/chronograf/.kapacitor/client.go +++ /dev/null @@ -1,415 +0,0 @@ -package kapacitor - -import ( - "context" - "fmt" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/id" - client "github.com/influxdata/kapacitor/client/v1" -) - -const ( - // Prefix is prepended to the ID of all alerts - Prefix = "chronograf-v1-" - - // FetchRate is the rate Paginating Kapacitor Clients will consume responses - FetchRate = 100 -) - -// Client communicates to kapacitor -type Client struct { - URL string - Username string - Password string - InsecureSkipVerify bool - ID chronograf.ID - Ticker chronograf.Ticker - kapaClient func(url, username, password string, insecureSkipVerify bool) (KapaClient, error) -} - -// KapaClient represents a connection to a kapacitor instance -type KapaClient interface { - CreateTask(opt client.CreateTaskOptions) (client.Task, error) - Task(link client.Link, opt *client.TaskOptions) (client.Task, error) - ListTasks(opt *client.ListTasksOptions) ([]client.Task, error) - UpdateTask(link client.Link, opt client.UpdateTaskOptions) (client.Task, error) - DeleteTask(link client.Link) error -} - -// NewClient creates a client that interfaces with Kapacitor tasks -func NewClient(url, username, password string, insecureSkipVerify bool) *Client { - return &Client{ - URL: url, - Username: username, - Password: password, - InsecureSkipVerify: insecureSkipVerify, - ID: &id.UUID{}, - Ticker: &Alert{}, - kapaClient: NewKapaClient, - } -} - -// Task represents a running kapacitor task -type Task struct { - ID string // Kapacitor ID - Href string // Kapacitor relative URI - HrefOutput string // Kapacitor relative URI to HTTPOutNode - Rule chronograf.AlertRule // Rule is the rule that represents this Task - TICKScript chronograf.TICKScript // TICKScript is the running script -} - -// NewTask creates a task from a kapacitor client task -func NewTask(task *client.Task) *Task { - dbrps := make([]chronograf.DBRP, len(task.DBRPs)) - for i := range task.DBRPs { - dbrps[i].DB = 
task.DBRPs[i].Database - dbrps[i].RP = task.DBRPs[i].RetentionPolicy - } - - script := chronograf.TICKScript(task.TICKscript) - rule, err := Reverse(script) - if err != nil { - rule = chronograf.AlertRule{ - Name: task.ID, - Query: nil, - } - } - - rule.ID = task.ID - rule.TICKScript = script - rule.Type = task.Type.String() - rule.DBRPs = dbrps - rule.Status = task.Status.String() - rule.Executing = task.Executing - rule.Error = task.Error - rule.Created = task.Created - rule.Modified = task.Modified - rule.LastEnabled = task.LastEnabled - return &Task{ - ID: task.ID, - Href: task.Link.Href, - HrefOutput: HrefOutput(task.ID), - Rule: rule, - } -} - -// HrefOutput returns the link to a kapacitor task httpOut Node given an id -func HrefOutput(ID string) string { - return fmt.Sprintf("/kapacitor/v1/tasks/%s/%s", ID, HTTPEndpoint) -} - -// Href returns the link to a kapacitor task given an id -func (c *Client) Href(ID string) string { - return fmt.Sprintf("/kapacitor/v1/tasks/%s", ID) -} - -// HrefOutput returns the link to a kapacitor task httpOut Node given an id -func (c *Client) HrefOutput(ID string) string { - return HrefOutput(ID) -} - -// Create builds and POSTs a tickscript to kapacitor -func (c *Client) Create(ctx context.Context, rule chronograf.AlertRule) (*Task, error) { - var opt *client.CreateTaskOptions - var err error - if rule.Query != nil { - opt, err = c.createFromQueryConfig(rule) - } else { - opt, err = c.createFromTick(rule) - } - - if err != nil { - return nil, err - } - - kapa, err := c.kapaClient(c.URL, c.Username, c.Password, c.InsecureSkipVerify) - if err != nil { - return nil, err - } - - task, err := kapa.CreateTask(*opt) - if err != nil { - return nil, err - } - - return NewTask(&task), nil -} - -func (c *Client) createFromTick(rule chronograf.AlertRule) (*client.CreateTaskOptions, error) { - dbrps := make([]client.DBRP, len(rule.DBRPs)) - for i := range rule.DBRPs { - dbrps[i] = client.DBRP{ - Database: rule.DBRPs[i].DB, - RetentionPolicy: rule.DBRPs[i].RP, - } - } - - status := client.Enabled - if rule.Status != "" { - if err := status.UnmarshalText([]byte(rule.Status)); err != nil { - return nil, err - } - } - - taskType := client.StreamTask - if rule.Type != "stream" { - if err := taskType.UnmarshalText([]byte(rule.Type)); err != nil { - return nil, err - } - } - - return &client.CreateTaskOptions{ - ID: rule.Name, - Type: taskType, - DBRPs: dbrps, - TICKscript: string(rule.TICKScript), - Status: status, - }, nil -} - -func (c *Client) createFromQueryConfig(rule chronograf.AlertRule) (*client.CreateTaskOptions, error) { - id, err := c.ID.Generate() - if err != nil { - return nil, err - } - - script, err := c.Ticker.Generate(rule) - if err != nil { - return nil, err - } - - kapaID := Prefix + id - return &client.CreateTaskOptions{ - ID: kapaID, - Type: toTask(rule.Query), - DBRPs: []client.DBRP{{Database: rule.Query.Database, RetentionPolicy: rule.Query.RetentionPolicy}}, - TICKscript: string(script), - Status: client.Enabled, - }, nil -} - -// Delete removes tickscript task from kapacitor -func (c *Client) Delete(ctx context.Context, href string) error { - kapa, err := c.kapaClient(c.URL, c.Username, c.Password, c.InsecureSkipVerify) - if err != nil { - return err - } - return kapa.DeleteTask(client.Link{Href: href}) -} - -func (c *Client) updateStatus(ctx context.Context, href string, status client.TaskStatus) (*Task, error) { - kapa, err := c.kapaClient(c.URL, c.Username, c.Password, c.InsecureSkipVerify) - if err != nil { - return nil, err - } - - opts := 
client.UpdateTaskOptions{ - Status: status, - } - - task, err := kapa.UpdateTask(client.Link{Href: href}, opts) - if err != nil { - return nil, err - } - - return NewTask(&task), nil -} - -// Disable changes the tickscript status to disabled for a given href. -func (c *Client) Disable(ctx context.Context, href string) (*Task, error) { - return c.updateStatus(ctx, href, client.Disabled) -} - -// Enable changes the tickscript status to disabled for a given href. -func (c *Client) Enable(ctx context.Context, href string) (*Task, error) { - return c.updateStatus(ctx, href, client.Enabled) -} - -// Status returns the status of a task in kapacitor -func (c *Client) Status(ctx context.Context, href string) (string, error) { - s, err := c.status(ctx, href) - if err != nil { - return "", err - } - - return s.String(), nil -} - -func (c *Client) status(ctx context.Context, href string) (client.TaskStatus, error) { - kapa, err := c.kapaClient(c.URL, c.Username, c.Password, c.InsecureSkipVerify) - if err != nil { - return 0, err - } - task, err := kapa.Task(client.Link{Href: href}, nil) - if err != nil { - return 0, err - } - - return task.Status, nil -} - -// All returns all tasks in kapacitor -func (c *Client) All(ctx context.Context) (map[string]*Task, error) { - kapa, err := c.kapaClient(c.URL, c.Username, c.Password, c.InsecureSkipVerify) - if err != nil { - return nil, err - } - - // Only get the status, id and link section back - opts := &client.ListTasksOptions{} - tasks, err := kapa.ListTasks(opts) - if err != nil { - return nil, err - } - - all := map[string]*Task{} - for _, task := range tasks { - all[task.ID] = NewTask(&task) - } - return all, nil -} - -// Reverse builds a chronograf.AlertRule and its QueryConfig from a tickscript -func (c *Client) Reverse(id string, script chronograf.TICKScript) chronograf.AlertRule { - rule, err := Reverse(script) - if err != nil { - return chronograf.AlertRule{ - ID: id, - Name: id, - Query: nil, - TICKScript: script, - } - } - rule.ID = id - rule.TICKScript = script - return rule -} - -// Get returns a single alert in kapacitor -func (c *Client) Get(ctx context.Context, id string) (*Task, error) { - kapa, err := c.kapaClient(c.URL, c.Username, c.Password, c.InsecureSkipVerify) - if err != nil { - return nil, err - } - href := c.Href(id) - task, err := kapa.Task(client.Link{Href: href}, nil) - if err != nil { - return nil, chronograf.ErrAlertNotFound - } - - return NewTask(&task), nil -} - -// Update changes the tickscript of a given id. 
-func (c *Client) Update(ctx context.Context, href string, rule chronograf.AlertRule) (*Task, error) { - kapa, err := c.kapaClient(c.URL, c.Username, c.Password, c.InsecureSkipVerify) - if err != nil { - return nil, err - } - - prevStatus, err := c.status(ctx, href) - if err != nil { - return nil, err - } - - var opt *client.UpdateTaskOptions - if rule.Query != nil { - opt, err = c.updateFromQueryConfig(rule) - } else { - opt, err = c.updateFromTick(rule) - } - if err != nil { - return nil, err - } - - task, err := kapa.UpdateTask(client.Link{Href: href}, *opt) - if err != nil { - return nil, err - } - - // Now enable the task if previously enabled - if prevStatus == client.Enabled { - if _, err := c.Enable(ctx, href); err != nil { - return nil, err - } - } - - return NewTask(&task), nil -} - -func (c *Client) updateFromQueryConfig(rule chronograf.AlertRule) (*client.UpdateTaskOptions, error) { - script, err := c.Ticker.Generate(rule) - if err != nil { - return nil, err - } - - // We need to disable the kapacitor task followed by enabling it during update. - return &client.UpdateTaskOptions{ - TICKscript: string(script), - Status: client.Disabled, - Type: toTask(rule.Query), - DBRPs: []client.DBRP{ - { - Database: rule.Query.Database, - RetentionPolicy: rule.Query.RetentionPolicy, - }, - }, - }, nil -} - -func (c *Client) updateFromTick(rule chronograf.AlertRule) (*client.UpdateTaskOptions, error) { - dbrps := make([]client.DBRP, len(rule.DBRPs)) - for i := range rule.DBRPs { - dbrps[i] = client.DBRP{ - Database: rule.DBRPs[i].DB, - RetentionPolicy: rule.DBRPs[i].RP, - } - } - - taskType := client.StreamTask - if rule.Type != "stream" { - if err := taskType.UnmarshalText([]byte(rule.Type)); err != nil { - return nil, err - } - } - - // We need to disable the kapacitor task followed by enabling it during update. 
- return &client.UpdateTaskOptions{ - TICKscript: string(rule.TICKScript), - Status: client.Disabled, - Type: taskType, - DBRPs: dbrps, - }, nil -} - -func toTask(q *chronograf.QueryConfig) client.TaskType { - if q == nil || q.RawText == nil || *q.RawText == "" { - return client.StreamTask - } - return client.BatchTask -} - -// NewKapaClient creates a Kapacitor client connection -func NewKapaClient(url, username, password string, insecureSkipVerify bool) (KapaClient, error) { - var creds *client.Credentials - if username != "" { - creds = &client.Credentials{ - Method: client.UserAuthentication, - Username: username, - Password: password, - } - } - - clnt, err := client.New(client.Config{ - URL: url, - Credentials: creds, - InsecureSkipVerify: insecureSkipVerify, - }) - - if err != nil { - return clnt, err - } - - return &PaginatingKapaClient{clnt, FetchRate}, nil -} diff --git a/chronograf/.kapacitor/client_test.go b/chronograf/.kapacitor/client_test.go deleted file mode 100644 index 4f273c886fd..00000000000 --- a/chronograf/.kapacitor/client_test.go +++ /dev/null @@ -1,1653 +0,0 @@ -package kapacitor - -import ( - "context" - "fmt" - "reflect" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/influxdata/influxdb/v2/chronograf" - client "github.com/influxdata/kapacitor/client/v1" -) - -type MockKapa struct { - ResTask client.Task - ResTasks []client.Task - TaskError error - UpdateError error - CreateError error - ListError error - DeleteError error - LastStatus client.TaskStatus - - *client.CreateTaskOptions - client.Link - *client.TaskOptions - *client.ListTasksOptions - *client.UpdateTaskOptions -} - -func (m *MockKapa) CreateTask(opt client.CreateTaskOptions) (client.Task, error) { - m.CreateTaskOptions = &opt - return m.ResTask, m.CreateError -} - -func (m *MockKapa) Task(link client.Link, opt *client.TaskOptions) (client.Task, error) { - m.Link = link - m.TaskOptions = opt - return m.ResTask, m.TaskError -} - -func (m *MockKapa) ListTasks(opt *client.ListTasksOptions) ([]client.Task, error) { - m.ListTasksOptions = opt - return m.ResTasks, m.ListError -} - -func (m *MockKapa) UpdateTask(link client.Link, opt client.UpdateTaskOptions) (client.Task, error) { - m.Link = link - m.LastStatus = opt.Status - - if m.UpdateTaskOptions == nil { - m.UpdateTaskOptions = &opt - } - - return m.ResTask, m.UpdateError -} - -func (m *MockKapa) DeleteTask(link client.Link) error { - m.Link = link - return m.DeleteError -} - -type MockID struct { - ID string -} - -func (m *MockID) Generate() (string, error) { - return m.ID, nil -} - -func TestClient_All(t *testing.T) { - type fields struct { - URL string - Username string - Password string - ID chronograf.ID - Ticker chronograf.Ticker - kapaClient func(url, username, password string, insecureSkipVerify bool) (KapaClient, error) - } - type args struct { - ctx context.Context - } - kapa := &MockKapa{} - tests := []struct { - name string - fields fields - args args - want map[string]*Task - wantErr bool - resTask client.Task - resTasks []client.Task - resError error - - createTaskOptions client.CreateTaskOptions - link client.Link - taskOptions *client.TaskOptions - listTasksOptions *client.ListTasksOptions - updateTaskOptions client.UpdateTaskOptions - }{ - { - name: "return no tasks", - fields: fields{ - kapaClient: func(url, username, password string, insecureSkipVerify bool) (KapaClient, error) { - return kapa, nil - }, - }, - listTasksOptions: 
&client.ListTasksOptions{}, - want: map[string]*Task{}, - }, - { - name: "return a non-reversible task", - fields: fields{ - kapaClient: func(url, username, password string, insecureSkipVerify bool) (KapaClient, error) { - return kapa, nil - }, - }, - listTasksOptions: &client.ListTasksOptions{}, - resTasks: []client.Task{ - client.Task{ - ID: "howdy", - Status: client.Enabled, - }, - }, - want: map[string]*Task{ - "howdy": &Task{ - ID: "howdy", - - HrefOutput: "/kapacitor/v1/tasks/howdy/output", - Rule: chronograf.AlertRule{ - ID: "howdy", - Name: "howdy", - TICKScript: "", - Type: "invalid", - Status: "enabled", - DBRPs: []chronograf.DBRP{}, - }, - TICKScript: "", - }, - }, - }, - { - name: "return a reversible task", - fields: fields{ - kapaClient: func(url, username, password string, insecureSkipVerify bool) (KapaClient, error) { - return kapa, nil - }, - }, - listTasksOptions: &client.ListTasksOptions{}, - resTasks: []client.Task{ - client.Task{ - ID: "rule 1", - Status: client.Enabled, - Type: client.StreamTask, - DBRPs: []client.DBRP{ - { - Database: "_internal", - RetentionPolicy: "autogen", - }, - }, - TICKscript: `var db = '_internal' - -var rp = 'monitor' - -var measurement = 'cq' - -var groupBy = [] - -var whereFilter = lambda: TRUE - -var name = 'rule 1' - -var idVar = name + ':{{.Group}}' - -var message = '' - -var idTag = 'alertID' - -var levelTag = 'level' - -var messageField = 'message' - -var durationField = 'duration' - -var outputDB = 'chronograf' - -var outputRP = 'autogen' - -var outputMeasurement = 'alerts' - -var triggerType = 'threshold' - -var crit = 90000 - -var data = stream - |from() - .database(db) - .retentionPolicy(rp) - .measurement(measurement) - .groupBy(groupBy) - .where(whereFilter) - |eval(lambda: "queryOk") - .as('value') - -var trigger = data - |alert() - .crit(lambda: "value" > crit) - .stateChangesOnly() - .message(message) - .id(idVar) - .idTag(idTag) - .levelTag(levelTag) - .messageField(messageField) - .durationField(durationField) - -trigger - |eval(lambda: float("value")) - .as('value') - .keep() - |influxDBOut() - .create() - .database(outputDB) - .retentionPolicy(outputRP) - .measurement(outputMeasurement) - .tag('alertName', name) - .tag('triggerType', triggerType) - -trigger - |httpOut('output') -`, - }, - }, - want: map[string]*Task{ - "rule 1": &Task{ - ID: "rule 1", - - HrefOutput: "/kapacitor/v1/tasks/rule 1/output", - Rule: chronograf.AlertRule{ - DBRPs: []chronograf.DBRP{ - { - - DB: "_internal", - RP: "autogen", - }, - }, - Type: "stream", - Status: "enabled", - ID: "rule 1", - Name: "rule 1", - TICKScript: `var db = '_internal' - -var rp = 'monitor' - -var measurement = 'cq' - -var groupBy = [] - -var whereFilter = lambda: TRUE - -var name = 'rule 1' - -var idVar = name + ':{{.Group}}' - -var message = '' - -var idTag = 'alertID' - -var levelTag = 'level' - -var messageField = 'message' - -var durationField = 'duration' - -var outputDB = 'chronograf' - -var outputRP = 'autogen' - -var outputMeasurement = 'alerts' - -var triggerType = 'threshold' - -var crit = 90000 - -var data = stream - |from() - .database(db) - .retentionPolicy(rp) - .measurement(measurement) - .groupBy(groupBy) - .where(whereFilter) - |eval(lambda: "queryOk") - .as('value') - -var trigger = data - |alert() - .crit(lambda: "value" > crit) - .stateChangesOnly() - .message(message) - .id(idVar) - .idTag(idTag) - .levelTag(levelTag) - .messageField(messageField) - .durationField(durationField) - -trigger - |eval(lambda: float("value")) - .as('value') - .keep() - 
|influxDBOut() - .create() - .database(outputDB) - .retentionPolicy(outputRP) - .measurement(outputMeasurement) - .tag('alertName', name) - .tag('triggerType', triggerType) - -trigger - |httpOut('output') -`, - Trigger: "threshold", - TriggerValues: chronograf.TriggerValues{ - Operator: "greater than", - Value: "90000", - }, - AlertNodes: chronograf.AlertNodes{ - IsStateChangesOnly: true, - }, - Query: &chronograf.QueryConfig{ - Database: "_internal", - RetentionPolicy: "monitor", - Measurement: "cq", - Fields: []chronograf.Field{ - { - Value: "queryOk", - Type: "field", - }, - }, - GroupBy: chronograf.GroupBy{ - Tags: []string{}, - }, - AreTagsAccepted: false, - }, - }, - }, - }, - }, - } - for _, tt := range tests { - kapa.ResTask = tt.resTask - kapa.ResTasks = tt.resTasks - kapa.ListError = tt.resError - t.Run(tt.name, func(t *testing.T) { - c := &Client{ - URL: tt.fields.URL, - Username: tt.fields.Username, - Password: tt.fields.Password, - ID: tt.fields.ID, - Ticker: tt.fields.Ticker, - kapaClient: tt.fields.kapaClient, - } - got, err := c.All(tt.args.ctx) - if (err != nil) != tt.wantErr { - t.Errorf("Client.All() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !cmp.Equal(got, tt.want) { - t.Errorf("%q. Client.All() = -got/+want %s", tt.name, cmp.Diff(got, tt.want)) - } - if !reflect.DeepEqual(kapa.ListTasksOptions, tt.listTasksOptions) { - t.Errorf("Client.All() = listTasksOptions %v, want %v", kapa.ListTasksOptions, tt.listTasksOptions) - } - if !reflect.DeepEqual(kapa.TaskOptions, tt.taskOptions) { - t.Errorf("Client.All() = taskOptions %v, want %v", kapa.TaskOptions, tt.taskOptions) - } - if !reflect.DeepEqual(kapa.ListTasksOptions, tt.listTasksOptions) { - t.Errorf("Client.All() = listTasksOptions %v, want %v", kapa.ListTasksOptions, tt.listTasksOptions) - } - if !reflect.DeepEqual(kapa.Link, tt.link) { - t.Errorf("Client.All() = Link %v, want %v", kapa.Link, tt.link) - } - }) - } -} - -func TestClient_Get(t *testing.T) { - type fields struct { - URL string - Username string - Password string - ID chronograf.ID - Ticker chronograf.Ticker - kapaClient func(url, username, password string, insecureSkipVerify bool) (KapaClient, error) - } - type args struct { - ctx context.Context - id string - } - kapa := &MockKapa{} - tests := []struct { - name string - fields fields - args args - want *Task - wantErr bool - resTask client.Task - resTasks []client.Task - resError error - - createTaskOptions client.CreateTaskOptions - link client.Link - taskOptions *client.TaskOptions - listTasksOptions *client.ListTasksOptions - updateTaskOptions client.UpdateTaskOptions - }{ - { - name: "return no task", - fields: fields{ - kapaClient: func(url, username, password string, insecureSkipVerify bool) (KapaClient, error) { - return kapa, nil - }, - }, - args: args{ - id: "myid", - }, - taskOptions: nil, - wantErr: true, - resError: fmt.Errorf("no such task"), - link: client.Link{ - Href: "/kapacitor/v1/tasks/myid", - }, - }, - { - name: "return non-reversible task", - fields: fields{ - kapaClient: func(url, username, password string, insecureSkipVerify bool) (KapaClient, error) { - return kapa, nil - }, - }, - args: args{ - id: "myid", - }, - taskOptions: nil, - resTask: client.Task{ - ID: "myid", - Status: client.Enabled, - Type: client.StreamTask, - DBRPs: []client.DBRP{ - { - Database: "_internal", - RetentionPolicy: "autogen", - }, - }, - }, - want: &Task{ - ID: "myid", - HrefOutput: "/kapacitor/v1/tasks/myid/output", - Rule: chronograf.AlertRule{ - Type: "stream", - Status: "enabled", 
- ID: "myid", - Name: "myid", - DBRPs: []chronograf.DBRP{ - { - DB: "_internal", - RP: "autogen", - }, - }, - }, - }, - link: client.Link{ - Href: "/kapacitor/v1/tasks/myid", - }, - }, - { - name: "return reversible task", - fields: fields{ - kapaClient: func(url, username, password string, insecureSkipVerify bool) (KapaClient, error) { - return kapa, nil - }, - }, - args: args{ - id: "rule 1", - }, - taskOptions: nil, - resTask: client.Task{ - ID: "rule 1", - Status: client.Enabled, - Type: client.StreamTask, - DBRPs: []client.DBRP{ - { - Database: "_internal", - RetentionPolicy: "autogen", - }, - }, - TICKscript: `var db = '_internal' - -var rp = 'monitor' - -var measurement = 'cq' - -var groupBy = [] - -var whereFilter = lambda: TRUE - -var name = 'rule 1' - -var idVar = name + ':{{.Group}}' - -var message = '' - -var idTag = 'alertID' - -var levelTag = 'level' - -var messageField = 'message' - -var durationField = 'duration' - -var outputDB = 'chronograf' - -var outputRP = 'autogen' - -var outputMeasurement = 'alerts' - -var triggerType = 'threshold' - -var crit = 90000 - -var data = stream - |from() - .database(db) - .retentionPolicy(rp) - .measurement(measurement) - .groupBy(groupBy) - .where(whereFilter) - |eval(lambda: "queryOk") - .as('value') - -var trigger = data - |alert() - .crit(lambda: "value" > crit) - .stateChangesOnly() - .message(message) - .id(idVar) - .idTag(idTag) - .levelTag(levelTag) - .messageField(messageField) - .durationField(durationField) - -trigger - |eval(lambda: float("value")) - .as('value') - .keep() - |influxDBOut() - .create() - .database(outputDB) - .retentionPolicy(outputRP) - .measurement(outputMeasurement) - .tag('alertName', name) - .tag('triggerType', triggerType) - -trigger - |httpOut('output') -`, - }, - want: &Task{ - ID: "rule 1", - HrefOutput: "/kapacitor/v1/tasks/rule 1/output", - Rule: chronograf.AlertRule{ - Type: "stream", - Status: "enabled", - DBRPs: []chronograf.DBRP{ - { - - DB: "_internal", - RP: "autogen", - }, - }, - ID: "rule 1", - Name: "rule 1", - TICKScript: `var db = '_internal' - -var rp = 'monitor' - -var measurement = 'cq' - -var groupBy = [] - -var whereFilter = lambda: TRUE - -var name = 'rule 1' - -var idVar = name + ':{{.Group}}' - -var message = '' - -var idTag = 'alertID' - -var levelTag = 'level' - -var messageField = 'message' - -var durationField = 'duration' - -var outputDB = 'chronograf' - -var outputRP = 'autogen' - -var outputMeasurement = 'alerts' - -var triggerType = 'threshold' - -var crit = 90000 - -var data = stream - |from() - .database(db) - .retentionPolicy(rp) - .measurement(measurement) - .groupBy(groupBy) - .where(whereFilter) - |eval(lambda: "queryOk") - .as('value') - -var trigger = data - |alert() - .crit(lambda: "value" > crit) - .stateChangesOnly() - .message(message) - .id(idVar) - .idTag(idTag) - .levelTag(levelTag) - .messageField(messageField) - .durationField(durationField) - -trigger - |eval(lambda: float("value")) - .as('value') - .keep() - |influxDBOut() - .create() - .database(outputDB) - .retentionPolicy(outputRP) - .measurement(outputMeasurement) - .tag('alertName', name) - .tag('triggerType', triggerType) - -trigger - |httpOut('output') -`, - Trigger: "threshold", - TriggerValues: chronograf.TriggerValues{ - Operator: "greater than", - Value: "90000", - }, - AlertNodes: chronograf.AlertNodes{ - IsStateChangesOnly: true, - }, - Query: &chronograf.QueryConfig{ - Database: "_internal", - RetentionPolicy: "monitor", - Measurement: "cq", - Fields: []chronograf.Field{ - { - Value: 
"queryOk", - Type: "field", - }, - }, - GroupBy: chronograf.GroupBy{ - Tags: []string{}, - }, - AreTagsAccepted: false, - }, - }, - }, - link: client.Link{ - Href: "/kapacitor/v1/tasks/rule 1", - }, - }, - } - for _, tt := range tests { - kapa.ResTask = tt.resTask - kapa.ResTasks = tt.resTasks - kapa.TaskError = tt.resError - t.Run(tt.name, func(t *testing.T) { - c := &Client{ - URL: tt.fields.URL, - Username: tt.fields.Username, - Password: tt.fields.Password, - ID: tt.fields.ID, - Ticker: tt.fields.Ticker, - kapaClient: tt.fields.kapaClient, - } - got, err := c.Get(tt.args.ctx, tt.args.id) - if (err != nil) != tt.wantErr { - t.Errorf("Client.Get() error = %v, wantErr %v", err, tt.wantErr) - return - } - - if !cmp.Equal(got, tt.want) { - t.Errorf("%q. Client.All() = -got/+want %s", tt.name, cmp.Diff(got, tt.want)) - } - if !reflect.DeepEqual(kapa.ListTasksOptions, tt.listTasksOptions) { - t.Errorf("Client.Get() = listTasksOptions %v, want %v", kapa.ListTasksOptions, tt.listTasksOptions) - } - if !reflect.DeepEqual(kapa.TaskOptions, tt.taskOptions) { - t.Errorf("Client.Get() = taskOptions %v, want %v", kapa.TaskOptions, tt.taskOptions) - } - if !reflect.DeepEqual(kapa.ListTasksOptions, tt.listTasksOptions) { - t.Errorf("Client.Get() = listTasksOptions %v, want %v", kapa.ListTasksOptions, tt.listTasksOptions) - } - if !reflect.DeepEqual(kapa.Link, tt.link) { - t.Errorf("Client.Get() = Link %v, want %v", kapa.Link, tt.link) - } - }) - } -} - -func TestClient_updateStatus(t *testing.T) { - type fields struct { - URL string - Username string - Password string - ID chronograf.ID - Ticker chronograf.Ticker - kapaClient func(url, username, password string, insecureSkipVerify bool) (KapaClient, error) - } - type args struct { - ctx context.Context - href string - status client.TaskStatus - } - kapa := &MockKapa{} - tests := []struct { - name string - fields fields - args args - resTask client.Task - want *Task - resError error - wantErr bool - updateTaskOptions *client.UpdateTaskOptions - }{ - { - name: "disable alert rule", - fields: fields{ - kapaClient: func(url, username, password string, insecureSkipVerify bool) (KapaClient, error) { - return kapa, nil - }, - Ticker: &Alert{}, - }, - args: args{ - ctx: context.Background(), - href: "/kapacitor/v1/tasks/howdy", - status: client.Disabled, - }, - resTask: client.Task{ - ID: "howdy", - Status: client.Disabled, - Type: client.StreamTask, - DBRPs: []client.DBRP{ - { - Database: "db", - RetentionPolicy: "rp", - }, - }, - Link: client.Link{ - Href: "/kapacitor/v1/tasks/howdy", - }, - }, - updateTaskOptions: &client.UpdateTaskOptions{ - TICKscript: "", - Status: client.Disabled, - }, - want: &Task{ - ID: "howdy", - Href: "/kapacitor/v1/tasks/howdy", - HrefOutput: "/kapacitor/v1/tasks/howdy/output", - Rule: chronograf.AlertRule{ - ID: "howdy", - Name: "howdy", - Type: "stream", - DBRPs: []chronograf.DBRP{ - { - - DB: "db", - RP: "rp", - }, - }, - Status: "disabled", - }, - }, - }, - { - name: "fail to enable alert rule", - fields: fields{ - kapaClient: func(url, username, password string, insecureSkipVerify bool) (KapaClient, error) { - return kapa, nil - }, - Ticker: &Alert{}, - }, - args: args{ - ctx: context.Background(), - href: "/kapacitor/v1/tasks/howdy", - status: client.Enabled, - }, - updateTaskOptions: &client.UpdateTaskOptions{ - TICKscript: "", - Status: client.Enabled, - }, - resError: fmt.Errorf("error"), - wantErr: true, - }, - { - name: "enable alert rule", - fields: fields{ - kapaClient: func(url, username, password string, 
insecureSkipVerify bool) (KapaClient, error) { - return kapa, nil - }, - Ticker: &Alert{}, - }, - args: args{ - ctx: context.Background(), - href: "/kapacitor/v1/tasks/howdy", - status: client.Enabled, - }, - resTask: client.Task{ - ID: "howdy", - Type: client.StreamTask, - DBRPs: []client.DBRP{ - { - Database: "db", - RetentionPolicy: "rp", - }, - }, - Status: client.Enabled, - Link: client.Link{ - Href: "/kapacitor/v1/tasks/howdy", - }, - }, - updateTaskOptions: &client.UpdateTaskOptions{ - TICKscript: "", - Status: client.Enabled, - }, - want: &Task{ - ID: "howdy", - Href: "/kapacitor/v1/tasks/howdy", - HrefOutput: "/kapacitor/v1/tasks/howdy/output", - Rule: chronograf.AlertRule{ - ID: "howdy", - Name: "howdy", - Type: "stream", - DBRPs: []chronograf.DBRP{ - { - - DB: "db", - RP: "rp", - }, - }, - Status: "enabled", - }, - }, - }, - } - for _, tt := range tests { - kapa.ResTask = tt.resTask - kapa.UpdateError = tt.resError - kapa.UpdateTaskOptions = nil - t.Run(tt.name, func(t *testing.T) { - c := &Client{ - URL: tt.fields.URL, - Username: tt.fields.Username, - Password: tt.fields.Password, - ID: tt.fields.ID, - Ticker: tt.fields.Ticker, - kapaClient: tt.fields.kapaClient, - } - got, err := c.updateStatus(tt.args.ctx, tt.args.href, tt.args.status) - if (err != nil) != tt.wantErr { - t.Errorf("Client.updateStatus() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !cmp.Equal(got, tt.want) { - t.Errorf("%q. Client.updateStatus() = -got/+want %s", tt.name, cmp.Diff(got, tt.want)) - } - if !reflect.DeepEqual(kapa.UpdateTaskOptions, tt.updateTaskOptions) { - t.Errorf("Client.updateStatus() = %v, want %v", kapa.UpdateTaskOptions, tt.updateTaskOptions) - } - }) - } -} - -func TestClient_Update(t *testing.T) { - type fields struct { - URL string - Username string - Password string - ID chronograf.ID - Ticker chronograf.Ticker - kapaClient func(url, username, password string, insecureSkipVerify bool) (KapaClient, error) - } - type args struct { - ctx context.Context - href string - rule chronograf.AlertRule - } - kapa := &MockKapa{} - tests := []struct { - name string - fields fields - args args - resTask client.Task - want *Task - resError error - wantErr bool - updateTaskOptions *client.UpdateTaskOptions - wantStatus client.TaskStatus - }{ - { - name: "update alert rule error", - fields: fields{ - kapaClient: func(url, username, password string, insecureSkipVerify bool) (KapaClient, error) { - return kapa, nil - }, - Ticker: &Alert{}, - }, - args: args{ - ctx: context.Background(), - href: "/kapacitor/v1/tasks/howdy", - rule: chronograf.AlertRule{ - ID: "howdy", - Query: &chronograf.QueryConfig{ - Database: "db", - RetentionPolicy: "rp", - }, - }, - }, - resError: fmt.Errorf("error"), - updateTaskOptions: &client.UpdateTaskOptions{ - TICKscript: "", - Type: client.StreamTask, - Status: client.Disabled, - DBRPs: []client.DBRP{ - { - Database: "db", - RetentionPolicy: "rp", - }, - }, - }, - wantErr: true, - wantStatus: client.Disabled, - }, - { - name: "update alert rule", - fields: fields{ - kapaClient: func(url, username, password string, insecureSkipVerify bool) (KapaClient, error) { - return kapa, nil - }, - Ticker: &Alert{}, - }, - args: args{ - ctx: context.Background(), - href: "/kapacitor/v1/tasks/howdy", - rule: chronograf.AlertRule{ - ID: "howdy", - Name: "myname", - Query: &chronograf.QueryConfig{ - Database: "db", - RetentionPolicy: "rp", - Measurement: "meas", - Fields: []chronograf.Field{ - { - Type: "field", - Value: "usage_user", - }, - }, - }, - Trigger: "threshold", - 
TriggerValues: chronograf.TriggerValues{ - Operator: greaterThan, - }, - }, - }, - resTask: client.Task{ - ID: "howdy", - Type: client.StreamTask, - DBRPs: []client.DBRP{ - { - Database: "db", - RetentionPolicy: "rp", - }, - }, - Status: client.Enabled, - Link: client.Link{ - Href: "/kapacitor/v1/tasks/howdy", - }, - }, - updateTaskOptions: &client.UpdateTaskOptions{ - TICKscript: "", - Type: client.StreamTask, - Status: client.Disabled, - DBRPs: []client.DBRP{ - { - Database: "db", - RetentionPolicy: "rp", - }, - }, - }, - want: &Task{ - ID: "howdy", - Href: "/kapacitor/v1/tasks/howdy", - HrefOutput: "/kapacitor/v1/tasks/howdy/output", - Rule: chronograf.AlertRule{ - DBRPs: []chronograf.DBRP{ - { - - DB: "db", - RP: "rp", - }, - }, - Status: "enabled", - Type: "stream", - ID: "howdy", - Name: "howdy", - }, - }, - wantStatus: client.Enabled, - }, - { - name: "stays disabled when already disabled", - fields: fields{ - kapaClient: func(url, username, password string, insecureSkipVerify bool) (KapaClient, error) { - return kapa, nil - }, - Ticker: &Alert{}, - }, - args: args{ - ctx: context.Background(), - href: "/kapacitor/v1/tasks/howdy", - rule: chronograf.AlertRule{ - ID: "howdy", - Name: "myname", - Query: &chronograf.QueryConfig{ - Database: "db", - RetentionPolicy: "rp", - Measurement: "meas", - Fields: []chronograf.Field{ - { - Type: "field", - Value: "usage_user", - }, - }, - }, - Trigger: "threshold", - TriggerValues: chronograf.TriggerValues{ - Operator: greaterThan, - }, - }, - }, - resTask: client.Task{ - ID: "howdy", - Type: client.StreamTask, - DBRPs: []client.DBRP{ - { - Database: "db", - RetentionPolicy: "rp", - }, - }, - Status: client.Disabled, - Link: client.Link{ - Href: "/kapacitor/v1/tasks/howdy", - }, - }, - updateTaskOptions: &client.UpdateTaskOptions{ - TICKscript: "", - Type: client.StreamTask, - Status: client.Disabled, - DBRPs: []client.DBRP{ - { - Database: "db", - RetentionPolicy: "rp", - }, - }, - }, - want: &Task{ - ID: "howdy", - Href: "/kapacitor/v1/tasks/howdy", - HrefOutput: "/kapacitor/v1/tasks/howdy/output", - Rule: chronograf.AlertRule{ - ID: "howdy", - Name: "howdy", - DBRPs: []chronograf.DBRP{ - { - - DB: "db", - RP: "rp", - }, - }, - Status: "disabled", - Type: "stream", - }, - }, - wantStatus: client.Disabled, - }, - { - name: "error because relative cannot have inside range", - wantErr: true, - fields: fields{ - kapaClient: func(url, username, password string, insecureSkipVerify bool) (KapaClient, error) { - return kapa, nil - }, - Ticker: &Alert{}, - }, - args: args{ - ctx: context.Background(), - href: "/kapacitor/v1/tasks/error", - rule: chronograf.AlertRule{ - ID: "error", - Query: &chronograf.QueryConfig{ - Database: "db", - RetentionPolicy: "rp", - Fields: []chronograf.Field{ - { - Value: "usage_user", - Type: "field", - }, - }, - }, - Trigger: Relative, - TriggerValues: chronograf.TriggerValues{ - Operator: insideRange, - }, - }, - }, - }, - { - name: "error because rule has an unknown trigger mechanism", - wantErr: true, - fields: fields{ - kapaClient: func(url, username, password string, insecureSkipVerify bool) (KapaClient, error) { - return kapa, nil - }, - Ticker: &Alert{}, - }, - args: args{ - ctx: context.Background(), - href: "/kapacitor/v1/tasks/error", - rule: chronograf.AlertRule{ - ID: "error", - Query: &chronograf.QueryConfig{ - Database: "db", - RetentionPolicy: "rp", - }, - }, - }, - }, - { - name: "error because query has no fields", - wantErr: true, - fields: fields{ - kapaClient: func(url, username, password string, 
insecureSkipVerify bool) (KapaClient, error) { - return kapa, nil - }, - Ticker: &Alert{}, - }, - args: args{ - ctx: context.Background(), - href: "/kapacitor/v1/tasks/error", - rule: chronograf.AlertRule{ - ID: "error", - Trigger: Threshold, - TriggerValues: chronograf.TriggerValues{ - Period: "1d", - }, - Name: "myname", - Query: &chronograf.QueryConfig{ - Database: "db", - RetentionPolicy: "rp", - Measurement: "meas", - }, - }, - }, - }, - { - name: "error because alert has no name", - wantErr: true, - fields: fields{ - kapaClient: func(url, username, password string, insecureSkipVerify bool) (KapaClient, error) { - return kapa, nil - }, - Ticker: &Alert{}, - }, - args: args{ - ctx: context.Background(), - href: "/kapacitor/v1/tasks/error", - rule: chronograf.AlertRule{ - ID: "error", - Trigger: Deadman, - TriggerValues: chronograf.TriggerValues{ - Period: "1d", - }, - Query: &chronograf.QueryConfig{ - Database: "db", - RetentionPolicy: "rp", - Measurement: "meas", - }, - }, - }, - }, - { - name: "error because alert period cannot be an empty string in deadman alert", - wantErr: true, - fields: fields{ - kapaClient: func(url, username, password string, insecureSkipVerify bool) (KapaClient, error) { - return kapa, nil - }, - Ticker: &Alert{}, - }, - args: args{ - ctx: context.Background(), - href: "/kapacitor/v1/tasks/error", - rule: chronograf.AlertRule{ - ID: "error", - Name: "myname", - Trigger: Deadman, - Query: &chronograf.QueryConfig{ - Database: "db", - RetentionPolicy: "rp", - Measurement: "meas", - }, - }, - }, - }, - } - for _, tt := range tests { - kapa.ResTask = tt.resTask - kapa.UpdateError = tt.resError - t.Run(tt.name, func(t *testing.T) { - c := &Client{ - URL: tt.fields.URL, - Username: tt.fields.Username, - Password: tt.fields.Password, - ID: tt.fields.ID, - Ticker: tt.fields.Ticker, - kapaClient: tt.fields.kapaClient, - } - got, err := c.Update(tt.args.ctx, tt.args.href, tt.args.rule) - if (err != nil) != tt.wantErr { - t.Errorf("Client.Update() error = %v, wantErr %v", err, tt.wantErr) - return - } - if tt.wantErr { - return - } - if !cmp.Equal(got, tt.want) { - t.Errorf("%q. Client.Update() = -got/+want %s", tt.name, cmp.Diff(got, tt.want)) - } - var cmpOptions = cmp.Options{ - cmpopts.IgnoreFields(client.UpdateTaskOptions{}, "TICKscript"), - } - if !cmp.Equal(kapa.UpdateTaskOptions, tt.updateTaskOptions, cmpOptions...) 
{ - t.Errorf("Client.Update() = %s", cmp.Diff(got, tt.updateTaskOptions, cmpOptions...)) - } - if tt.wantStatus != kapa.LastStatus { - t.Errorf("Client.Update() = %v, want %v", kapa.LastStatus, tt.wantStatus) - } - }) - } -} - -func TestClient_Create(t *testing.T) { - type fields struct { - URL string - Username string - Password string - ID chronograf.ID - Ticker chronograf.Ticker - kapaClient func(url, username, password string, insecureSkipVerify bool) (KapaClient, error) - } - type args struct { - ctx context.Context - rule chronograf.AlertRule - } - kapa := &MockKapa{} - tests := []struct { - name string - fields fields - args args - resTask client.Task - want *Task - resError error - wantErr bool - createTaskOptions *client.CreateTaskOptions - }{ - { - name: "create alert rule with tags", - fields: fields{ - kapaClient: func(url, username, password string, insecureSkipVerify bool) (KapaClient, error) { - return kapa, nil - }, - Ticker: &Alert{}, - ID: &MockID{ - ID: "howdy", - }, - }, - args: args{ - ctx: context.Background(), - rule: chronograf.AlertRule{ - ID: "", - Name: "myname's", - Query: &chronograf.QueryConfig{ - Database: "db", - RetentionPolicy: "rp", - Measurement: "meas", - GroupBy: chronograf.GroupBy{ - Tags: []string{ - "tag1", - "tag2", - }, - }, - }, - Trigger: Deadman, - TriggerValues: chronograf.TriggerValues{ - Period: "1d", - }, - }, - }, - resTask: client.Task{ - ID: "chronograf-v1-howdy", - Status: client.Enabled, - Type: client.StreamTask, - DBRPs: []client.DBRP{ - { - Database: "db", - RetentionPolicy: "rp", - }, - }, - Link: client.Link{ - Href: "/kapacitor/v1/tasks/chronograf-v1-howdy", - }, - }, - createTaskOptions: &client.CreateTaskOptions{ - TICKscript: `var db = 'db' - -var rp = 'rp' - -var measurement = 'meas' - -var groupBy = ['tag1', 'tag2'] - -var whereFilter = lambda: TRUE - -var period = 1d - -var name = 'myname\'s' - -var idVar = name + ':{{.Group}}' - -var message = '' - -var idTag = 'alertID' - -var levelTag = 'level' - -var messageField = 'message' - -var durationField = 'duration' - -var outputDB = 'chronograf' - -var outputRP = 'autogen' - -var outputMeasurement = 'alerts' - -var triggerType = 'deadman' - -var threshold = 0.0 - -var data = stream - |from() - .database(db) - .retentionPolicy(rp) - .measurement(measurement) - .groupBy(groupBy) - .where(whereFilter) - -var trigger = data - |deadman(threshold, period) - .stateChangesOnly() - .message(message) - .id(idVar) - .idTag(idTag) - .levelTag(levelTag) - .messageField(messageField) - .durationField(durationField) - -trigger - |eval(lambda: "emitted") - .as('value') - .keep('value', messageField, durationField) - |eval(lambda: float("value")) - .as('value') - .keep() - |influxDBOut() - .create() - .database(outputDB) - .retentionPolicy(outputRP) - .measurement(outputMeasurement) - .tag('alertName', name) - .tag('triggerType', triggerType) - -trigger - |httpOut('output') -`, - - ID: "chronograf-v1-howdy", - Type: client.StreamTask, - Status: client.Enabled, - DBRPs: []client.DBRP{ - { - Database: "db", - RetentionPolicy: "rp", - }, - }, - }, - want: &Task{ - ID: "chronograf-v1-howdy", - Href: "/kapacitor/v1/tasks/chronograf-v1-howdy", - HrefOutput: "/kapacitor/v1/tasks/chronograf-v1-howdy/output", - Rule: chronograf.AlertRule{ - Type: "stream", - DBRPs: []chronograf.DBRP{ - { - - DB: "db", - RP: "rp", - }, - }, - Status: "enabled", - ID: "chronograf-v1-howdy", - Name: "chronograf-v1-howdy", - }, - }, - }, - { - name: "create alert rule with no tags", - fields: fields{ - kapaClient: 
func(url, username, password string, insecureSkipVerify bool) (KapaClient, error) { - return kapa, nil - }, - Ticker: &Alert{}, - ID: &MockID{ - ID: "howdy", - }, - }, - args: args{ - ctx: context.Background(), - rule: chronograf.AlertRule{ - ID: "", - Name: "myname's", - Query: &chronograf.QueryConfig{ - Database: "db", - RetentionPolicy: "rp", - Measurement: "meas", - }, - Trigger: Deadman, - TriggerValues: chronograf.TriggerValues{ - Period: "1d", - }, - }, - }, - resTask: client.Task{ - ID: "chronograf-v1-howdy", - Status: client.Enabled, - Type: client.StreamTask, - DBRPs: []client.DBRP{ - { - Database: "db", - RetentionPolicy: "rp", - }, - }, - Link: client.Link{ - Href: "/kapacitor/v1/tasks/chronograf-v1-howdy", - }, - }, - createTaskOptions: &client.CreateTaskOptions{ - TICKscript: `var db = 'db' - -var rp = 'rp' - -var measurement = 'meas' - -var groupBy = [] - -var whereFilter = lambda: TRUE - -var period = 1d - -var name = 'myname\'s' - -var idVar = name - -var message = '' - -var idTag = 'alertID' - -var levelTag = 'level' - -var messageField = 'message' - -var durationField = 'duration' - -var outputDB = 'chronograf' - -var outputRP = 'autogen' - -var outputMeasurement = 'alerts' - -var triggerType = 'deadman' - -var threshold = 0.0 - -var data = stream - |from() - .database(db) - .retentionPolicy(rp) - .measurement(measurement) - .groupBy(groupBy) - .where(whereFilter) - -var trigger = data - |deadman(threshold, period) - .stateChangesOnly() - .message(message) - .id(idVar) - .idTag(idTag) - .levelTag(levelTag) - .messageField(messageField) - .durationField(durationField) - -trigger - |eval(lambda: "emitted") - .as('value') - .keep('value', messageField, durationField) - |eval(lambda: float("value")) - .as('value') - .keep() - |influxDBOut() - .create() - .database(outputDB) - .retentionPolicy(outputRP) - .measurement(outputMeasurement) - .tag('alertName', name) - .tag('triggerType', triggerType) - -trigger - |httpOut('output') -`, - - ID: "chronograf-v1-howdy", - Type: client.StreamTask, - Status: client.Enabled, - DBRPs: []client.DBRP{ - { - Database: "db", - RetentionPolicy: "rp", - }, - }, - }, - want: &Task{ - ID: "chronograf-v1-howdy", - Href: "/kapacitor/v1/tasks/chronograf-v1-howdy", - HrefOutput: "/kapacitor/v1/tasks/chronograf-v1-howdy/output", - Rule: chronograf.AlertRule{ - Type: "stream", - DBRPs: []chronograf.DBRP{ - { - - DB: "db", - RP: "rp", - }, - }, - Status: "enabled", - ID: "chronograf-v1-howdy", - Name: "chronograf-v1-howdy", - }, - }, - }, - { - name: "create alert rule error", - fields: fields{ - kapaClient: func(url, username, password string, insecureSkipVerify bool) (KapaClient, error) { - return kapa, nil - }, - Ticker: &Alert{}, - ID: &MockID{ - ID: "howdy", - }, - }, - args: args{ - ctx: context.Background(), - rule: chronograf.AlertRule{ - ID: "howdy", - Query: &chronograf.QueryConfig{ - Database: "db", - RetentionPolicy: "rp", - }, - }, - }, - resError: fmt.Errorf("error"), - createTaskOptions: &client.CreateTaskOptions{ - ID: "chronograf-v1-howdy", - Type: client.StreamTask, - Status: client.Enabled, - DBRPs: []client.DBRP{ - { - Database: "db", - RetentionPolicy: "rp", - }, - }, - }, - wantErr: true, - }, - } - for _, tt := range tests { - kapa.ResTask = tt.resTask - kapa.CreateError = tt.resError - t.Run(tt.name, func(t *testing.T) { - c := &Client{ - URL: tt.fields.URL, - Username: tt.fields.Username, - Password: tt.fields.Password, - ID: tt.fields.ID, - Ticker: tt.fields.Ticker, - kapaClient: tt.fields.kapaClient, - } - got, err := 
c.Create(tt.args.ctx, tt.args.rule) - if (err != nil) != tt.wantErr { - t.Errorf("Client.Create() error = %v, wantErr %v", err, tt.wantErr) - return - } - if tt.wantErr { - return - } - if !cmp.Equal(got, tt.want) { - t.Errorf("%q. Client.Create() = -got/+want %s", tt.name, cmp.Diff(got, tt.want)) - } - if !reflect.DeepEqual(kapa.CreateTaskOptions, tt.createTaskOptions) { - t.Errorf("Client.Create() = %v, want %v", kapa.CreateTaskOptions, tt.createTaskOptions) - } - }) - } -} diff --git a/chronograf/.kapacitor/data.go b/chronograf/.kapacitor/data.go deleted file mode 100644 index a8dc218ca24..00000000000 --- a/chronograf/.kapacitor/data.go +++ /dev/null @@ -1,63 +0,0 @@ -package kapacitor - -import ( - "fmt" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -// Data returns the tickscript data section for querying -func Data(rule chronograf.AlertRule) (string, error) { - if rule.Query.RawText != nil && *rule.Query.RawText != "" { - batch := ` - var data = batch - |query(''' - %s - ''') - .period(period) - .every(every) - .align()` - batch = fmt.Sprintf(batch, rule.Query.RawText) - if rule.Query.GroupBy.Time != "" { - batch = batch + fmt.Sprintf(".groupBy(%s)", rule.Query.GroupBy.Time) - } - return batch, nil - } - stream := `var data = stream - |from() - .database(db) - .retentionPolicy(rp) - .measurement(measurement) - ` - - stream = fmt.Sprintf("%s\n.groupBy(groupBy)\n", stream) - stream = stream + ".where(whereFilter)\n" - // Only need aggregate functions for threshold and relative - - if rule.Trigger != "deadman" { - fld, err := field(rule.Query) - if err != nil { - return "", err - } - value := "" - for _, field := range rule.Query.Fields { - if field.Type == "func" && len(field.Args) > 0 && field.Args[0].Type == "field" { - // Only need a window if we have an aggregate function - value = value + "|window().period(period).every(every).align()\n" - value = value + fmt.Sprintf(`|%s('%s').as('value')`, field.Value, field.Args[0].Value) - break // only support a single field - } - if value != "" { - break // only support a single field - } - if field.Type == "field" { - value = fmt.Sprintf(`|eval(lambda: "%s").as('value')`, field.Value) - } - } - if value == "" { - value = fmt.Sprintf(`|eval(lambda: "%s").as('value')`, fld) - } - stream = stream + value - } - return stream, nil -} diff --git a/chronograf/.kapacitor/data_test.go b/chronograf/.kapacitor/data_test.go deleted file mode 100644 index 35a4544e4a3..00000000000 --- a/chronograf/.kapacitor/data_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package kapacitor - -import ( - "encoding/json" - "fmt" - "testing" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -var config = `{ - "id": "93e17825-2fb0-4507-87bd-a0c136947f7e", - "database": "telegraf", - "measurement": "cpu", - "retentionPolicy": "default", - "fields": [{ - "field": "usage_user", - "funcs": ["mean"] - }], - "tags": { - "host": [ - "acc-0eabc309-eu-west-1-data-3", - "prod" - ], - "cpu": [ - "cpu_total" - ] - }, - "groupBy": { - "time": null, - "tags": [ - "host", - "cluster_id" - ] - }, - "areTagsAccepted": true, - "rawText": null -}` - -func TestData(t *testing.T) { - q := chronograf.QueryConfig{} - err := json.Unmarshal([]byte(config), &q) - if err != nil { - t.Errorf("Error unmarshalling %v", err) - } - alert := chronograf.AlertRule{ - Trigger: "deadman", - Query: &q, - } - if tick, err := Data(alert); err != nil { - t.Errorf("Error creating tick %v", err) - } else { - _, err := formatTick(tick) - if err != nil { - fmt.Print(tick) - t.Errorf("Error 
formatting tick %v", err) - } - } - -} diff --git a/chronograf/.kapacitor/errors.go b/chronograf/.kapacitor/errors.go deleted file mode 100644 index e57cd839b93..00000000000 --- a/chronograf/.kapacitor/errors.go +++ /dev/null @@ -1,12 +0,0 @@ -package kapacitor - -// ErrNotChronoTickscript signals a TICKscript that cannot be parsed into -// chronograf data structure. -const ErrNotChronoTickscript = Error("TICKscript not built with chronograf builder") - -// Error are kapacitor errors due to communication or processing of TICKscript to kapacitor -type Error string - -func (e Error) Error() string { - return string(e) -} diff --git a/chronograf/.kapacitor/http_out.go b/chronograf/.kapacitor/http_out.go deleted file mode 100644 index ec569144d6c..00000000000 --- a/chronograf/.kapacitor/http_out.go +++ /dev/null @@ -1,15 +0,0 @@ -package kapacitor - -import ( - "fmt" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -// HTTPEndpoint is the default location of the tickscript output -const HTTPEndpoint = "output" - -// HTTPOut adds a kapacitor httpOutput to a tickscript -func HTTPOut(rule chronograf.AlertRule) (string, error) { - return fmt.Sprintf(`trigger|httpOut('%s')`, HTTPEndpoint), nil -} diff --git a/chronograf/.kapacitor/influxout.go b/chronograf/.kapacitor/influxout.go deleted file mode 100644 index 8cf507a4473..00000000000 --- a/chronograf/.kapacitor/influxout.go +++ /dev/null @@ -1,34 +0,0 @@ -package kapacitor - -import ( - "fmt" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -// InfluxOut creates a kapacitor influxDBOut node to write alert data to Database, RP, Measurement. -func InfluxOut(rule chronograf.AlertRule) (string, error) { - // For some of the alert, the data needs to be renamed (normalized) - // before being sent to influxdb. - - rename := "" - if rule.Trigger == "deadman" { - rename = `|eval(lambda: "emitted") - .as('value') - .keep('value', messageField, durationField)` - } - return fmt.Sprintf(` - trigger - %s - |eval(lambda: float("value")) - .as('value') - .keep() - |influxDBOut() - .create() - .database(outputDB) - .retentionPolicy(outputRP) - .measurement(outputMeasurement) - .tag('alertName', name) - .tag('triggerType', triggerType) - `, rename), nil -} diff --git a/chronograf/.kapacitor/influxout_test.go b/chronograf/.kapacitor/influxout_test.go deleted file mode 100644 index faeef743926..00000000000 --- a/chronograf/.kapacitor/influxout_test.go +++ /dev/null @@ -1,62 +0,0 @@ -package kapacitor - -import "testing" -import "github.com/influxdata/influxdb/v2/chronograf" - -func TestInfluxOut(t *testing.T) { - tests := []struct { - name string - want chronograf.TICKScript - }{ - { - name: "Test influxDBOut kapacitor node", - want: `trigger - |eval(lambda: "emitted") - .as('value') - .keep('value', messageField, durationField) - |eval(lambda: float("value")) - .as('value') - .keep() - |influxDBOut() - .create() - .database(outputDB) - .retentionPolicy(outputRP) - .measurement(outputMeasurement) - .tag('alertName', name) - .tag('triggerType', triggerType) -`, - }, - } - for _, tt := range tests { - got, err := InfluxOut(chronograf.AlertRule{ - Name: "name", - Trigger: "deadman", - Query: &chronograf.QueryConfig{ - Fields: []chronograf.Field{ - { - Value: "mean", - Type: "func", - Args: []chronograf.Field{ - { - Value: "usage_user", - Type: "field", - }, - }, - }, - }, - }, - }) - if err != nil { - t.Errorf("%q. 
InfluxOut()) error = %v", tt.name, err) - continue - } - formatted, err := formatTick(got) - if err != nil { - t.Errorf("%q. formatTick() error = %v", tt.name, err) - continue - } - if formatted != tt.want { - t.Errorf("%q. InfluxOut() = %v, want %v", tt.name, formatted, tt.want) - } - } -} diff --git a/chronograf/.kapacitor/kapa_client.go b/chronograf/.kapacitor/kapa_client.go deleted file mode 100644 index 7d5db8a2969..00000000000 --- a/chronograf/.kapacitor/kapa_client.go +++ /dev/null @@ -1,113 +0,0 @@ -package kapacitor - -import ( - "sync" - - client "github.com/influxdata/kapacitor/client/v1" -) - -const ( - // ListTaskWorkers describes the number of workers concurrently fetching - // tasks from Kapacitor. This constant was chosen after some benchmarking - // work and should likely work well for quad-core systems - ListTaskWorkers = 4 - - // TaskGatherers is the number of workers collating responses from - // ListTaskWorkers. There can only be one without additional synchronization - // around the output buffer from ListTasks - TaskGatherers = 1 -) - -// ensure PaginatingKapaClient is a KapaClient -var _ KapaClient = &PaginatingKapaClient{} - -// PaginatingKapaClient is a Kapacitor client that automatically navigates -// through Kapacitor's pagination to fetch all results -type PaginatingKapaClient struct { - KapaClient - FetchRate int // specifies the number of elements to fetch from Kapacitor at a time -} - -// ListTasks lists all available tasks from Kapacitor, navigating pagination as -// it fetches them -func (p *PaginatingKapaClient) ListTasks(opts *client.ListTasksOptions) ([]client.Task, error) { - // only trigger auto-pagination with Offset=0 and Limit=0 - if opts.Limit != 0 || opts.Offset != 0 { - return p.KapaClient.ListTasks(opts) - } - - allTasks := []client.Task{} - - optChan := make(chan client.ListTasksOptions) - taskChan := make(chan []client.Task, ListTaskWorkers) - done := make(chan struct{}) - - var once sync.Once - - go p.generateKapacitorOptions(optChan, *opts, done) - - var wg sync.WaitGroup - - wg.Add(ListTaskWorkers) - for i := 0; i < ListTaskWorkers; i++ { - go p.fetchFromKapacitor(optChan, &wg, &once, taskChan, done) - } - - var gatherWg sync.WaitGroup - gatherWg.Add(TaskGatherers) - go func() { - for task := range taskChan { - allTasks = append(allTasks, task...) - } - gatherWg.Done() - }() - - wg.Wait() - close(taskChan) - gatherWg.Wait() - - return allTasks, nil -} - -// fetchFromKapacitor fetches a set of results from a kapacitor by reading a -// set of options from the provided optChan. 
Fetched tasks are pushed onto the -// provided taskChan -func (p *PaginatingKapaClient) fetchFromKapacitor(optChan chan client.ListTasksOptions, wg *sync.WaitGroup, closer *sync.Once, taskChan chan []client.Task, done chan struct{}) { - defer wg.Done() - for opt := range optChan { - resp, err := p.KapaClient.ListTasks(&opt) - if err != nil { - return - } - - // break and stop all workers if we're done - if len(resp) == 0 { - closer.Do(func() { - close(done) - }) - return - } - - // handoff tasks to consumer - taskChan <- resp - } -} - -// generateKapacitorOptions creates ListTasksOptions with incrementally greater -// Limit and Offset parameters, and inserts them into the provided optChan -func (p *PaginatingKapaClient) generateKapacitorOptions(optChan chan client.ListTasksOptions, opts client.ListTasksOptions, done chan struct{}) { - // ensure Limit and Offset start from known quantities - opts.Limit = p.FetchRate - opts.Offset = 0 - - for { - select { - case <-done: - close(optChan) - return - case optChan <- opts: - // nop - } - opts.Offset = p.FetchRate + opts.Offset - } -} diff --git a/chronograf/.kapacitor/kapa_client_benchmark_test.go b/chronograf/.kapacitor/kapa_client_benchmark_test.go deleted file mode 100644 index 0d2dc08d965..00000000000 --- a/chronograf/.kapacitor/kapa_client_benchmark_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package kapacitor_test - -import ( - "testing" - - "github.com/influxdata/influxdb/v2/chronograf/kapacitor" - "github.com/influxdata/influxdb/v2/chronograf/mocks" - client "github.com/influxdata/kapacitor/client/v1" -) - -func BenchmarkKapaClient100(b *testing.B) { benchmark_PaginatingKapaClient(100, b) } -func BenchmarkKapaClient1000(b *testing.B) { benchmark_PaginatingKapaClient(1000, b) } -func BenchmarkKapaClient10000(b *testing.B) { benchmark_PaginatingKapaClient(10000, b) } -func BenchmarkKapaClient100000(b *testing.B) { benchmark_PaginatingKapaClient(100000, b) } - -var tasks []client.Task - -func benchmark_PaginatingKapaClient(taskCount int, b *testing.B) { - - b.StopTimer() // eliminate setup time - - // create a mock client that will return a huge response from ListTasks - mockClient := &mocks.KapaClient{ - ListTasksF: func(opts *client.ListTasksOptions) ([]client.Task, error) { - // create all the tasks - allTasks := make([]client.Task, taskCount) - - begin := opts.Offset - end := opts.Offset + opts.Limit - - if end > len(allTasks) { - end = len(allTasks) - } - - if begin > len(allTasks) { - begin = end - } - - return allTasks[begin:end], nil - }, - } - - pkap := kapacitor.PaginatingKapaClient{ - KapaClient: mockClient, - FetchRate: 50, - } - - opts := &client.ListTasksOptions{} - - b.StartTimer() // eliminate setup time - - // let the benchmark runner run ListTasks until it's satisfied - for n := 0; n < b.N; n++ { - // assignment is to avoid having the call optimized away - tasks, _ = pkap.ListTasks(opts) - } -} diff --git a/chronograf/.kapacitor/kapa_client_test.go b/chronograf/.kapacitor/kapa_client_test.go deleted file mode 100644 index c9e5ce5220b..00000000000 --- a/chronograf/.kapacitor/kapa_client_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package kapacitor_test - -import ( - "testing" - - "github.com/influxdata/influxdb/v2/chronograf/kapacitor" - "github.com/influxdata/influxdb/v2/chronograf/mocks" - client "github.com/influxdata/kapacitor/client/v1" -) - -func Test_Kapacitor_PaginatingKapaClient(t *testing.T) { - const lenAllTasks = 227 // prime, to stress odd result sets - - // create a mock 
client that will return a huge response from ListTasks - mockClient := &mocks.KapaClient{ - ListTasksF: func(opts *client.ListTasksOptions) ([]client.Task, error) { - // create all the tasks - allTasks := []client.Task{} - for i := 0; i < lenAllTasks; i++ { - allTasks = append(allTasks, client.Task{}) - } - begin := opts.Offset - end := opts.Offset + opts.Limit - - if end > len(allTasks) { - end = len(allTasks) - } - - if begin > len(allTasks) { - begin = end - } - - return allTasks[begin:end], nil - }, - } - - pkap := kapacitor.PaginatingKapaClient{ - KapaClient: mockClient, - FetchRate: 50, - } - - opts := &client.ListTasksOptions{ - Limit: 100, - Offset: 0, - } - - // ensure 100 elems returned when calling mockClient directly - tasks, _ := pkap.ListTasks(opts) - - if len(tasks) != 100 { - t.Error("Expected calling KapaClient's ListTasks to return", opts.Limit, "items. Received:", len(tasks)) - } - - // ensure PaginatingKapaClient returns _all_ tasks with 0 value for Limit and Offset - allOpts := &client.ListTasksOptions{} - allTasks, _ := pkap.ListTasks(allOpts) - - if len(allTasks) != lenAllTasks { - t.Error("PaginatingKapaClient: Expected to find", lenAllTasks, "tasks but found", len(allTasks)) - } -} diff --git a/chronograf/.kapacitor/operators.go b/chronograf/.kapacitor/operators.go deleted file mode 100644 index 5b53a1d2038..00000000000 --- a/chronograf/.kapacitor/operators.go +++ /dev/null @@ -1,78 +0,0 @@ -package kapacitor - -import ( - "fmt" -) - -const ( - greaterThan = "greater than" - lessThan = "less than" - lessThanEqual = "equal to or less than" - greaterThanEqual = "equal to or greater" - equal = "equal to" - notEqual = "not equal to" - insideRange = "inside range" - outsideRange = "outside range" -) - -// kapaOperator converts UI strings to kapacitor operators -func kapaOperator(operator string) (string, error) { - switch operator { - case greaterThan: - return ">", nil - case lessThan: - return "<", nil - case lessThanEqual: - return "<=", nil - case greaterThanEqual: - return ">=", nil - case equal: - return "==", nil - case notEqual: - return "!=", nil - default: - return "", fmt.Errorf("invalid operator: %s is unknown", operator) - } -} - -func chronoOperator(operator string) (string, error) { - switch operator { - case ">": - return greaterThan, nil - case "<": - return lessThan, nil - case "<=": - return lessThanEqual, nil - case ">=": - return greaterThanEqual, nil - case "==": - return equal, nil - case "!=": - return notEqual, nil - default: - return "", fmt.Errorf("invalid operator: %s is unknown", operator) - } -} - -func rangeOperators(operator string) ([]string, error) { - switch operator { - case insideRange: - return []string{">=", "AND", "<="}, nil - case outsideRange: - return []string{"<", "OR", ">"}, nil - default: - return nil, fmt.Errorf("invalid operator: %s is unknown", operator) - } -} - -func chronoRangeOperators(ops []string) (string, error) { - if len(ops) != 3 { - return "", fmt.Errorf("unknown operators") - } - if ops[0] == ">=" && ops[1] == "AND" && ops[2] == "<=" { - return insideRange, nil - } else if ops[0] == "<" && ops[1] == "OR" && ops[2] == ">" { - return outsideRange, nil - } - return "", fmt.Errorf("unknown operators") -} diff --git a/chronograf/.kapacitor/pipeline.go b/chronograf/.kapacitor/pipeline.go deleted file mode 100644 index a17db1f5c94..00000000000 --- a/chronograf/.kapacitor/pipeline.go +++ /dev/null @@ -1,37 +0,0 @@ -package kapacitor - -import ( - "bytes" - "encoding/json" - - 
"github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/kapacitor/pipeline" - totick "github.com/influxdata/kapacitor/pipeline/tick" -) - -// MarshalTICK converts tickscript to JSON representation -func MarshalTICK(script string) ([]byte, error) { - pipeline, err := newPipeline(chronograf.TICKScript(script)) - if err != nil { - return nil, err - } - return json.MarshalIndent(pipeline, "", " ") -} - -// UnmarshalTICK converts JSON to tickscript -func UnmarshalTICK(octets []byte) (string, error) { - pipe := &pipeline.Pipeline{} - if err := pipe.Unmarshal(octets); err != nil { - return "", err - } - - ast := totick.AST{} - err := ast.Build(pipe) - if err != nil { - return "", err - } - - var buf bytes.Buffer - ast.Program.Format(&buf, "", false) - return buf.String(), nil -} diff --git a/chronograf/.kapacitor/pipeline_test.go b/chronograf/.kapacitor/pipeline_test.go deleted file mode 100644 index 6c8eb3367be..00000000000 --- a/chronograf/.kapacitor/pipeline_test.go +++ /dev/null @@ -1,341 +0,0 @@ -package kapacitor - -import ( - "fmt" - "testing" - - "github.com/sergi/go-diff/diffmatchpatch" -) - -func TestPipelineJSON(t *testing.T) { - script := `var db = 'telegraf' - -var rp = 'autogen' - -var measurement = 'cpu' - -var groupBy = ['host', 'cluster_id'] - -var whereFilter = lambda: ("cpu" == 'cpu_total') AND ("host" == 'acc-0eabc309-eu-west-1-data-3' OR "host" == 'prod') - -var period = 10m - -var every = 30s - -var name = 'name' - -var idVar = name + ':{{.Group}}' - -var message = 'message' - -var idTag = 'alertID' - -var levelTag = 'level' - -var messageField = 'message' - -var durationField = 'duration' - -var outputDB = 'chronograf' - -var outputRP = 'autogen' - -var outputMeasurement = 'alerts' - -var triggerType = 'threshold' - -var crit = 90 - -var data = stream - |from() - .database(db) - .retentionPolicy(rp) - .measurement(measurement) - .groupBy(groupBy) - .where(whereFilter) - |window() - .period(period) - .every(every) - .align() - |mean('usage_user') - .as('value') - -var trigger = data - |alert() - .crit(lambda: "value" > crit) - .stateChangesOnly() - .message(message) - .id(idVar) - .idTag(idTag) - .levelTag(levelTag) - .messageField(messageField) - .durationField(durationField) - .slack() - .victorOps() - .email() - -trigger - |influxDBOut() - .create() - .database(outputDB) - .retentionPolicy(outputRP) - .measurement(outputMeasurement) - .tag('alertName', name) - .tag('triggerType', triggerType) - -trigger - |httpOut('output') -` - - want := `var alert4 = stream - |from() - .database('telegraf') - .retentionPolicy('autogen') - .measurement('cpu') - .where(lambda: "cpu" == 'cpu_total' AND "host" == 'acc-0eabc309-eu-west-1-data-3' OR "host" == 'prod') - .groupBy('host', 'cluster_id') - |window() - .period(10m) - .every(30s) - .align() - |mean('usage_user') - .as('value') - |alert() - .id('name:{{.Group}}') - .message('message') - .details('{{ json . 
}}') - .crit(lambda: "value" > 90) - .history(21) - .levelTag('level') - .messageField('message') - .durationField('duration') - .idTag('alertID') - .stateChangesOnly() - .email() - .victorOps() - .slack() - -alert4 - |httpOut('output') - -alert4 - |influxDBOut() - .database('chronograf') - .retentionPolicy('autogen') - .measurement('alerts') - .buffer(1000) - .flushInterval(10s) - .create() - .tag('alertName', 'name') - .tag('triggerType', 'threshold') -` - - octets, err := MarshalTICK(script) - if err != nil { - t.Fatalf("MarshalTICK unexpected error %v", err) - } - - got, err := UnmarshalTICK(octets) - if err != nil { - t.Fatalf("UnmarshalTICK unexpected error %v", err) - } - - if got != want { - fmt.Println(got) - diff := diffmatchpatch.New() - delta := diff.DiffMain(want, got, true) - t.Errorf("%s", diff.DiffPrettyText(delta)) - } -} -func TestPipelineJSONDeadman(t *testing.T) { - script := `var db = 'telegraf' - - var rp = 'autogen' - - var measurement = 'cpu' - - var groupBy = ['host', 'cluster_id'] - - var whereFilter = lambda: ("cpu" == 'cpu_total') AND ("host" == 'acc-0eabc309-eu-west-1-data-3' OR "host" == 'prod') - - var period = 10m - - var name = 'name' - - var idVar = name + ':{{.Group}}' - - var message = 'message' - - var idTag = 'alertID' - - var levelTag = 'level' - - var messageField = 'message' - - var durationField = 'duration' - - var outputDB = 'chronograf' - - var outputRP = 'autogen' - - var outputMeasurement = 'alerts' - - var triggerType = 'deadman' - - var threshold = 0.0 - - var data = stream - |from() - .database(db) - .retentionPolicy(rp) - .measurement(measurement) - .groupBy(groupBy) - .where(whereFilter) - - var trigger = data - |deadman(threshold, period) - .stateChangesOnly() - .message(message) - .id(idVar) - .idTag(idTag) - .levelTag(levelTag) - .messageField(messageField) - .durationField(durationField) - .slack() - .victorOps() - .email() - - trigger - |eval(lambda: "emitted") - .as('value') - .keep('value', messageField, durationField) - |influxDBOut() - .create() - .database(outputDB) - .retentionPolicy(outputRP) - .measurement(outputMeasurement) - .tag('alertName', name) - .tag('triggerType', triggerType) - - trigger - |httpOut('output') -` - - wantA := `var from1 = stream - |from() - .database('telegraf') - .retentionPolicy('autogen') - .measurement('cpu') - .where(lambda: "cpu" == 'cpu_total' AND "host" == 'acc-0eabc309-eu-west-1-data-3' OR "host" == 'prod') - .groupBy('host', 'cluster_id') - -var alert5 = from1 - |stats(10m) - .align() - |derivative('emitted') - .as('emitted') - .unit(10m) - .nonNegative() - |alert() - .id('name:{{.Group}}') - .message('message') - .details('{{ json . 
}}') - .crit(lambda: "emitted" <= 0.0) - .history(21) - .levelTag('level') - .messageField('message') - .durationField('duration') - .idTag('alertID') - .stateChangesOnly() - .email() - .victorOps() - .slack() - -alert5 - |httpOut('output') - -alert5 - |eval(lambda: "emitted") - .as('value') - .tags() - .keep('value', 'message', 'duration') - |influxDBOut() - .database('chronograf') - .retentionPolicy('autogen') - .measurement('alerts') - .buffer(1000) - .flushInterval(10s) - .create() - .tag('alertName', 'name') - .tag('triggerType', 'deadman') -` - - wantB := `var from1 = stream - |from() - .database('telegraf') - .retentionPolicy('autogen') - .measurement('cpu') - .where(lambda: "cpu" == 'cpu_total' AND "host" == 'acc-0eabc309-eu-west-1-data-3' OR "host" == 'prod') - .groupBy('host', 'cluster_id') - -var alert5 = from1 - |stats(10m) - .align() - |derivative('emitted') - .as('emitted') - .unit(10m) - .nonNegative() - |alert() - .id('name:{{.Group}}') - .message('message') - .details('{{ json . }}') - .crit(lambda: "emitted" <= 0.0) - .history(21) - .levelTag('level') - .messageField('message') - .durationField('duration') - .idTag('alertID') - .stateChangesOnly() - .email() - .victorOps() - .slack() - -alert5 - |eval(lambda: "emitted") - .as('value') - .tags() - .keep('value', 'message', 'duration') - |influxDBOut() - .database('chronograf') - .retentionPolicy('autogen') - .measurement('alerts') - .buffer(1000) - .flushInterval(10s) - .create() - .tag('alertName', 'name') - .tag('triggerType', 'deadman') - -alert5 - |httpOut('output') -` - - octets, err := MarshalTICK(script) - if err != nil { - t.Fatalf("MarshalTICK unexpected error %v", err) - } - got, err := UnmarshalTICK(octets) - if err != nil { - t.Fatalf("UnmarshalTICK unexpected error %v", err) - } - - if got != wantA && got != wantB { - want := wantA - fmt.Println("got") - fmt.Println(got) - fmt.Println("want") - fmt.Println(want) - diff := diffmatchpatch.New() - delta := diff.DiffMain(want, got, true) - t.Errorf("%s", diff.DiffPrettyText(delta)) - } -} diff --git a/chronograf/.kapacitor/tickscripts.go b/chronograf/.kapacitor/tickscripts.go deleted file mode 100644 index d2c21d2c439..00000000000 --- a/chronograf/.kapacitor/tickscripts.go +++ /dev/null @@ -1,50 +0,0 @@ -package kapacitor - -import ( - "fmt" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -var _ chronograf.Ticker = &Alert{} - -// Alert defines alerting strings in template rendering -type Alert struct{} - -// Generate creates a Tickscript from the alertrule -func (a *Alert) Generate(rule chronograf.AlertRule) (chronograf.TICKScript, error) { - vars, err := Vars(rule) - if err != nil { - return "", err - } - data, err := Data(rule) - if err != nil { - return "", err - } - trigger, err := Trigger(rule) - if err != nil { - return "", err - } - services, err := AlertServices(rule) - if err != nil { - return "", err - } - output, err := InfluxOut(rule) - if err != nil { - return "", err - } - http, err := HTTPOut(rule) - if err != nil { - return "", err - } - - raw := fmt.Sprintf("%s\n%s\n%s%s\n%s\n%s", vars, data, trigger, services, output, http) - tick, err := formatTick(raw) - if err != nil { - return "", err - } - if err := validateTick(tick); err != nil { - return tick, err - } - return tick, nil -} diff --git a/chronograf/.kapacitor/tickscripts_test.go b/chronograf/.kapacitor/tickscripts_test.go deleted file mode 100644 index 402c3d6fa47..00000000000 --- a/chronograf/.kapacitor/tickscripts_test.go +++ /dev/null @@ -1,1625 +0,0 @@ -package kapacitor 
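// A usage sketch for Generate above; the rule values are illustrative and
// mirror the fixtures in the tests that follow:
//
//	gen := Alert{}
//	rule := chronograf.AlertRule{
//		Name:    "name",
//		Trigger: "threshold",
//		TriggerValues: chronograf.TriggerValues{
//			Operator: "greater than",
//			Value:    "90",
//		},
//		Every: "30s",
//		Query: &chronograf.QueryConfig{
//			Database:        "telegraf",
//			RetentionPolicy: "autogen",
//			Measurement:     "cpu",
//			Fields:          []chronograf.Field{{Value: "usage_user", Type: "field"}},
//		},
//	}
//	tick, err := gen.Generate(rule)
//	// tick is the assembled, formatted TICKscript; err is non-nil if any
//	// fragment failed to render or the final script failed validation.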
- -import ( - "fmt" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/sergi/go-diff/diffmatchpatch" -) - -func TestGenerate(t *testing.T) { - alert := chronograf.AlertRule{ - Name: "name", - Trigger: "relative", - AlertNodes: chronograf.AlertNodes{ - Slack: []*chronograf.Slack{{}}, - VictorOps: []*chronograf.VictorOps{{}}, - Email: []*chronograf.Email{{}}, - }, - TriggerValues: chronograf.TriggerValues{ - Change: "change", - Shift: "1m", - Operator: "greater than", - Value: "90", - }, - Every: "30s", - Query: &chronograf.QueryConfig{ - Database: "telegraf", - Measurement: "cpu", - RetentionPolicy: "autogen", - Fields: []chronograf.Field{ - { - Value: "mean", - Type: "func", - Args: []chronograf.Field{ - { - Value: "usage_user", - Type: "field", - }, - }, - }, - }, - Tags: map[string][]string{ - "host": []string{ - "acc-0eabc309-eu-west-1-data-3", - "prod", - }, - "cpu": []string{ - "cpu_total", - }, - }, - GroupBy: chronograf.GroupBy{ - Time: "10m", - Tags: []string{"host", "cluster_id"}, - }, - AreTagsAccepted: true, - }, - } - gen := Alert{} - tick, err := gen.Generate(alert) - if err != nil { - fmt.Printf("%s", tick) - t.Errorf("Error generating alert: %v %s", err, tick) - } -} - -func TestThreshold(t *testing.T) { - alert := chronograf.AlertRule{ - Name: "name", - Trigger: "threshold", - AlertNodes: chronograf.AlertNodes{ - Slack: []*chronograf.Slack{{}}, - VictorOps: []*chronograf.VictorOps{{}}, - Email: []*chronograf.Email{{}}, - }, - TriggerValues: chronograf.TriggerValues{ - Operator: "greater than", - Value: "90", - }, - Every: "30s", - Message: "message", - Query: &chronograf.QueryConfig{ - Database: "telegraf", - Measurement: "cpu", - RetentionPolicy: "autogen", - Fields: []chronograf.Field{ - { - Value: "mean", - Type: "func", - Args: []chronograf.Field{ - { - Value: "usage_user", - Type: "field", - }, - }, - }, - }, - Tags: map[string][]string{ - "host": []string{ - "acc-0eabc309-eu-west-1-data-3", - "prod", - }, - "cpu": []string{ - "cpu_total", - }, - }, - GroupBy: chronograf.GroupBy{ - Time: "10m", - Tags: []string{"host", "cluster_id"}, - }, - AreTagsAccepted: true, - }, - } - - tests := []struct { - name string - alert chronograf.AlertRule - want chronograf.TICKScript - wantErr bool - }{ - { - name: "Test valid template alert", - alert: alert, - want: `var db = 'telegraf' - -var rp = 'autogen' - -var measurement = 'cpu' - -var groupBy = ['host', 'cluster_id'] - -var whereFilter = lambda: ("cpu" == 'cpu_total') AND ("host" == 'acc-0eabc309-eu-west-1-data-3' OR "host" == 'prod') - -var period = 10m - -var every = 30s - -var name = 'name' - -var idVar = name + ':{{.Group}}' - -var message = 'message' - -var idTag = 'alertID' - -var levelTag = 'level' - -var messageField = 'message' - -var durationField = 'duration' - -var outputDB = 'chronograf' - -var outputRP = 'autogen' - -var outputMeasurement = 'alerts' - -var triggerType = 'threshold' - -var crit = 90 - -var data = stream - |from() - .database(db) - .retentionPolicy(rp) - .measurement(measurement) - .groupBy(groupBy) - .where(whereFilter) - |window() - .period(period) - .every(every) - .align() - |mean('usage_user') - .as('value') - -var trigger = data - |alert() - .crit(lambda: "value" > crit) - .stateChangesOnly() - .message(message) - .id(idVar) - .idTag(idTag) - .levelTag(levelTag) - .messageField(messageField) - .durationField(durationField) - .email() - .victorOps() - .slack() - -trigger - |eval(lambda: float("value")) - 
.as('value') - .keep() - |influxDBOut() - .create() - .database(outputDB) - .retentionPolicy(outputRP) - .measurement(outputMeasurement) - .tag('alertName', name) - .tag('triggerType', triggerType) - -trigger - |httpOut('output') -`, - wantErr: false, - }, - } - for _, tt := range tests { - gen := Alert{} - got, err := gen.Generate(tt.alert) - if (err != nil) != tt.wantErr { - t.Errorf("%q. Threshold() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - if got != tt.want { - diff := diffmatchpatch.New() - delta := diff.DiffMain(string(tt.want), string(got), true) - t.Errorf("%q\n%s", tt.name, diff.DiffPrettyText(delta)) - } - } -} - -func TestThresholdStringCrit(t *testing.T) { - alert := chronograf.AlertRule{ - Name: "haproxy", - Trigger: "threshold", - AlertNodes: chronograf.AlertNodes{ - Email: []*chronograf.Email{{}}, - }, - TriggerValues: chronograf.TriggerValues{ - Operator: "equal to", - Value: "DOWN", - }, - Every: "10s", - Message: `Haproxy monitor : {{.ID}} : {{ index .Tags "server" }} : {{ index .Tags "pxname" }} is {{ .Level }} `, - Details: "Email template", - Query: &chronograf.QueryConfig{ - Database: "influxdb", - RetentionPolicy: "autogen", - Measurement: "haproxy", - Fields: []chronograf.Field{ - { - Value: "last", - Type: "func", - Args: []chronograf.Field{ - { - Value: "status", - Type: "field", - }, - }, - }, - }, - GroupBy: chronograf.GroupBy{ - Time: "10s", - Tags: []string{"pxname"}, - }, - AreTagsAccepted: true, - }, - } - - tests := []struct { - name string - alert chronograf.AlertRule - want chronograf.TICKScript - wantErr bool - }{ - { - name: "Test valid template alert", - alert: alert, - want: `var db = 'influxdb' - -var rp = 'autogen' - -var measurement = 'haproxy' - -var groupBy = ['pxname'] - -var whereFilter = lambda: TRUE - -var period = 10s - -var every = 10s - -var name = 'haproxy' - -var idVar = name + ':{{.Group}}' - -var message = 'Haproxy monitor : {{.ID}} : {{ index .Tags "server" }} : {{ index .Tags "pxname" }} is {{ .Level }} ' - -var idTag = 'alertID' - -var levelTag = 'level' - -var messageField = 'message' - -var durationField = 'duration' - -var outputDB = 'chronograf' - -var outputRP = 'autogen' - -var outputMeasurement = 'alerts' - -var triggerType = 'threshold' - -var details = 'Email template' - -var crit = 'DOWN' - -var data = stream - |from() - .database(db) - .retentionPolicy(rp) - .measurement(measurement) - .groupBy(groupBy) - .where(whereFilter) - |window() - .period(period) - .every(every) - .align() - |last('status') - .as('value') - -var trigger = data - |alert() - .crit(lambda: "value" == crit) - .stateChangesOnly() - .message(message) - .id(idVar) - .idTag(idTag) - .levelTag(levelTag) - .messageField(messageField) - .durationField(durationField) - .details(details) - .email() - -trigger - |eval(lambda: float("value")) - .as('value') - .keep() - |influxDBOut() - .create() - .database(outputDB) - .retentionPolicy(outputRP) - .measurement(outputMeasurement) - .tag('alertName', name) - .tag('triggerType', triggerType) - -trigger - |httpOut('output') -`, - wantErr: false, - }, - } - for _, tt := range tests { - gen := Alert{} - got, err := gen.Generate(tt.alert) - if (err != nil) != tt.wantErr { - t.Errorf("%q. 
Threshold() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - if got != tt.want { - diff := diffmatchpatch.New() - delta := diff.DiffMain(string(tt.want), string(got), true) - t.Errorf("%q\n%s", tt.name, diff.DiffPrettyText(delta)) - } - } -} - -// TODO: Check with Nathaniel if kapacitor can do inequalities on strings -// If it cannot, I think we should add operator checks. -func TestThresholdStringCritGreater(t *testing.T) { - alert := chronograf.AlertRule{ - Name: "haproxy", - Trigger: "threshold", - AlertNodes: chronograf.AlertNodes{ - Email: []*chronograf.Email{{}}, - }, - TriggerValues: chronograf.TriggerValues{ - Operator: "greater than", - Value: "DOWN", - }, - Every: "10s", - Message: `Haproxy monitor : {{.ID}} : {{ index .Tags "server" }} : {{ index .Tags "pxname" }} is {{ .Level }} `, - Details: "Email template", - Query: &chronograf.QueryConfig{ - Database: "influxdb", - RetentionPolicy: "autogen", - Measurement: "haproxy", - Fields: []chronograf.Field{ - { - Value: "last", - Type: "func", - Args: []chronograf.Field{ - { - Value: "status", - Type: "field", - }, - }, - }, - }, - GroupBy: chronograf.GroupBy{ - Time: "10s", - Tags: []string{"pxname"}, - }, - AreTagsAccepted: true, - }, - } - - tests := []struct { - name string - alert chronograf.AlertRule - want chronograf.TICKScript - wantErr bool - }{ - { - name: "Test valid template alert", - alert: alert, - want: `var db = 'influxdb' - -var rp = 'autogen' - -var measurement = 'haproxy' - -var groupBy = ['pxname'] - -var whereFilter = lambda: TRUE - -var period = 10s - -var every = 10s - -var name = 'haproxy' - -var idVar = name + ':{{.Group}}' - -var message = 'Haproxy monitor : {{.ID}} : {{ index .Tags "server" }} : {{ index .Tags "pxname" }} is {{ .Level }} ' - -var idTag = 'alertID' - -var levelTag = 'level' - -var messageField = 'message' - -var durationField = 'duration' - -var outputDB = 'chronograf' - -var outputRP = 'autogen' - -var outputMeasurement = 'alerts' - -var triggerType = 'threshold' - -var details = 'Email template' - -var crit = 'DOWN' - -var data = stream - |from() - .database(db) - .retentionPolicy(rp) - .measurement(measurement) - .groupBy(groupBy) - .where(whereFilter) - |window() - .period(period) - .every(every) - .align() - |last('status') - .as('value') - -var trigger = data - |alert() - .crit(lambda: "value" > crit) - .stateChangesOnly() - .message(message) - .id(idVar) - .idTag(idTag) - .levelTag(levelTag) - .messageField(messageField) - .durationField(durationField) - .details(details) - .email() - -trigger - |eval(lambda: float("value")) - .as('value') - .keep() - |influxDBOut() - .create() - .database(outputDB) - .retentionPolicy(outputRP) - .measurement(outputMeasurement) - .tag('alertName', name) - .tag('triggerType', triggerType) - -trigger - |httpOut('output') -`, - wantErr: false, - }, - } - for _, tt := range tests { - gen := Alert{} - got, err := gen.Generate(tt.alert) - if (err != nil) != tt.wantErr { - t.Errorf("%q. 
Threshold() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - if got != tt.want { - diff := diffmatchpatch.New() - delta := diff.DiffMain(string(tt.want), string(got), true) - t.Errorf("%q\n%s", tt.name, diff.DiffPrettyText(delta)) - } - } -} - -func TestThresholdDetail(t *testing.T) { - alert := chronograf.AlertRule{ - Name: "name", - Trigger: "threshold", - AlertNodes: chronograf.AlertNodes{ - Slack: []*chronograf.Slack{{}}, - VictorOps: []*chronograf.VictorOps{{}}, - Email: []*chronograf.Email{{}}, - }, - TriggerValues: chronograf.TriggerValues{ - Operator: "greater than", - Value: "90", - }, - Every: "30s", - Message: "message", - Details: "details", - Query: &chronograf.QueryConfig{ - Database: "telegraf", - Measurement: "cpu", - RetentionPolicy: "autogen", - Fields: []chronograf.Field{ - { - Value: "mean", - Type: "func", - Args: []chronograf.Field{ - { - Value: "usage_user", - Type: "field", - }, - }, - }, - }, - Tags: map[string][]string{ - "host": []string{ - "acc-0eabc309-eu-west-1-data-3", - "prod", - }, - "cpu": []string{ - "cpu_total", - }, - }, - GroupBy: chronograf.GroupBy{ - Time: "10m", - Tags: []string{"host", "cluster_id"}, - }, - AreTagsAccepted: true, - }, - } - - tests := []struct { - name string - alert chronograf.AlertRule - want chronograf.TICKScript - wantErr bool - }{ - { - name: "Test valid template alert", - alert: alert, - want: `var db = 'telegraf' - -var rp = 'autogen' - -var measurement = 'cpu' - -var groupBy = ['host', 'cluster_id'] - -var whereFilter = lambda: ("cpu" == 'cpu_total') AND ("host" == 'acc-0eabc309-eu-west-1-data-3' OR "host" == 'prod') - -var period = 10m - -var every = 30s - -var name = 'name' - -var idVar = name + ':{{.Group}}' - -var message = 'message' - -var idTag = 'alertID' - -var levelTag = 'level' - -var messageField = 'message' - -var durationField = 'duration' - -var outputDB = 'chronograf' - -var outputRP = 'autogen' - -var outputMeasurement = 'alerts' - -var triggerType = 'threshold' - -var details = 'details' - -var crit = 90 - -var data = stream - |from() - .database(db) - .retentionPolicy(rp) - .measurement(measurement) - .groupBy(groupBy) - .where(whereFilter) - |window() - .period(period) - .every(every) - .align() - |mean('usage_user') - .as('value') - -var trigger = data - |alert() - .crit(lambda: "value" > crit) - .stateChangesOnly() - .message(message) - .id(idVar) - .idTag(idTag) - .levelTag(levelTag) - .messageField(messageField) - .durationField(durationField) - .details(details) - .email() - .victorOps() - .slack() - -trigger - |eval(lambda: float("value")) - .as('value') - .keep() - |influxDBOut() - .create() - .database(outputDB) - .retentionPolicy(outputRP) - .measurement(outputMeasurement) - .tag('alertName', name) - .tag('triggerType', triggerType) - -trigger - |httpOut('output') -`, - wantErr: false, - }, - } - for _, tt := range tests { - gen := Alert{} - got, err := gen.Generate(tt.alert) - if (err != nil) != tt.wantErr { - t.Errorf("%q. 
Threshold() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - if got != tt.want { - diff := diffmatchpatch.New() - delta := diff.DiffMain(string(tt.want), string(got), true) - t.Errorf("%q\n%s", tt.name, diff.DiffPrettyText(delta)) - } - } -} - -func TestThresholdInsideRange(t *testing.T) { - alert := chronograf.AlertRule{ - Name: "name", - Trigger: "threshold", - AlertNodes: chronograf.AlertNodes{ - Slack: []*chronograf.Slack{{}}, - VictorOps: []*chronograf.VictorOps{{}}, - Email: []*chronograf.Email{{}}, - }, - TriggerValues: chronograf.TriggerValues{ - Operator: "inside range", - Value: "90", - RangeValue: "100", - }, - Every: "30s", - Message: "message", - Query: &chronograf.QueryConfig{ - Database: "telegraf", - Measurement: "cpu", - RetentionPolicy: "autogen", - Fields: []chronograf.Field{ - { - Value: "mean", - Type: "func", - Args: []chronograf.Field{ - { - Value: "usage_user", - Type: "field", - }, - }, - }, - }, - Tags: map[string][]string{ - "host": []string{ - "acc-0eabc309-eu-west-1-data-3", - "prod", - }, - "cpu": []string{ - "cpu_total", - }, - }, - GroupBy: chronograf.GroupBy{ - Time: "10m", - Tags: []string{"host", "cluster_id"}, - }, - AreTagsAccepted: true, - }, - } - - tests := []struct { - name string - alert chronograf.AlertRule - want chronograf.TICKScript - wantErr bool - }{ - { - name: "Test valid template alert", - alert: alert, - want: `var db = 'telegraf' - -var rp = 'autogen' - -var measurement = 'cpu' - -var groupBy = ['host', 'cluster_id'] - -var whereFilter = lambda: ("cpu" == 'cpu_total') AND ("host" == 'acc-0eabc309-eu-west-1-data-3' OR "host" == 'prod') - -var period = 10m - -var every = 30s - -var name = 'name' - -var idVar = name + ':{{.Group}}' - -var message = 'message' - -var idTag = 'alertID' - -var levelTag = 'level' - -var messageField = 'message' - -var durationField = 'duration' - -var outputDB = 'chronograf' - -var outputRP = 'autogen' - -var outputMeasurement = 'alerts' - -var triggerType = 'threshold' - -var lower = 90 - -var upper = 100 - -var data = stream - |from() - .database(db) - .retentionPolicy(rp) - .measurement(measurement) - .groupBy(groupBy) - .where(whereFilter) - |window() - .period(period) - .every(every) - .align() - |mean('usage_user') - .as('value') - -var trigger = data - |alert() - .crit(lambda: "value" >= lower AND "value" <= upper) - .stateChangesOnly() - .message(message) - .id(idVar) - .idTag(idTag) - .levelTag(levelTag) - .messageField(messageField) - .durationField(durationField) - .email() - .victorOps() - .slack() - -trigger - |eval(lambda: float("value")) - .as('value') - .keep() - |influxDBOut() - .create() - .database(outputDB) - .retentionPolicy(outputRP) - .measurement(outputMeasurement) - .tag('alertName', name) - .tag('triggerType', triggerType) - -trigger - |httpOut('output') -`, - wantErr: false, - }, - } - for _, tt := range tests { - gen := Alert{} - got, err := gen.Generate(tt.alert) - if (err != nil) != tt.wantErr { - t.Errorf("%q. 
Threshold() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - if got != tt.want { - diff := diffmatchpatch.New() - delta := diff.DiffMain(string(tt.want), string(got), true) - t.Errorf("%q\n%s", tt.name, diff.DiffPrettyText(delta)) - } - } -} - -func TestThresholdOutsideRange(t *testing.T) { - alert := chronograf.AlertRule{ - Name: "name", - Trigger: "threshold", - AlertNodes: chronograf.AlertNodes{ - Slack: []*chronograf.Slack{{}}, - VictorOps: []*chronograf.VictorOps{{}}, - Email: []*chronograf.Email{{}}, - }, - TriggerValues: chronograf.TriggerValues{ - Operator: "outside range", - Value: "90", - RangeValue: "100", - }, - Every: "30s", - Message: "message", - Query: &chronograf.QueryConfig{ - Database: "telegraf", - Measurement: "cpu", - RetentionPolicy: "autogen", - Fields: []chronograf.Field{ - { - Value: "mean", - Type: "func", - Args: []chronograf.Field{ - { - Value: "usage_user", - Type: "field", - }, - }, - }, - }, - Tags: map[string][]string{ - "host": []string{ - "acc-0eabc309-eu-west-1-data-3", - "prod", - }, - "cpu": []string{ - "cpu_total", - }, - }, - GroupBy: chronograf.GroupBy{ - Time: "10m", - Tags: []string{"host", "cluster_id"}, - }, - AreTagsAccepted: true, - }, - } - - tests := []struct { - name string - alert chronograf.AlertRule - want chronograf.TICKScript - wantErr bool - }{ - { - name: "Test valid template alert", - alert: alert, - want: `var db = 'telegraf' - -var rp = 'autogen' - -var measurement = 'cpu' - -var groupBy = ['host', 'cluster_id'] - -var whereFilter = lambda: ("cpu" == 'cpu_total') AND ("host" == 'acc-0eabc309-eu-west-1-data-3' OR "host" == 'prod') - -var period = 10m - -var every = 30s - -var name = 'name' - -var idVar = name + ':{{.Group}}' - -var message = 'message' - -var idTag = 'alertID' - -var levelTag = 'level' - -var messageField = 'message' - -var durationField = 'duration' - -var outputDB = 'chronograf' - -var outputRP = 'autogen' - -var outputMeasurement = 'alerts' - -var triggerType = 'threshold' - -var lower = 90 - -var upper = 100 - -var data = stream - |from() - .database(db) - .retentionPolicy(rp) - .measurement(measurement) - .groupBy(groupBy) - .where(whereFilter) - |window() - .period(period) - .every(every) - .align() - |mean('usage_user') - .as('value') - -var trigger = data - |alert() - .crit(lambda: "value" < lower OR "value" > upper) - .stateChangesOnly() - .message(message) - .id(idVar) - .idTag(idTag) - .levelTag(levelTag) - .messageField(messageField) - .durationField(durationField) - .email() - .victorOps() - .slack() - -trigger - |eval(lambda: float("value")) - .as('value') - .keep() - |influxDBOut() - .create() - .database(outputDB) - .retentionPolicy(outputRP) - .measurement(outputMeasurement) - .tag('alertName', name) - .tag('triggerType', triggerType) - -trigger - |httpOut('output') -`, - wantErr: false, - }, - } - for _, tt := range tests { - gen := Alert{} - got, err := gen.Generate(tt.alert) - if (err != nil) != tt.wantErr { - t.Errorf("%q. 
Threshold() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - if got != tt.want { - diff := diffmatchpatch.New() - delta := diff.DiffMain(string(tt.want), string(got), true) - t.Errorf("%q\n%s", tt.name, diff.DiffPrettyText(delta)) - } - } -} - -func TestThresholdNoAggregate(t *testing.T) { - alert := chronograf.AlertRule{ - Name: "name", - Trigger: "threshold", - AlertNodes: chronograf.AlertNodes{ - Slack: []*chronograf.Slack{{}}, - VictorOps: []*chronograf.VictorOps{{}}, - Email: []*chronograf.Email{{}}, - }, - TriggerValues: chronograf.TriggerValues{ - Operator: "greater than", - Value: "90", - }, - Every: "30s", - Message: "message", - Query: &chronograf.QueryConfig{ - Database: "telegraf", - Measurement: "cpu", - RetentionPolicy: "autogen", - Fields: []chronograf.Field{ - { - Value: "usage_user", - Type: "field", - }, - }, - Tags: map[string][]string{ - "host": []string{ - "acc-0eabc309-eu-west-1-data-3", - "prod", - }, - "cpu": []string{ - "cpu_total", - }, - }, - GroupBy: chronograf.GroupBy{ - Time: "10m", - Tags: []string{"host", "cluster_id"}, - }, - AreTagsAccepted: true, - }, - } - - tests := []struct { - name string - alert chronograf.AlertRule - want chronograf.TICKScript - wantErr bool - }{ - { - name: "Test valid template alert", - alert: alert, - want: `var db = 'telegraf' - -var rp = 'autogen' - -var measurement = 'cpu' - -var groupBy = ['host', 'cluster_id'] - -var whereFilter = lambda: ("cpu" == 'cpu_total') AND ("host" == 'acc-0eabc309-eu-west-1-data-3' OR "host" == 'prod') - -var name = 'name' - -var idVar = name + ':{{.Group}}' - -var message = 'message' - -var idTag = 'alertID' - -var levelTag = 'level' - -var messageField = 'message' - -var durationField = 'duration' - -var outputDB = 'chronograf' - -var outputRP = 'autogen' - -var outputMeasurement = 'alerts' - -var triggerType = 'threshold' - -var crit = 90 - -var data = stream - |from() - .database(db) - .retentionPolicy(rp) - .measurement(measurement) - .groupBy(groupBy) - .where(whereFilter) - |eval(lambda: "usage_user") - .as('value') - -var trigger = data - |alert() - .crit(lambda: "value" > crit) - .stateChangesOnly() - .message(message) - .id(idVar) - .idTag(idTag) - .levelTag(levelTag) - .messageField(messageField) - .durationField(durationField) - .email() - .victorOps() - .slack() - -trigger - |eval(lambda: float("value")) - .as('value') - .keep() - |influxDBOut() - .create() - .database(outputDB) - .retentionPolicy(outputRP) - .measurement(outputMeasurement) - .tag('alertName', name) - .tag('triggerType', triggerType) - -trigger - |httpOut('output') -`, - wantErr: false, - }, - } - for _, tt := range tests { - gen := Alert{} - got, err := gen.Generate(tt.alert) - if (err != nil) != tt.wantErr { - t.Errorf("%q. 
Threshold() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - if got != tt.want { - diff := diffmatchpatch.New() - delta := diff.DiffMain(string(tt.want), string(got), true) - t.Errorf("%q\n%s", tt.name, diff.DiffPrettyText(delta)) - } - } -} - -func TestRelative(t *testing.T) { - alert := chronograf.AlertRule{ - Name: "name", - Trigger: "relative", - AlertNodes: chronograf.AlertNodes{ - Slack: []*chronograf.Slack{{}}, - VictorOps: []*chronograf.VictorOps{{}}, - Email: []*chronograf.Email{{}}, - }, - TriggerValues: chronograf.TriggerValues{ - Change: "% change", - Shift: "1m", - Operator: "greater than", - Value: "90", - }, - Every: "30s", - Message: "message", - Query: &chronograf.QueryConfig{ - Database: "telegraf", - Measurement: "cpu", - RetentionPolicy: "autogen", - Fields: []chronograf.Field{ - { - Value: "mean", - Type: "func", - Args: []chronograf.Field{ - { - Value: "usage_user", - Type: "field", - }, - }, - }, - }, - Tags: map[string][]string{ - "host": []string{ - "acc-0eabc309-eu-west-1-data-3", - "prod", - }, - "cpu": []string{ - "cpu_total", - }, - }, - GroupBy: chronograf.GroupBy{ - Time: "10m", - Tags: []string{"host", "cluster_id"}, - }, - AreTagsAccepted: true, - }, - } - - tests := []struct { - name string - alert chronograf.AlertRule - want chronograf.TICKScript - wantErr bool - }{ - { - name: "Test valid template alert", - alert: alert, - want: `var db = 'telegraf' - -var rp = 'autogen' - -var measurement = 'cpu' - -var groupBy = ['host', 'cluster_id'] - -var whereFilter = lambda: ("cpu" == 'cpu_total') AND ("host" == 'acc-0eabc309-eu-west-1-data-3' OR "host" == 'prod') - -var period = 10m - -var every = 30s - -var name = 'name' - -var idVar = name + ':{{.Group}}' - -var message = 'message' - -var idTag = 'alertID' - -var levelTag = 'level' - -var messageField = 'message' - -var durationField = 'duration' - -var outputDB = 'chronograf' - -var outputRP = 'autogen' - -var outputMeasurement = 'alerts' - -var triggerType = 'relative' - -var shift = 1m - -var crit = 90 - -var data = stream - |from() - .database(db) - .retentionPolicy(rp) - .measurement(measurement) - .groupBy(groupBy) - .where(whereFilter) - |window() - .period(period) - .every(every) - .align() - |mean('usage_user') - .as('value') - -var past = data - |shift(shift) - -var current = data - -var trigger = past - |join(current) - .as('past', 'current') - |eval(lambda: abs(float("current.value" - "past.value")) / float("past.value") * 100.0) - .keep() - .as('value') - |alert() - .crit(lambda: "value" > crit) - .stateChangesOnly() - .message(message) - .id(idVar) - .idTag(idTag) - .levelTag(levelTag) - .messageField(messageField) - .durationField(durationField) - .email() - .victorOps() - .slack() - -trigger - |eval(lambda: float("value")) - .as('value') - .keep() - |influxDBOut() - .create() - .database(outputDB) - .retentionPolicy(outputRP) - .measurement(outputMeasurement) - .tag('alertName', name) - .tag('triggerType', triggerType) - -trigger - |httpOut('output') -`, - wantErr: false, - }, - } - for _, tt := range tests { - gen := Alert{} - got, err := gen.Generate(tt.alert) - if (err != nil) != tt.wantErr { - t.Errorf("%q. 
Relative() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - if got != tt.want { - diff := diffmatchpatch.New() - delta := diff.DiffMain(string(tt.want), string(got), true) - t.Errorf("%q\n%s", tt.name, diff.DiffPrettyText(delta)) - } - } -} - -func TestRelativeChange(t *testing.T) { - alert := chronograf.AlertRule{ - Name: "name", - Trigger: "relative", - AlertNodes: chronograf.AlertNodes{ - Slack: []*chronograf.Slack{{}}, - VictorOps: []*chronograf.VictorOps{{}}, - Email: []*chronograf.Email{{}}, - }, - TriggerValues: chronograf.TriggerValues{ - Change: "change", - Shift: "1m", - Operator: "greater than", - Value: "90", - }, - Every: "30s", - Message: "message", - Query: &chronograf.QueryConfig{ - Database: "telegraf", - Measurement: "cpu", - RetentionPolicy: "autogen", - Fields: []chronograf.Field{ - { - Value: "mean", - Type: "func", - Args: []chronograf.Field{ - { - Value: "usage_user", - Type: "field", - }, - }, - }, - }, - Tags: map[string][]string{ - "host": []string{ - "acc-0eabc309-eu-west-1-data-3", - "prod", - }, - "cpu": []string{ - "cpu_total", - }, - }, - GroupBy: chronograf.GroupBy{ - Time: "10m", - Tags: []string{"host", "cluster_id"}, - }, - AreTagsAccepted: true, - }, - } - - tests := []struct { - name string - alert chronograf.AlertRule - want chronograf.TICKScript - wantErr bool - }{ - { - name: "Test valid template alert", - alert: alert, - want: `var db = 'telegraf' - -var rp = 'autogen' - -var measurement = 'cpu' - -var groupBy = ['host', 'cluster_id'] - -var whereFilter = lambda: ("cpu" == 'cpu_total') AND ("host" == 'acc-0eabc309-eu-west-1-data-3' OR "host" == 'prod') - -var period = 10m - -var every = 30s - -var name = 'name' - -var idVar = name + ':{{.Group}}' - -var message = 'message' - -var idTag = 'alertID' - -var levelTag = 'level' - -var messageField = 'message' - -var durationField = 'duration' - -var outputDB = 'chronograf' - -var outputRP = 'autogen' - -var outputMeasurement = 'alerts' - -var triggerType = 'relative' - -var shift = 1m - -var crit = 90 - -var data = stream - |from() - .database(db) - .retentionPolicy(rp) - .measurement(measurement) - .groupBy(groupBy) - .where(whereFilter) - |window() - .period(period) - .every(every) - .align() - |mean('usage_user') - .as('value') - -var past = data - |shift(shift) - -var current = data - -var trigger = past - |join(current) - .as('past', 'current') - |eval(lambda: float("current.value" - "past.value")) - .keep() - .as('value') - |alert() - .crit(lambda: "value" > crit) - .stateChangesOnly() - .message(message) - .id(idVar) - .idTag(idTag) - .levelTag(levelTag) - .messageField(messageField) - .durationField(durationField) - .email() - .victorOps() - .slack() - -trigger - |eval(lambda: float("value")) - .as('value') - .keep() - |influxDBOut() - .create() - .database(outputDB) - .retentionPolicy(outputRP) - .measurement(outputMeasurement) - .tag('alertName', name) - .tag('triggerType', triggerType) - -trigger - |httpOut('output') -`, - wantErr: false, - }, - } - for _, tt := range tests { - gen := Alert{} - got, err := gen.Generate(tt.alert) - if (err != nil) != tt.wantErr { - t.Errorf("%q. 
Relative() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - if got != tt.want { - diff := diffmatchpatch.New() - delta := diff.DiffMain(string(tt.want), string(got), true) - t.Errorf("%q\n%s", tt.name, diff.DiffPrettyText(delta)) - } - } -} - -func TestDeadman(t *testing.T) { - alert := chronograf.AlertRule{ - Name: "name", - Trigger: "deadman", - AlertNodes: chronograf.AlertNodes{ - Slack: []*chronograf.Slack{{}}, - VictorOps: []*chronograf.VictorOps{{}}, - Email: []*chronograf.Email{{}}, - }, - TriggerValues: chronograf.TriggerValues{ - Period: "10m", - }, - Every: "30s", - Message: "message", - Query: &chronograf.QueryConfig{ - Database: "telegraf", - Measurement: "cpu", - RetentionPolicy: "autogen", - Fields: []chronograf.Field{ - { - Value: "mean", - Type: "func", - Args: []chronograf.Field{ - { - Value: "usage_user", - Type: "field", - }, - }, - }, - }, - Tags: map[string][]string{ - "host": []string{ - "acc-0eabc309-eu-west-1-data-3", - "prod", - }, - "cpu": []string{ - "cpu_total", - }, - }, - GroupBy: chronograf.GroupBy{ - Time: "", - Tags: []string{"host", "cluster_id"}, - }, - AreTagsAccepted: true, - }, - } - - tests := []struct { - name string - alert chronograf.AlertRule - want chronograf.TICKScript - wantErr bool - }{ - { - name: "Test valid template alert", - alert: alert, - want: `var db = 'telegraf' - -var rp = 'autogen' - -var measurement = 'cpu' - -var groupBy = ['host', 'cluster_id'] - -var whereFilter = lambda: ("cpu" == 'cpu_total') AND ("host" == 'acc-0eabc309-eu-west-1-data-3' OR "host" == 'prod') - -var period = 10m - -var name = 'name' - -var idVar = name + ':{{.Group}}' - -var message = 'message' - -var idTag = 'alertID' - -var levelTag = 'level' - -var messageField = 'message' - -var durationField = 'duration' - -var outputDB = 'chronograf' - -var outputRP = 'autogen' - -var outputMeasurement = 'alerts' - -var triggerType = 'deadman' - -var threshold = 0.0 - -var data = stream - |from() - .database(db) - .retentionPolicy(rp) - .measurement(measurement) - .groupBy(groupBy) - .where(whereFilter) - -var trigger = data - |deadman(threshold, period) - .stateChangesOnly() - .message(message) - .id(idVar) - .idTag(idTag) - .levelTag(levelTag) - .messageField(messageField) - .durationField(durationField) - .email() - .victorOps() - .slack() - -trigger - |eval(lambda: "emitted") - .as('value') - .keep('value', messageField, durationField) - |eval(lambda: float("value")) - .as('value') - .keep() - |influxDBOut() - .create() - .database(outputDB) - .retentionPolicy(outputRP) - .measurement(outputMeasurement) - .tag('alertName', name) - .tag('triggerType', triggerType) - -trigger - |httpOut('output') -`, - wantErr: false, - }, - } - for _, tt := range tests { - gen := Alert{} - got, err := gen.Generate(tt.alert) - if (err != nil) != tt.wantErr { - t.Errorf("%q. 
Deadman() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - if got != tt.want { - t.Errorf("%q\n%s", tt.name, cmp.Diff(string(tt.want), string(got))) - } - } -} diff --git a/chronograf/.kapacitor/triggers.go b/chronograf/.kapacitor/triggers.go deleted file mode 100644 index 83c92429a5e..00000000000 --- a/chronograf/.kapacitor/triggers.go +++ /dev/null @@ -1,162 +0,0 @@ -package kapacitor - -import ( - "fmt" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -const ( - // Deadman triggers when data is missing for a period of time - Deadman = "deadman" - // Relative triggers when the value has changed compared to the past - Relative = "relative" - // Threshold triggers when the value crosses a threshold - Threshold = "threshold" - // ThresholdRange triggers when a value is inside or outside a range - ThresholdRange = "range" - // ChangePercent triggers a relative alert when the value changes by a percentage - ChangePercent = "% change" - // ChangeAmount triggers a relative alert when the value changes by some amount - ChangeAmount = "change" -) - -// AllAlerts are properties all alert types will have -var AllAlerts = ` - .message(message) - .id(idVar) - .idTag(idTag) - .levelTag(levelTag) - .messageField(messageField) - .durationField(durationField) -` - -// Details is used only for alerts that specify a details string -var Details = ` - .details(details) -` - -// ThresholdTrigger is the tickscript trigger for alerts that exceed a value -var ThresholdTrigger = ` - var trigger = data - |alert() - .crit(lambda: "value" %s crit) -` - -// ThresholdRangeTrigger is the alert for when data is inside or outside the range. -var ThresholdRangeTrigger = ` - var trigger = data - |alert() - .crit(lambda: "value" %s lower %s "value" %s upper) -` - -// RelativeAbsoluteTrigger compares one window of data versus another (current - past) -var RelativeAbsoluteTrigger = ` -var past = data - |shift(shift) - -var current = data - -var trigger = past - |join(current) - .as('past', 'current') - |eval(lambda: float("current.value" - "past.value")) - .keep() - .as('value') - |alert() - .crit(lambda: "value" %s crit) -` - -// RelativePercentTrigger compares one window of data versus another as a percent change.
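// The trigger templates above and below are fmt.Sprintf format strings: each
// %s is filled with a comparison operator derived from the alert rule by
// Trigger() further down (via kapaOperator and rangeOperators, which are
// defined elsewhere in the package). A minimal sketch with the operators
// hard-coded for illustration:
//
//	crit := fmt.Sprintf(ThresholdTrigger, ">")
//	// -> .crit(lambda: "value" > crit)
//	band := fmt.Sprintf(ThresholdRangeTrigger, ">=", "AND", "<=")
//	// -> .crit(lambda: "value" >= lower AND "value" <= upper)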
-var RelativePercentTrigger = ` -var past = data - |shift(shift) - -var current = data - -var trigger = past - |join(current) - .as('past', 'current') - |eval(lambda: abs(float("current.value" - "past.value"))/float("past.value") * 100.0) - .keep() - .as('value') - |alert() - .crit(lambda: "value" %s crit) -` - -// DeadmanTrigger checks if any data has been streamed in the last period of time -var DeadmanTrigger = ` - var trigger = data|deadman(threshold, period) -` - -// Trigger returns the trigger mechanism for a tickscript -func Trigger(rule chronograf.AlertRule) (string, error) { - var trigger string - var err error - switch rule.Trigger { - case Deadman: - trigger, err = DeadmanTrigger, nil - case Relative: - trigger, err = relativeTrigger(rule) - case Threshold: - if rule.TriggerValues.RangeValue == "" { - trigger, err = thresholdTrigger(rule) - } else { - trigger, err = thresholdRangeTrigger(rule) - } - default: - trigger, err = "", fmt.Errorf("unknown trigger type: %s", rule.Trigger) - } - - if err != nil { - return "", err - } - - // Only add stateChangesOnly to new rules - if rule.ID == "" { - trigger += ` - .stateChangesOnly() - ` - } - - trigger += AllAlerts - - if rule.Details != "" { - trigger += Details - } - return trigger, nil -} - -func relativeTrigger(rule chronograf.AlertRule) (string, error) { - op, err := kapaOperator(rule.TriggerValues.Operator) - if err != nil { - return "", err - } - if rule.TriggerValues.Change == ChangePercent { - return fmt.Sprintf(RelativePercentTrigger, op), nil - } else if rule.TriggerValues.Change == ChangeAmount { - return fmt.Sprintf(RelativeAbsoluteTrigger, op), nil - } else { - return "", fmt.Errorf("unknown change type %s", rule.TriggerValues.Change) - } -} - -func thresholdTrigger(rule chronograf.AlertRule) (string, error) { - op, err := kapaOperator(rule.TriggerValues.Operator) - if err != nil { - return "", err - } - return fmt.Sprintf(ThresholdTrigger, op), nil -} - -func thresholdRangeTrigger(rule chronograf.AlertRule) (string, error) { - ops, err := rangeOperators(rule.TriggerValues.Operator) - if err != nil { - return "", err - } - var iops = make([]interface{}, len(ops)) - for i, o := range ops { - iops[i] = o - } - return fmt.Sprintf(ThresholdRangeTrigger, iops...), nil -} diff --git a/chronograf/.kapacitor/triggers_test.go b/chronograf/.kapacitor/triggers_test.go deleted file mode 100644 index 1e09bc563aa..00000000000 --- a/chronograf/.kapacitor/triggers_test.go +++ /dev/null @@ -1,142 +0,0 @@ -package kapacitor - -import ( - "testing" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -func TestTrigger(t *testing.T) { - tests := []struct { - name string - rule chronograf.AlertRule - want string - wantErr bool - }{ - { - name: "Test Deadman", - rule: chronograf.AlertRule{ - Trigger: "deadman", - }, - want: `var trigger = data - |deadman(threshold, period) - .stateChangesOnly() - .message(message) - .id(idVar) - .idTag(idTag) - .levelTag(levelTag) - .messageField(messageField) - .durationField(durationField) -`, - wantErr: false, - }, - { - name: "Test Relative", - rule: chronograf.AlertRule{ - Trigger: "relative", - TriggerValues: chronograf.TriggerValues{ - Operator: "greater than", - Change: "% change", - }, - }, - want: `var past = data - |shift(shift) - -var current = data - -var trigger = past - |join(current) - .as('past', 'current') - |eval(lambda: abs(float("current.value" - "past.value")) / float("past.value") * 100.0) - .keep() - .as('value') - |alert() - .crit(lambda: "value" > crit) - 
.stateChangesOnly() - .message(message) - .id(idVar) - .idTag(idTag) - .levelTag(levelTag) - .messageField(messageField) - .durationField(durationField) -`, - wantErr: false, - }, - { - name: "Test Relative absolute change", - rule: chronograf.AlertRule{ - Trigger: "relative", - TriggerValues: chronograf.TriggerValues{ - Operator: "greater than", - Change: "change", - }, - }, - want: `var past = data - |shift(shift) - -var current = data - -var trigger = past - |join(current) - .as('past', 'current') - |eval(lambda: float("current.value" - "past.value")) - .keep() - .as('value') - |alert() - .crit(lambda: "value" > crit) - .stateChangesOnly() - .message(message) - .id(idVar) - .idTag(idTag) - .levelTag(levelTag) - .messageField(messageField) - .durationField(durationField) -`, - wantErr: false, - }, - { - name: "Test Threshold", - rule: chronograf.AlertRule{ - Trigger: "threshold", - TriggerValues: chronograf.TriggerValues{ - Operator: "greater than", - }, - }, - want: `var trigger = data - |alert() - .crit(lambda: "value" > crit) - .stateChangesOnly() - .message(message) - .id(idVar) - .idTag(idTag) - .levelTag(levelTag) - .messageField(messageField) - .durationField(durationField) -`, - wantErr: false, - }, - { - name: "Test Invalid", - rule: chronograf.AlertRule{ - Trigger: "invalid", - }, - want: ``, - wantErr: true, - }, - } - for _, tt := range tests { - got, err := Trigger(tt.rule) - if (err != nil) != tt.wantErr { - t.Errorf("%q. Trigger() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - formatted, err := formatTick(got) - if err != nil { - t.Errorf("%q. formatTick() error = %v", tt.name, err) - continue - } - if string(formatted) != tt.want { - t.Errorf("%q. Trigger() = \n%v\n want \n%v\n", tt.name, string(formatted), tt.want) - } - } -} diff --git a/chronograf/.kapacitor/validate.go b/chronograf/.kapacitor/validate.go deleted file mode 100644 index 3a1d12cec2d..00000000000 --- a/chronograf/.kapacitor/validate.go +++ /dev/null @@ -1,67 +0,0 @@ -package kapacitor - -import ( - "bytes" - "fmt" - "strings" - "time" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/kapacitor/pipeline" - "github.com/influxdata/kapacitor/tick" - "github.com/influxdata/kapacitor/tick/ast" - "github.com/influxdata/kapacitor/tick/stateful" -) - -// ValidateAlert checks if the alert is a valid kapacitor alert service. -func ValidateAlert(service string) error { - // Simple tick script to check alert service. - // If a pipeline cannot be created then we know this is an invalid - // service. At least with this version of kapacitor!
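	// For example, for service ".slack()" the probe script built below is
	// "stream|from()|alert().slack()": the smallest pipeline that still
	// exercises the requested alert handler.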
- script := fmt.Sprintf("stream|from()|alert()%s", service) - return validateTick(chronograf.TICKScript(script)) -} - -func formatTick(tickscript string) (chronograf.TICKScript, error) { - node, err := ast.Parse(tickscript) - if err != nil { - return "", err - } - - output := new(bytes.Buffer) - node.Format(output, "", true) - return chronograf.TICKScript(output.String()), nil -} - -func validateTick(script chronograf.TICKScript) error { - _, err := newPipeline(script) - return err -} - -func newPipeline(script chronograf.TICKScript) (*pipeline.Pipeline, error) { - edge := pipeline.StreamEdge - if strings.Contains(string(script), "batch") { - edge = pipeline.BatchEdge - } - - scope := stateful.NewScope() - predefinedVars := map[string]tick.Var{} - return pipeline.CreatePipeline(string(script), edge, scope, &deadman{}, predefinedVars) -} - -// deadman is an empty implementation of a kapacitor DeadmanService to allow CreatePipeline -var _ pipeline.DeadmanService = &deadman{} - -type deadman struct { - interval time.Duration - threshold float64 - id string - message string - global bool -} - -func (d deadman) Interval() time.Duration { return d.interval } -func (d deadman) Threshold() float64 { return d.threshold } -func (d deadman) Id() string { return d.id } -func (d deadman) Message() string { return d.message } -func (d deadman) Global() bool { return d.global } diff --git a/chronograf/.kapacitor/validate_test.go b/chronograf/.kapacitor/validate_test.go deleted file mode 100644 index 41f997d120e..00000000000 --- a/chronograf/.kapacitor/validate_test.go +++ /dev/null @@ -1,52 +0,0 @@ -package kapacitor - -import "testing" -import "github.com/influxdata/influxdb/v2/chronograf" - -func TestValidateAlert(t *testing.T) { - tests := []struct { - name string - service string - wantErr bool - }{ - { - name: "Test valid template alert", - service: ".slack()", - wantErr: false, - }, - { - name: "Test invalid template alert", - service: ".invalid()", - wantErr: true, - }, - } - for _, tt := range tests { - if err := ValidateAlert(tt.service); (err != nil) != tt.wantErr { - t.Errorf("%q. ValidateAlert() error = %v, wantErr %v", tt.name, err, tt.wantErr) - } - } -} - -func Test_validateTick(t *testing.T) { - tests := []struct { - name string - script chronograf.TICKScript - wantErr bool - }{ - { - name: "Valid Script", - script: "stream|from()", - wantErr: false, - }, - { - name: "Invalid Script", - script: "stream|nothing", - wantErr: true, - }, - } - for _, tt := range tests { - if err := validateTick(tt.script); (err != nil) != tt.wantErr { - t.Errorf("%q. validateTick() error = %v, wantErr %v", tt.name, err, tt.wantErr) - } - } -} diff --git a/chronograf/.kapacitor/vars.go b/chronograf/.kapacitor/vars.go deleted file mode 100644 index b0e2eeb8303..00000000000 --- a/chronograf/.kapacitor/vars.go +++ /dev/null @@ -1,271 +0,0 @@ -package kapacitor - -import ( - "fmt" - "sort" - "strconv" - "strings" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -var ( - // Database is the output database for alerts. - Database = "chronograf" - // RP will be autogen for alerts because it is default. - RP = "autogen" - // Measurement will be alerts so that the app knows where to get this data. 
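// Taken together, Database, RP, and Measurement pin the output location:
// every generated alert writes its state changes to the measurement
// chronograf.autogen.alerts in the connected InfluxDB.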
- Measurement = "alerts" - // IDTag is the output tag key for the ID of the alert - IDTag = "alertID" - //LevelTag is the output tag key for the alert level information - LevelTag = "level" - // MessageField is the output field key for the message in the alert - MessageField = "message" - // DurationField is the output field key for the duration of the alert - DurationField = "duration" -) - -// Vars builds the top level vars for a kapacitor alert script -func Vars(rule chronograf.AlertRule) (string, error) { - common, err := commonVars(rule) - if err != nil { - return "", err - } - - switch rule.Trigger { - case Threshold: - if rule.TriggerValues.RangeValue == "" { - vars := ` - %s - var crit = %s - ` - return fmt.Sprintf(vars, common, formatValue(rule.TriggerValues.Value)), nil - } - vars := ` - %s - var lower = %s - var upper = %s -` - return fmt.Sprintf(vars, - common, - rule.TriggerValues.Value, - rule.TriggerValues.RangeValue), nil - case Relative: - vars := ` - %s - var shift = %s - var crit = %s - ` - return fmt.Sprintf(vars, - common, - rule.TriggerValues.Shift, - rule.TriggerValues.Value, - ), nil - case Deadman: - vars := ` - %s - var threshold = %s - ` - return fmt.Sprintf(vars, - common, - "0.0", // deadman threshold hardcoded to zero - ), nil - default: - return "", fmt.Errorf("unknown trigger mechanism") - } -} - -// NotEmpty is an error collector checking if strings are empty values -type NotEmpty struct { - Err error -} - -// Valid checks if string s is empty and if so reports an error using name -func (n *NotEmpty) Valid(name, s string) error { - if n.Err != nil { - return n.Err - - } - if s == "" { - n.Err = fmt.Errorf("%s cannot be an empty string", name) - } - return n.Err -} - -// Escape sanitizes strings with single quotes for kapacitor -func Escape(str string) string { - return strings.Replace(str, "'", `\'`, -1) -} - -func commonVars(rule chronograf.AlertRule) (string, error) { - n := new(NotEmpty) - n.Valid("database", rule.Query.Database) - n.Valid("retention policy", rule.Query.RetentionPolicy) - n.Valid("measurement", rule.Query.Measurement) - n.Valid("alert name", rule.Name) - n.Valid("trigger type", rule.Trigger) - if n.Err != nil { - return "", n.Err - } - - wind, err := window(rule) - if err != nil { - return "", err - } - - common := ` - var db = '%s' - var rp = '%s' - var measurement = '%s' - var groupBy = %s - var whereFilter = %s - %s - - var name = '%s' - var idVar = %s - var message = '%s' - var idTag = '%s' - var levelTag = '%s' - var messageField = '%s' - var durationField = '%s' - - var outputDB = '%s' - var outputRP = '%s' - var outputMeasurement = '%s' - var triggerType = '%s' - ` - res := fmt.Sprintf(common, - Escape(rule.Query.Database), - Escape(rule.Query.RetentionPolicy), - Escape(rule.Query.Measurement), - groupBy(rule.Query), - whereFilter(rule.Query), - wind, - Escape(rule.Name), - idVar(rule.Query), - Escape(rule.Message), - IDTag, - LevelTag, - MessageField, - DurationField, - Database, - RP, - Measurement, - rule.Trigger, - ) - - if rule.Details != "" { - res += fmt.Sprintf(` - var details = '%s' - `, rule.Details) - } - return res, nil -} - -// window is only used if deadman or threshold/relative with aggregate. Will return empty -// if no period. 
-func window(rule chronograf.AlertRule) (string, error) { - if rule.Trigger == Deadman { - if rule.TriggerValues.Period == "" { - return "", fmt.Errorf("period cannot be an empty string in deadman alert") - } - return fmt.Sprintf("var period = %s", rule.TriggerValues.Period), nil - - } - // Period only makes sense if the field has been grouped via a time duration. - for _, field := range rule.Query.Fields { - if field.Type == "func" { - n := new(NotEmpty) - n.Valid("group by time", rule.Query.GroupBy.Time) - n.Valid("every", rule.Every) - if n.Err != nil { - return "", n.Err - } - return fmt.Sprintf("var period = %s\nvar every = %s", rule.Query.GroupBy.Time, rule.Every), nil - } - } - return "", nil -} - -func groupBy(q *chronograf.QueryConfig) string { - groups := []string{} - if q != nil { - for _, tag := range q.GroupBy.Tags { - groups = append(groups, fmt.Sprintf("'%s'", tag)) - } - } - return "[" + strings.Join(groups, ",") + "]" -} - -func idVar(q *chronograf.QueryConfig) string { - if len(q.GroupBy.Tags) > 0 { - return `name + ':{{.Group}}'` - } - return "name" -} - -func field(q *chronograf.QueryConfig) (string, error) { - if q == nil { - return "", fmt.Errorf("no fields set in query") - } - if len(q.Fields) != 1 { - return "", fmt.Errorf("expect only one field but found %d", len(q.Fields)) - } - field := q.Fields[0] - if field.Type == "func" { - for _, arg := range field.Args { - if arg.Type == "field" { - f, ok := arg.Value.(string) - if !ok { - return "", fmt.Errorf("field value %v should be a string but is %T", arg.Value, arg.Value) - } - return f, nil - } - } - return "", fmt.Errorf("no fields set in query") - } - f, ok := field.Value.(string) - if !ok { - return "", fmt.Errorf("field value %v should be a string but is %T", field.Value, field.Value) - } - return f, nil -} - -func whereFilter(q *chronograf.QueryConfig) string { - if q != nil { - operator := "==" - if !q.AreTagsAccepted { - operator = "!=" - } - - outer := []string{} - for tag, values := range q.Tags { - inner := []string{} - for _, value := range values { - inner = append(inner, fmt.Sprintf(`"%s" %s '%s'`, tag, operator, value)) - } - outer = append(outer, "("+strings.Join(inner, " OR ")+")") - } - if len(outer) > 0 { - sort.Strings(outer) - return "lambda: " + strings.Join(outer, " AND ") - } - } - return "lambda: TRUE" -} - -// formatValue returns the value unchanged if it is numeric or a kapacitor -// boolean; otherwise it returns it as a kapacitor-formatted single-quoted string -func formatValue(value string) string { - // Treat the value as numeric if it can be converted to a float - if _, err := strconv.ParseFloat(value, 64); err == nil { - return value - } - - // If the value is a kapacitor boolean value perform no formatting - if value == "TRUE" || value == "FALSE" { - return value - } - return "'" + Escape(value) + "'" -} diff --git a/chronograf/.kapacitor/vars_test.go b/chronograf/.kapacitor/vars_test.go deleted file mode 100644 index 871063e1ea7..00000000000 --- a/chronograf/.kapacitor/vars_test.go +++ /dev/null @@ -1,87 +0,0 @@ -package kapacitor - -import ( - "fmt" - "testing" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -func TestVarsCritStringEqual(t *testing.T) { - alert := chronograf.AlertRule{ - Name: "name", - Trigger: "threshold", - TriggerValues: chronograf.TriggerValues{ - Operator: "equal to", - Value: "DOWN", - }, - Every: "30s", - Query: &chronograf.QueryConfig{ - Database: "telegraf", - Measurement: "haproxy", - RetentionPolicy: "autogen", - Fields: []chronograf.Field{ - { - Value:
"status", - Type: "field", - }, - }, - GroupBy: chronograf.GroupBy{ - Time: "10m", - Tags: []string{"pxname"}, - }, - AreTagsAccepted: true, - }, - } - - raw, err := Vars(alert) - if err != nil { - fmt.Printf("%s", raw) - t.Fatalf("Error generating alert: %v %s", err, raw) - } - - tick, err := formatTick(raw) - if err != nil { - t.Errorf("Error formatting alert: %v %s", err, raw) - } - - if err := validateTick(tick); err != nil { - t.Errorf("Error validating alert: %v %s", err, tick) - } -} - -func Test_formatValue(t *testing.T) { - tests := []struct { - name string - value string - want string - }{ - { - name: "parses floats", - value: "3.14", - want: "3.14", - }, - { - name: "parses booleans", - value: "TRUE", - want: "TRUE", - }, - { - name: "single quotes for strings", - value: "up", - want: "'up'", - }, - { - name: "handles escaping of single quotes", - value: "down's", - want: "'down\\'s'", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := formatValue(tt.value); got != tt.want { - t.Errorf("formatValue() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/chronograf/Makefile b/chronograf/Makefile deleted file mode 100644 index ac1ccb08fec..00000000000 --- a/chronograf/Makefile +++ /dev/null @@ -1,26 +0,0 @@ -# List any generated files here -TARGETS = -# List any source files used to generate the targets here -SOURCES = -# List any directories that have their own Makefile here -SUBDIRS = dist server canned - -# Default target -all: $(SUBDIRS) $(TARGETS) - -# Recurse into subdirs for same make goal -$(SUBDIRS): - $(MAKE) -C $@ $(MAKECMDGOALS) - -# Clean all targets recursively -clean: $(SUBDIRS) - rm -f $(TARGETS) - -# Define go generate if not already defined -GO_GENERATE := go generate - -# Run go generate for the targets -$(TARGETS): $(SOURCES) - $(GO_GENERATE) -x - -.PHONY: all clean $(SUBDIRS) diff --git a/chronograf/bolt/base.go b/chronograf/bolt/base.go deleted file mode 100644 index 4719d8cb5d4..00000000000 --- a/chronograf/bolt/base.go +++ /dev/null @@ -1,94 +0,0 @@ -package bolt - -import ( - "time" - - bolt "go.etcd.io/bbolt" -) - -// SchemaVersionBucket stores ids of completed migrations -var SchemaVersionBucket = []byte("SchemaVersions") - -// IsMigrationComplete checks for the presence of a particular migration id -func IsMigrationComplete(db *bolt.DB, id string) (bool, error) { - complete := false - if err := db.View(func(tx *bolt.Tx) error { - migration := tx.Bucket(SchemaVersionBucket).Get([]byte(id)) - if migration != nil { - complete = true - } - return nil - }); err != nil { - return true, err - } - - return complete, nil -} - -// MarkMigrationAsComplete adds the migration id to the schema bucket -func MarkMigrationAsComplete(db *bolt.DB, id string) error { - if err := db.Update(func(tx *bolt.Tx) error { - now := time.Now().UTC().Format(time.RFC3339) - return tx.Bucket(SchemaVersionBucket).Put([]byte(id), []byte(now)) - }); err != nil { - return err - } - - return nil -} - -// Migration defines a database state/schema transition -// ID: After the migration is run, this id is stored in the database. -// We don't want to run a state transition twice -// Up: The forward-transition function. After a version upgrade, a number -// of these will run on database startup in order to bring a user's -// schema in line with struct definitions in the new version. -// Down: The backward-transition function. 
We don't expect these to be -// run on a user's database -- if the user needs to rollback -// to a previous version, it will be easier for them to replace -// their current database with one of their backups. The primary -// purpose of a Down() function is to help contributors move across -// development branches that have different schema definitions. -type Migration struct { - ID string - Up func(db *bolt.DB) error - Down func(db *bolt.DB) error -} - -// Migrate runs one migration's Up() function, if it has not already been run -func (m Migration) Migrate(client *Client) error { - complete, err := IsMigrationComplete(client.db, m.ID) - if err != nil { - return err - } - if complete { - return nil - } - - if client.logger != nil { - client.logger.Info("Running migration ", m.ID, "") - } - - if err = m.Up(client.db); err != nil { - return err - } - - return MarkMigrationAsComplete(client.db, m.ID) -} - -// MigrateAll iterates through all known migrations and runs them in order -func MigrateAll(client *Client) error { - for _, m := range migrations { - err := m.Migrate(client) - - if err != nil { - return err - } - } - - return nil -} - -var migrations = []Migration{ - changeIntervalToDuration, -} diff --git a/chronograf/bolt/bolt_test.go b/chronograf/bolt/bolt_test.go deleted file mode 100644 index 7f452e59338..00000000000 --- a/chronograf/bolt/bolt_test.go +++ /dev/null @@ -1,50 +0,0 @@ -package bolt_test - -import ( - "context" - "errors" - "io/ioutil" - "os" - "time" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/bolt" - "github.com/influxdata/influxdb/v2/chronograf/mocks" -) - -// TestNow is a set time for testing. -var TestNow = time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC) - -// TestClient wraps *bolt.Client. -type TestClient struct { - *bolt.Client -} - -// NewTestClient creates new *bolt.Client with a set time and temp path. 
-func NewTestClient() (*TestClient, error) { - f, err := ioutil.TempFile("", "chronograf-bolt-") - if err != nil { - return nil, errors.New("unable to open temporary boltdb file") - } - f.Close() - - c := &TestClient{ - Client: bolt.NewClient(), - } - c.Path = f.Name() - c.Now = func() time.Time { return TestNow } - - build := chronograf.BuildInfo{ - Version: "version", - Commit: "commit", - } - - c.Open(context.TODO(), mocks.NewLogger(), build) - - return c, nil -} - -func (c *TestClient) Close() error { - defer os.Remove(c.Path) - return c.Client.Close() -} diff --git a/chronograf/bolt/build.go b/chronograf/bolt/build.go deleted file mode 100644 index 3386aff1017..00000000000 --- a/chronograf/bolt/build.go +++ /dev/null @@ -1,83 +0,0 @@ -package bolt - -import ( - "context" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/bolt/internal" - bolt "go.etcd.io/bbolt" -) - -// Ensure BuildStore struct implements chronograf.BuildStore interface -var _ chronograf.BuildStore = &BuildStore{} - -// BuildBucket is the bolt bucket used to store Chronograf build information -var BuildBucket = []byte("Build") - -// BuildKey is the constant key used in the bolt bucket -var BuildKey = []byte("build") - -// BuildStore is a bolt implementation to store Chronograf build information -type BuildStore struct { - client *Client -} - -// Get retrieves Chronograf build information from the database -func (s *BuildStore) Get(ctx context.Context) (chronograf.BuildInfo, error) { - var build chronograf.BuildInfo - if err := s.client.db.View(func(tx *bolt.Tx) error { - var err error - build, err = s.get(ctx, tx) - if err != nil { - return err - } - return nil - }); err != nil { - return chronograf.BuildInfo{}, err - } - - return build, nil -} - -// Update overwrites the current Chronograf build information in the database -func (s *BuildStore) Update(ctx context.Context, build chronograf.BuildInfo) error { - if err := s.client.db.Update(func(tx *bolt.Tx) error { - return s.update(ctx, build, tx) - }); err != nil { - return err - } - - return nil -} - -// Migrate simply stores the current version in the database -func (s *BuildStore) Migrate(ctx context.Context, build chronograf.BuildInfo) error { - return s.Update(ctx, build) -} - -// get retrieves the current build, falling back to a default when missing -func (s *BuildStore) get(ctx context.Context, tx *bolt.Tx) (chronograf.BuildInfo, error) { - var build chronograf.BuildInfo - defaultBuild := chronograf.BuildInfo{ - Version: "pre-1.4.0.0", - Commit: "", - } - - if bucket := tx.Bucket(BuildBucket); bucket == nil { - return defaultBuild, nil - } else if v := bucket.Get(BuildKey); v == nil { - return defaultBuild, nil - } else if err := internal.UnmarshalBuild(v, &build); err != nil { - return build, err - } - return build, nil -} - -func (s *BuildStore) update(ctx context.Context, build chronograf.BuildInfo, tx *bolt.Tx) error { - if v, err := internal.MarshalBuild(build); err != nil { - return err - } else if err := tx.Bucket(BuildBucket).Put(BuildKey, v); err != nil { - return err - } - return nil -} diff --git a/chronograf/bolt/build_test.go b/chronograf/bolt/build_test.go deleted file mode 100644 index 7a29be442ed..00000000000 --- a/chronograf/bolt/build_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package bolt_test - -// import ( -// "testing" - -// "github.com/google/go-cmp/cmp" -// "github.com/influxdata/influxdb/chronograf" -// ) - -// func -// func TestBuildStore_Get(t *testing.T) { 
-// type wants struct { -// build *chronograf.BuildInfo -// err error -// } -// tests := []struct { -// name string -// wants wants -// }{ -// { -// name: "When the build info is missing", -// wants: wants{ -// build: &chronograf.BuildInfo{ -// Version: "pre-1.4.0.0", -// Commit: "", -// }, -// }, -// }, -// } -// for _, tt := range tests { -// client, err := NewTestClient() -// if err != nil { -// t.Fatal(err) -// } -// if err := client.Open(context.TODO()); err != nil { -// t.Fatal(err) -// } -// defer client.Close() - -// b := client.BuildStore -// got, err := b.Get(context.Background()) -// if (tt.wants.err != nil) != (err != nil) { -// t.Errorf("%q. BuildStore.Get() error = %v, wantErr %v", tt.name, err, tt.wants.err) -// continue -// } -// if diff := cmp.Diff(got, tt.wants.build); diff != "" { -// t.Errorf("%q. BuildStore.Get():\n-got/+want\ndiff %s", tt.name, diff) -// } -// } -// } - -// func TestBuildStore_Update(t *testing.T) { - -// } diff --git a/chronograf/bolt/change_interval_to_duration.go b/chronograf/bolt/change_interval_to_duration.go deleted file mode 100644 index ba4aa659f8a..00000000000 --- a/chronograf/bolt/change_interval_to_duration.go +++ /dev/null @@ -1,1424 +0,0 @@ -package bolt - -import ( - "log" - "strings" - - "github.com/gogo/protobuf/proto" - bolt "go.etcd.io/bbolt" -) - -// changeIntervalToDuration -// Before, we supported queries that included `GROUP BY :interval:` -// After, we only support queries with `GROUP BY time(:interval:)` -// thereby allowing non_negative_derivative(_____, :interval) -var changeIntervalToDuration = Migration{ - ID: "59b0cda4fc7909ff84ee5c4f9cb4b655b6a26620", - Up: up, - Down: down, -} - -func updateDashboard(board *Dashboard) { - for _, cell := range board.Cells { - for _, query := range cell.Queries { - query.Command = strings.Replace(query.Command, ":interval:", "time(:interval:)", -1) - } - } -} - -var up = func(db *bolt.DB) error { - // For each dashboard - err := db.Update(func(tx *bolt.Tx) error { - bucket := tx.Bucket(dashboardBucket) - err := bucket.ForEach(func(id, data []byte) error { - board := &Dashboard{} - - err := proto.Unmarshal(data, board) - if err != nil { - log.Fatal("unmarshalling error: ", err) - } - - // Migrate the dashboard - updateDashboard(board) - - data, err = proto.Marshal(board) - if err != nil { - log.Fatal("marshaling error: ", err) - } - - err = bucket.Put(id, data) - if err != nil { - log.Fatal("error updating dashboard: ", err) - } - - return nil - }) - - if err != nil { - log.Fatal("error updating dashboards: ", err) - } - - return nil - }) - - if err != nil { - return err - } - - return nil -} - -var down = func(db *bolt.DB) error { - return nil -} - -/* - Import protobuf types and bucket names that are pertinent to this migration. - This isolates the migration from the codebase, and prevents a future change - to a type definition from invalidating the migration functions. -*/ -var dashboardBucket = []byte("Dashoard") // N.B. leave the misspelling for backwards-compat! 
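
The rewrite this migration performs is a plain string substitution over every cell query, as updateDashboard above shows. A minimal standalone sketch of the same transformation (hypothetical package main, not part of the original file; the query text is invented for illustration):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Before the migration, dashboards could use the bare template token.
	cmd := `SELECT mean("usage") FROM "cpu" GROUP BY :interval:`

	// The migration wraps every occurrence in time(), so only the
	// GROUP BY time(:interval:) form remains in stored dashboards.
	migrated := strings.Replace(cmd, ":interval:", "time(:interval:)", -1)

	fmt.Println(migrated)
	// SELECT mean("usage") FROM "cpu" GROUP BY time(:interval:)
}
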
- -type Source struct { - ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - Name string `protobuf:"bytes,2,opt,name=Name,proto3" json:"Name,omitempty"` - Type string `protobuf:"bytes,3,opt,name=Type,proto3" json:"Type,omitempty"` - Username string `protobuf:"bytes,4,opt,name=Username,proto3" json:"Username,omitempty"` - Password string `protobuf:"bytes,5,opt,name=Password,proto3" json:"Password,omitempty"` - URL string `protobuf:"bytes,6,opt,name=URL,proto3" json:"URL,omitempty"` - Default bool `protobuf:"varint,7,opt,name=Default,proto3" json:"Default,omitempty"` - Telegraf string `protobuf:"bytes,8,opt,name=Telegraf,proto3" json:"Telegraf,omitempty"` - InsecureSkipVerify bool `protobuf:"varint,9,opt,name=InsecureSkipVerify,proto3" json:"InsecureSkipVerify,omitempty"` - MetaURL string `protobuf:"bytes,10,opt,name=MetaURL,proto3" json:"MetaURL,omitempty"` - SharedSecret string `protobuf:"bytes,11,opt,name=SharedSecret,proto3" json:"SharedSecret,omitempty"` - Organization string `protobuf:"bytes,12,opt,name=Organization,proto3" json:"Organization,omitempty"` - Role string `protobuf:"bytes,13,opt,name=Role,proto3" json:"Role,omitempty"` -} - -func (m *Source) Reset() { *m = Source{} } -func (m *Source) String() string { return proto.CompactTextString(m) } -func (*Source) ProtoMessage() {} -func (*Source) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{0} } - -func (m *Source) GetID() int64 { - if m != nil { - return m.ID - } - return 0 -} - -func (m *Source) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Source) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *Source) GetUsername() string { - if m != nil { - return m.Username - } - return "" -} - -func (m *Source) GetPassword() string { - if m != nil { - return m.Password - } - return "" -} - -func (m *Source) GetURL() string { - if m != nil { - return m.URL - } - return "" -} - -func (m *Source) GetDefault() bool { - if m != nil { - return m.Default - } - return false -} - -func (m *Source) GetTelegraf() string { - if m != nil { - return m.Telegraf - } - return "" -} - -func (m *Source) GetInsecureSkipVerify() bool { - if m != nil { - return m.InsecureSkipVerify - } - return false -} - -func (m *Source) GetMetaURL() string { - if m != nil { - return m.MetaURL - } - return "" -} - -func (m *Source) GetSharedSecret() string { - if m != nil { - return m.SharedSecret - } - return "" -} - -func (m *Source) GetOrganization() string { - if m != nil { - return m.Organization - } - return "" -} - -func (m *Source) GetRole() string { - if m != nil { - return m.Role - } - return "" -} - -type Dashboard struct { - ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - Name string `protobuf:"bytes,2,opt,name=Name,proto3" json:"Name,omitempty"` - Cells []*DashboardCell `protobuf:"bytes,3,rep,name=cells" json:"cells,omitempty"` - Templates []*Template `protobuf:"bytes,4,rep,name=templates" json:"templates,omitempty"` - Organization string `protobuf:"bytes,5,opt,name=Organization,proto3" json:"Organization,omitempty"` -} - -func (m *Dashboard) Reset() { *m = Dashboard{} } -func (m *Dashboard) String() string { return proto.CompactTextString(m) } -func (*Dashboard) ProtoMessage() {} -func (*Dashboard) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{1} } - -func (m *Dashboard) GetID() int64 { - if m != nil { - return m.ID - } - return 0 -} - -func (m *Dashboard) GetName() string { - if m != nil { - return m.Name - 
} - return "" -} - -func (m *Dashboard) GetCells() []*DashboardCell { - if m != nil { - return m.Cells - } - return nil -} - -func (m *Dashboard) GetTemplates() []*Template { - if m != nil { - return m.Templates - } - return nil -} - -func (m *Dashboard) GetOrganization() string { - if m != nil { - return m.Organization - } - return "" -} - -type DashboardCell struct { - X int32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"` - Y int32 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"` - W int32 `protobuf:"varint,3,opt,name=w,proto3" json:"w,omitempty"` - H int32 `protobuf:"varint,4,opt,name=h,proto3" json:"h,omitempty"` - Queries []*Query `protobuf:"bytes,5,rep,name=queries" json:"queries,omitempty"` - Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` - Type string `protobuf:"bytes,7,opt,name=type,proto3" json:"type,omitempty"` - ID string `protobuf:"bytes,8,opt,name=ID,proto3" json:"ID,omitempty"` - Axes map[string]*Axis `protobuf:"bytes,9,rep,name=axes" json:"axes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value"` - Colors []*Color `protobuf:"bytes,10,rep,name=colors" json:"colors,omitempty"` - Legend *Legend `protobuf:"bytes,11,opt,name=legend" json:"legend,omitempty"` - TableOptions *TableOptions `protobuf:"bytes,12,opt,name=tableOptions" json:"tableOptions,omitempty"` -} - -func (m *DashboardCell) Reset() { *m = DashboardCell{} } -func (m *DashboardCell) String() string { return proto.CompactTextString(m) } -func (*DashboardCell) ProtoMessage() {} -func (*DashboardCell) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{2} } - -func (m *DashboardCell) GetX() int32 { - if m != nil { - return m.X - } - return 0 -} - -func (m *DashboardCell) GetY() int32 { - if m != nil { - return m.Y - } - return 0 -} - -func (m *DashboardCell) GetW() int32 { - if m != nil { - return m.W - } - return 0 -} - -func (m *DashboardCell) GetH() int32 { - if m != nil { - return m.H - } - return 0 -} - -func (m *DashboardCell) GetQueries() []*Query { - if m != nil { - return m.Queries - } - return nil -} - -func (m *DashboardCell) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *DashboardCell) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *DashboardCell) GetID() string { - if m != nil { - return m.ID - } - return "" -} - -func (m *DashboardCell) GetAxes() map[string]*Axis { - if m != nil { - return m.Axes - } - return nil -} - -func (m *DashboardCell) GetColors() []*Color { - if m != nil { - return m.Colors - } - return nil -} - -func (m *DashboardCell) GetLegend() *Legend { - if m != nil { - return m.Legend - } - return nil -} - -func (m *DashboardCell) GetTableOptions() *TableOptions { - if m != nil { - return m.TableOptions - } - return nil -} - -type TableOptions struct { - VerticalTimeAxis bool `protobuf:"varint,2,opt,name=verticalTimeAxis,proto3" json:"verticalTimeAxis,omitempty"` - SortBy *RenamableField `protobuf:"bytes,3,opt,name=sortBy" json:"sortBy,omitempty"` - Wrapping string `protobuf:"bytes,4,opt,name=wrapping,proto3" json:"wrapping,omitempty"` - FieldNames []*RenamableField `protobuf:"bytes,5,rep,name=fieldNames" json:"fieldNames,omitempty"` - FixFirstColumn bool `protobuf:"varint,6,opt,name=fixFirstColumn,proto3" json:"fixFirstColumn,omitempty"` -} - -func (m *TableOptions) Reset() { *m = TableOptions{} } -func (m *TableOptions) String() string { return proto.CompactTextString(m) } -func (*TableOptions) ProtoMessage() {} 
-func (*TableOptions) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{3} } - -func (m *TableOptions) GetVerticalTimeAxis() bool { - if m != nil { - return m.VerticalTimeAxis - } - return false -} - -func (m *TableOptions) GetSortBy() *RenamableField { - if m != nil { - return m.SortBy - } - return nil -} - -func (m *TableOptions) GetWrapping() string { - if m != nil { - return m.Wrapping - } - return "" -} - -func (m *TableOptions) GetFieldNames() []*RenamableField { - if m != nil { - return m.FieldNames - } - return nil -} - -func (m *TableOptions) GetFixFirstColumn() bool { - if m != nil { - return m.FixFirstColumn - } - return false -} - -type RenamableField struct { - InternalName string `protobuf:"bytes,1,opt,name=internalName,proto3" json:"internalName,omitempty"` - DisplayName string `protobuf:"bytes,2,opt,name=displayName,proto3" json:"displayName,omitempty"` - Visible bool `protobuf:"varint,3,opt,name=visible,proto3" json:"visible,omitempty"` -} - -func (m *RenamableField) Reset() { *m = RenamableField{} } -func (m *RenamableField) String() string { return proto.CompactTextString(m) } -func (*RenamableField) ProtoMessage() {} -func (*RenamableField) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{4} } - -func (m *RenamableField) GetInternalName() string { - if m != nil { - return m.InternalName - } - return "" -} - -func (m *RenamableField) GetDisplayName() string { - if m != nil { - return m.DisplayName - } - return "" -} - -func (m *RenamableField) GetVisible() bool { - if m != nil { - return m.Visible - } - return false -} - -type Color struct { - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - Type string `protobuf:"bytes,2,opt,name=Type,proto3" json:"Type,omitempty"` - Hex string `protobuf:"bytes,3,opt,name=Hex,proto3" json:"Hex,omitempty"` - Name string `protobuf:"bytes,4,opt,name=Name,proto3" json:"Name,omitempty"` - Value string `protobuf:"bytes,5,opt,name=Value,proto3" json:"Value,omitempty"` -} - -func (m *Color) Reset() { *m = Color{} } -func (m *Color) String() string { return proto.CompactTextString(m) } -func (*Color) ProtoMessage() {} -func (*Color) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{5} } - -func (m *Color) GetID() string { - if m != nil { - return m.ID - } - return "" -} - -func (m *Color) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *Color) GetHex() string { - if m != nil { - return m.Hex - } - return "" -} - -func (m *Color) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Color) GetValue() string { - if m != nil { - return m.Value - } - return "" -} - -type Legend struct { - Type string `protobuf:"bytes,1,opt,name=Type,proto3" json:"Type,omitempty"` - Orientation string `protobuf:"bytes,2,opt,name=Orientation,proto3" json:"Orientation,omitempty"` -} - -func (m *Legend) Reset() { *m = Legend{} } -func (m *Legend) String() string { return proto.CompactTextString(m) } -func (*Legend) ProtoMessage() {} -func (*Legend) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{6} } - -func (m *Legend) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *Legend) GetOrientation() string { - if m != nil { - return m.Orientation - } - return "" -} - -type Axis struct { - LegacyBounds []int64 `protobuf:"varint,1,rep,packed,name=legacyBounds" json:"legacyBounds,omitempty"` - Bounds []string `protobuf:"bytes,2,rep,name=bounds" json:"bounds,omitempty"` - Label string 
`protobuf:"bytes,3,opt,name=label,proto3" json:"label,omitempty"` - Prefix string `protobuf:"bytes,4,opt,name=prefix,proto3" json:"prefix,omitempty"` - Suffix string `protobuf:"bytes,5,opt,name=suffix,proto3" json:"suffix,omitempty"` - Base string `protobuf:"bytes,6,opt,name=base,proto3" json:"base,omitempty"` - Scale string `protobuf:"bytes,7,opt,name=scale,proto3" json:"scale,omitempty"` -} - -func (m *Axis) Reset() { *m = Axis{} } -func (m *Axis) String() string { return proto.CompactTextString(m) } -func (*Axis) ProtoMessage() {} -func (*Axis) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{7} } - -func (m *Axis) GetLegacyBounds() []int64 { - if m != nil { - return m.LegacyBounds - } - return nil -} - -func (m *Axis) GetBounds() []string { - if m != nil { - return m.Bounds - } - return nil -} - -func (m *Axis) GetLabel() string { - if m != nil { - return m.Label - } - return "" -} - -func (m *Axis) GetPrefix() string { - if m != nil { - return m.Prefix - } - return "" -} - -func (m *Axis) GetSuffix() string { - if m != nil { - return m.Suffix - } - return "" -} - -func (m *Axis) GetBase() string { - if m != nil { - return m.Base - } - return "" -} - -func (m *Axis) GetScale() string { - if m != nil { - return m.Scale - } - return "" -} - -type Template struct { - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - TempVar string `protobuf:"bytes,2,opt,name=temp_var,json=tempVar,proto3" json:"temp_var,omitempty"` - Values []*TemplateValue `protobuf:"bytes,3,rep,name=values" json:"values,omitempty"` - Type string `protobuf:"bytes,4,opt,name=type,proto3" json:"type,omitempty"` - Label string `protobuf:"bytes,5,opt,name=label,proto3" json:"label,omitempty"` - Query *TemplateQuery `protobuf:"bytes,6,opt,name=query" json:"query,omitempty"` -} - -func (m *Template) Reset() { *m = Template{} } -func (m *Template) String() string { return proto.CompactTextString(m) } -func (*Template) ProtoMessage() {} -func (*Template) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{8} } - -func (m *Template) GetID() string { - if m != nil { - return m.ID - } - return "" -} - -func (m *Template) GetTempVar() string { - if m != nil { - return m.TempVar - } - return "" -} - -func (m *Template) GetValues() []*TemplateValue { - if m != nil { - return m.Values - } - return nil -} - -func (m *Template) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *Template) GetLabel() string { - if m != nil { - return m.Label - } - return "" -} - -func (m *Template) GetQuery() *TemplateQuery { - if m != nil { - return m.Query - } - return nil -} - -type TemplateValue struct { - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - Selected bool `protobuf:"varint,3,opt,name=selected,proto3" json:"selected,omitempty"` -} - -func (m *TemplateValue) Reset() { *m = TemplateValue{} } -func (m *TemplateValue) String() string { return proto.CompactTextString(m) } -func (*TemplateValue) ProtoMessage() {} -func (*TemplateValue) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{9} } - -func (m *TemplateValue) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *TemplateValue) GetValue() string { - if m != nil { - return m.Value - } - return "" -} - -func (m *TemplateValue) GetSelected() bool { - if m != nil { - return m.Selected - } - return false -} - -type TemplateQuery struct { - Command string 
`protobuf:"bytes,1,opt,name=command,proto3" json:"command,omitempty"` - Db string `protobuf:"bytes,2,opt,name=db,proto3" json:"db,omitempty"` - Rp string `protobuf:"bytes,3,opt,name=rp,proto3" json:"rp,omitempty"` - Measurement string `protobuf:"bytes,4,opt,name=measurement,proto3" json:"measurement,omitempty"` - TagKey string `protobuf:"bytes,5,opt,name=tag_key,json=tagKey,proto3" json:"tag_key,omitempty"` - FieldKey string `protobuf:"bytes,6,opt,name=field_key,json=fieldKey,proto3" json:"field_key,omitempty"` -} - -func (m *TemplateQuery) Reset() { *m = TemplateQuery{} } -func (m *TemplateQuery) String() string { return proto.CompactTextString(m) } -func (*TemplateQuery) ProtoMessage() {} -func (*TemplateQuery) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{10} } - -func (m *TemplateQuery) GetCommand() string { - if m != nil { - return m.Command - } - return "" -} - -func (m *TemplateQuery) GetDb() string { - if m != nil { - return m.Db - } - return "" -} - -func (m *TemplateQuery) GetRp() string { - if m != nil { - return m.Rp - } - return "" -} - -func (m *TemplateQuery) GetMeasurement() string { - if m != nil { - return m.Measurement - } - return "" -} - -func (m *TemplateQuery) GetTagKey() string { - if m != nil { - return m.TagKey - } - return "" -} - -func (m *TemplateQuery) GetFieldKey() string { - if m != nil { - return m.FieldKey - } - return "" -} - -type Server struct { - ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - Name string `protobuf:"bytes,2,opt,name=Name,proto3" json:"Name,omitempty"` - Username string `protobuf:"bytes,3,opt,name=Username,proto3" json:"Username,omitempty"` - Password string `protobuf:"bytes,4,opt,name=Password,proto3" json:"Password,omitempty"` - URL string `protobuf:"bytes,5,opt,name=URL,proto3" json:"URL,omitempty"` - SrcID int64 `protobuf:"varint,6,opt,name=SrcID,proto3" json:"SrcID,omitempty"` - Active bool `protobuf:"varint,7,opt,name=Active,proto3" json:"Active,omitempty"` - Organization string `protobuf:"bytes,8,opt,name=Organization,proto3" json:"Organization,omitempty"` - InsecureSkipVerify bool `protobuf:"varint,9,opt,name=InsecureSkipVerify,proto3" json:"InsecureSkipVerify,omitempty"` -} - -func (m *Server) Reset() { *m = Server{} } -func (m *Server) String() string { return proto.CompactTextString(m) } -func (*Server) ProtoMessage() {} -func (*Server) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{11} } - -func (m *Server) GetID() int64 { - if m != nil { - return m.ID - } - return 0 -} - -func (m *Server) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Server) GetUsername() string { - if m != nil { - return m.Username - } - return "" -} - -func (m *Server) GetPassword() string { - if m != nil { - return m.Password - } - return "" -} - -func (m *Server) GetURL() string { - if m != nil { - return m.URL - } - return "" -} - -func (m *Server) GetSrcID() int64 { - if m != nil { - return m.SrcID - } - return 0 -} - -func (m *Server) GetActive() bool { - if m != nil { - return m.Active - } - return false -} - -func (m *Server) GetOrganization() string { - if m != nil { - return m.Organization - } - return "" -} - -func (m *Server) GetInsecureSkipVerify() bool { - if m != nil { - return m.InsecureSkipVerify - } - return false -} - -type Layout struct { - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - Application string `protobuf:"bytes,2,opt,name=Application,proto3" json:"Application,omitempty"` - Measurement string 
`protobuf:"bytes,3,opt,name=Measurement,proto3" json:"Measurement,omitempty"` - Cells []*Cell `protobuf:"bytes,4,rep,name=Cells" json:"Cells,omitempty"` - Autoflow bool `protobuf:"varint,5,opt,name=Autoflow,proto3" json:"Autoflow,omitempty"` -} - -func (m *Layout) Reset() { *m = Layout{} } -func (m *Layout) String() string { return proto.CompactTextString(m) } -func (*Layout) ProtoMessage() {} -func (*Layout) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{12} } - -func (m *Layout) GetID() string { - if m != nil { - return m.ID - } - return "" -} - -func (m *Layout) GetApplication() string { - if m != nil { - return m.Application - } - return "" -} - -func (m *Layout) GetMeasurement() string { - if m != nil { - return m.Measurement - } - return "" -} - -func (m *Layout) GetCells() []*Cell { - if m != nil { - return m.Cells - } - return nil -} - -func (m *Layout) GetAutoflow() bool { - if m != nil { - return m.Autoflow - } - return false -} - -type Cell struct { - X int32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"` - Y int32 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"` - W int32 `protobuf:"varint,3,opt,name=w,proto3" json:"w,omitempty"` - H int32 `protobuf:"varint,4,opt,name=h,proto3" json:"h,omitempty"` - Queries []*Query `protobuf:"bytes,5,rep,name=queries" json:"queries,omitempty"` - I string `protobuf:"bytes,6,opt,name=i,proto3" json:"i,omitempty"` - Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"` - Yranges []int64 `protobuf:"varint,8,rep,packed,name=yranges" json:"yranges,omitempty"` - Ylabels []string `protobuf:"bytes,9,rep,name=ylabels" json:"ylabels,omitempty"` - Type string `protobuf:"bytes,10,opt,name=type,proto3" json:"type,omitempty"` - Axes map[string]*Axis `protobuf:"bytes,11,rep,name=axes" json:"axes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value"` -} - -func (m *Cell) Reset() { *m = Cell{} } -func (m *Cell) String() string { return proto.CompactTextString(m) } -func (*Cell) ProtoMessage() {} -func (*Cell) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{13} } - -func (m *Cell) GetX() int32 { - if m != nil { - return m.X - } - return 0 -} - -func (m *Cell) GetY() int32 { - if m != nil { - return m.Y - } - return 0 -} - -func (m *Cell) GetW() int32 { - if m != nil { - return m.W - } - return 0 -} - -func (m *Cell) GetH() int32 { - if m != nil { - return m.H - } - return 0 -} - -func (m *Cell) GetQueries() []*Query { - if m != nil { - return m.Queries - } - return nil -} - -func (m *Cell) GetI() string { - if m != nil { - return m.I - } - return "" -} - -func (m *Cell) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Cell) GetYranges() []int64 { - if m != nil { - return m.Yranges - } - return nil -} - -func (m *Cell) GetYlabels() []string { - if m != nil { - return m.Ylabels - } - return nil -} - -func (m *Cell) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *Cell) GetAxes() map[string]*Axis { - if m != nil { - return m.Axes - } - return nil -} - -type Query struct { - Command string `protobuf:"bytes,1,opt,name=Command,proto3" json:"Command,omitempty"` - DB string `protobuf:"bytes,2,opt,name=DB,proto3" json:"DB,omitempty"` - RP string `protobuf:"bytes,3,opt,name=RP,proto3" json:"RP,omitempty"` - GroupBys []string `protobuf:"bytes,4,rep,name=GroupBys" json:"GroupBys,omitempty"` - Wheres []string `protobuf:"bytes,5,rep,name=Wheres" json:"Wheres,omitempty"` - Label string 
`protobuf:"bytes,6,opt,name=Label,proto3" json:"Label,omitempty"` - Range *Range `protobuf:"bytes,7,opt,name=Range" json:"Range,omitempty"` - Source string `protobuf:"bytes,8,opt,name=Source,proto3" json:"Source,omitempty"` - Shifts []*TimeShift `protobuf:"bytes,9,rep,name=Shifts" json:"Shifts,omitempty"` -} - -func (m *Query) Reset() { *m = Query{} } -func (m *Query) String() string { return proto.CompactTextString(m) } -func (*Query) ProtoMessage() {} -func (*Query) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{14} } - -func (m *Query) GetCommand() string { - if m != nil { - return m.Command - } - return "" -} - -func (m *Query) GetDB() string { - if m != nil { - return m.DB - } - return "" -} - -func (m *Query) GetRP() string { - if m != nil { - return m.RP - } - return "" -} - -func (m *Query) GetGroupBys() []string { - if m != nil { - return m.GroupBys - } - return nil -} - -func (m *Query) GetWheres() []string { - if m != nil { - return m.Wheres - } - return nil -} - -func (m *Query) GetLabel() string { - if m != nil { - return m.Label - } - return "" -} - -func (m *Query) GetRange() *Range { - if m != nil { - return m.Range - } - return nil -} - -func (m *Query) GetSource() string { - if m != nil { - return m.Source - } - return "" -} - -func (m *Query) GetShifts() []*TimeShift { - if m != nil { - return m.Shifts - } - return nil -} - -type TimeShift struct { - Label string `protobuf:"bytes,1,opt,name=Label,proto3" json:"Label,omitempty"` - Unit string `protobuf:"bytes,2,opt,name=Unit,proto3" json:"Unit,omitempty"` - Quantity string `protobuf:"bytes,3,opt,name=Quantity,proto3" json:"Quantity,omitempty"` -} - -func (m *TimeShift) Reset() { *m = TimeShift{} } -func (m *TimeShift) String() string { return proto.CompactTextString(m) } -func (*TimeShift) ProtoMessage() {} -func (*TimeShift) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{15} } - -func (m *TimeShift) GetLabel() string { - if m != nil { - return m.Label - } - return "" -} - -func (m *TimeShift) GetUnit() string { - if m != nil { - return m.Unit - } - return "" -} - -func (m *TimeShift) GetQuantity() string { - if m != nil { - return m.Quantity - } - return "" -} - -type Range struct { - Upper int64 `protobuf:"varint,1,opt,name=Upper,proto3" json:"Upper,omitempty"` - Lower int64 `protobuf:"varint,2,opt,name=Lower,proto3" json:"Lower,omitempty"` -} - -func (m *Range) Reset() { *m = Range{} } -func (m *Range) String() string { return proto.CompactTextString(m) } -func (*Range) ProtoMessage() {} -func (*Range) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{16} } - -func (m *Range) GetUpper() int64 { - if m != nil { - return m.Upper - } - return 0 -} - -func (m *Range) GetLower() int64 { - if m != nil { - return m.Lower - } - return 0 -} - -type AlertRule struct { - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - JSON string `protobuf:"bytes,2,opt,name=JSON,proto3" json:"JSON,omitempty"` - SrcID int64 `protobuf:"varint,3,opt,name=SrcID,proto3" json:"SrcID,omitempty"` - KapaID int64 `protobuf:"varint,4,opt,name=KapaID,proto3" json:"KapaID,omitempty"` -} - -func (m *AlertRule) Reset() { *m = AlertRule{} } -func (m *AlertRule) String() string { return proto.CompactTextString(m) } -func (*AlertRule) ProtoMessage() {} -func (*AlertRule) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{17} } - -func (m *AlertRule) GetID() string { - if m != nil { - return m.ID - } - return "" -} - -func (m *AlertRule) GetJSON() string { - if m != 
nil { - return m.JSON - } - return "" -} - -func (m *AlertRule) GetSrcID() int64 { - if m != nil { - return m.SrcID - } - return 0 -} - -func (m *AlertRule) GetKapaID() int64 { - if m != nil { - return m.KapaID - } - return 0 -} - -type User struct { - ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - Name string `protobuf:"bytes,2,opt,name=Name,proto3" json:"Name,omitempty"` - Provider string `protobuf:"bytes,3,opt,name=Provider,proto3" json:"Provider,omitempty"` - Scheme string `protobuf:"bytes,4,opt,name=Scheme,proto3" json:"Scheme,omitempty"` - Roles []*Role `protobuf:"bytes,5,rep,name=Roles" json:"Roles,omitempty"` - SuperAdmin bool `protobuf:"varint,6,opt,name=SuperAdmin,proto3" json:"SuperAdmin,omitempty"` -} - -func (m *User) Reset() { *m = User{} } -func (m *User) String() string { return proto.CompactTextString(m) } -func (*User) ProtoMessage() {} -func (*User) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{18} } - -func (m *User) GetID() uint64 { - if m != nil { - return m.ID - } - return 0 -} - -func (m *User) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *User) GetProvider() string { - if m != nil { - return m.Provider - } - return "" -} - -func (m *User) GetScheme() string { - if m != nil { - return m.Scheme - } - return "" -} - -func (m *User) GetRoles() []*Role { - if m != nil { - return m.Roles - } - return nil -} - -func (m *User) GetSuperAdmin() bool { - if m != nil { - return m.SuperAdmin - } - return false -} - -type Role struct { - Organization string `protobuf:"bytes,1,opt,name=Organization,proto3" json:"Organization,omitempty"` - Name string `protobuf:"bytes,2,opt,name=Name,proto3" json:"Name,omitempty"` -} - -func (m *Role) Reset() { *m = Role{} } -func (m *Role) String() string { return proto.CompactTextString(m) } -func (*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{19} } - -func (m *Role) GetOrganization() string { - if m != nil { - return m.Organization - } - return "" -} - -func (m *Role) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -type Mapping struct { - Provider string `protobuf:"bytes,1,opt,name=Provider,proto3" json:"Provider,omitempty"` - Scheme string `protobuf:"bytes,2,opt,name=Scheme,proto3" json:"Scheme,omitempty"` - ProviderOrganization string `protobuf:"bytes,3,opt,name=ProviderOrganization,proto3" json:"ProviderOrganization,omitempty"` - ID string `protobuf:"bytes,4,opt,name=ID,proto3" json:"ID,omitempty"` - Organization string `protobuf:"bytes,5,opt,name=Organization,proto3" json:"Organization,omitempty"` -} - -func (m *Mapping) Reset() { *m = Mapping{} } -func (m *Mapping) String() string { return proto.CompactTextString(m) } -func (*Mapping) ProtoMessage() {} -func (*Mapping) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{20} } - -func (m *Mapping) GetProvider() string { - if m != nil { - return m.Provider - } - return "" -} - -func (m *Mapping) GetScheme() string { - if m != nil { - return m.Scheme - } - return "" -} - -func (m *Mapping) GetProviderOrganization() string { - if m != nil { - return m.ProviderOrganization - } - return "" -} - -func (m *Mapping) GetID() string { - if m != nil { - return m.ID - } - return "" -} - -func (m *Mapping) GetOrganization() string { - if m != nil { - return m.Organization - } - return "" -} - -type Organization struct { - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - Name string 
`protobuf:"bytes,2,opt,name=Name,proto3" json:"Name,omitempty"` - DefaultRole string `protobuf:"bytes,3,opt,name=DefaultRole,proto3" json:"DefaultRole,omitempty"` -} - -func (m *Organization) Reset() { *m = Organization{} } -func (m *Organization) String() string { return proto.CompactTextString(m) } -func (*Organization) ProtoMessage() {} -func (*Organization) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{21} } - -func (m *Organization) GetID() string { - if m != nil { - return m.ID - } - return "" -} - -func (m *Organization) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Organization) GetDefaultRole() string { - if m != nil { - return m.DefaultRole - } - return "" -} - -type Config struct { - Auth *AuthConfig `protobuf:"bytes,1,opt,name=Auth" json:"Auth,omitempty"` -} - -func (m *Config) Reset() { *m = Config{} } -func (m *Config) String() string { return proto.CompactTextString(m) } -func (*Config) ProtoMessage() {} -func (*Config) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{22} } - -func (m *Config) GetAuth() *AuthConfig { - if m != nil { - return m.Auth - } - return nil -} - -type AuthConfig struct { - SuperAdminNewUsers bool `protobuf:"varint,1,opt,name=SuperAdminNewUsers,proto3" json:"SuperAdminNewUsers,omitempty"` -} - -func (m *AuthConfig) Reset() { *m = AuthConfig{} } -func (m *AuthConfig) String() string { return proto.CompactTextString(m) } -func (*AuthConfig) ProtoMessage() {} -func (*AuthConfig) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{23} } - -func (m *AuthConfig) GetSuperAdminNewUsers() bool { - if m != nil { - return m.SuperAdminNewUsers - } - return false -} - -type BuildInfo struct { - Version string `protobuf:"bytes,1,opt,name=Version,proto3" json:"Version,omitempty"` - Commit string `protobuf:"bytes,2,opt,name=Commit,proto3" json:"Commit,omitempty"` -} - -func (m *BuildInfo) Reset() { *m = BuildInfo{} } -func (m *BuildInfo) String() string { return proto.CompactTextString(m) } -func (*BuildInfo) ProtoMessage() {} -func (*BuildInfo) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{24} } - -func (m *BuildInfo) GetVersion() string { - if m != nil { - return m.Version - } - return "" -} - -func (m *BuildInfo) GetCommit() string { - if m != nil { - return m.Commit - } - return "" -} - -var fileDescriptorInternal = []byte{ - // 1586 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0x5f, 0x8f, 0xdb, 0x44, - 0x10, 0x97, 0x93, 0x38, 0x89, 0x27, 0xd7, 0xe3, 0x64, 0x4e, 0xad, 0x29, 0x12, 0x0a, 0x16, 0x7f, - 0xc2, 0x9f, 0x1e, 0x55, 0x2a, 0xa4, 0xaa, 0x82, 0x4a, 0xb9, 0x0b, 0x2d, 0x47, 0xaf, 0xbd, 0xeb, - 0xe6, 0xee, 0x78, 0x42, 0xd5, 0x26, 0x99, 0x24, 0x56, 0x1d, 0xdb, 0xac, 0xed, 0xbb, 0x98, 0x8f, - 0xc0, 0x87, 0x40, 0x42, 0x82, 0x2f, 0x80, 0x78, 0xe1, 0x89, 0x77, 0x3e, 0x08, 0x5f, 0x01, 0x1e, - 0xd1, 0xec, 0xae, 0x1d, 0xe7, 0x92, 0x56, 0x45, 0x42, 0xbc, 0xed, 0x6f, 0x66, 0x3c, 0xbb, 0xf3, - 0x7f, 0x0c, 0xdb, 0x5e, 0x90, 0xa0, 0x08, 0xb8, 0xbf, 0x17, 0x89, 0x30, 0x09, 0xed, 0x66, 0x8e, - 0xdd, 0x3f, 0x2b, 0x50, 0x1f, 0x84, 0xa9, 0x18, 0xa1, 0xbd, 0x0d, 0x95, 0xc3, 0xbe, 0x63, 0xb4, - 0x8d, 0x4e, 0x95, 0x55, 0x0e, 0xfb, 0xb6, 0x0d, 0xb5, 0x27, 0x7c, 0x8e, 0x4e, 0xa5, 0x6d, 0x74, - 0x2c, 0x26, 0xcf, 0x44, 0x3b, 0xcd, 0x22, 0x74, 0xaa, 0x8a, 0x46, 0x67, 0xfb, 0x26, 0x34, 0xcf, - 0x62, 0xd2, 0x36, 0x47, 0xa7, 0x26, 0xe9, 0x05, 0x26, 0xde, 0x09, 0x8f, 0xe3, 0xcb, 0x50, 0x8c, - 0x1d, 0x53, 0xf1, 
0x72, 0x6c, 0xef, 0x40, 0xf5, 0x8c, 0x1d, 0x39, 0x75, 0x49, 0xa6, 0xa3, 0xed, - 0x40, 0xa3, 0x8f, 0x13, 0x9e, 0xfa, 0x89, 0xd3, 0x68, 0x1b, 0x9d, 0x26, 0xcb, 0x21, 0xe9, 0x39, - 0x45, 0x1f, 0xa7, 0x82, 0x4f, 0x9c, 0xa6, 0xd2, 0x93, 0x63, 0x7b, 0x0f, 0xec, 0xc3, 0x20, 0xc6, - 0x51, 0x2a, 0x70, 0xf0, 0xdc, 0x8b, 0xce, 0x51, 0x78, 0x93, 0xcc, 0xb1, 0xa4, 0x82, 0x0d, 0x1c, - 0xba, 0xe5, 0x31, 0x26, 0x9c, 0xee, 0x06, 0xa9, 0x2a, 0x87, 0xb6, 0x0b, 0x5b, 0x83, 0x19, 0x17, - 0x38, 0x1e, 0xe0, 0x48, 0x60, 0xe2, 0xb4, 0x24, 0x7b, 0x85, 0x46, 0x32, 0xc7, 0x62, 0xca, 0x03, - 0xef, 0x3b, 0x9e, 0x78, 0x61, 0xe0, 0x6c, 0x29, 0x99, 0x32, 0x8d, 0xbc, 0xc4, 0x42, 0x1f, 0x9d, - 0x6b, 0xca, 0x4b, 0x74, 0x76, 0x7f, 0x35, 0xc0, 0xea, 0xf3, 0x78, 0x36, 0x0c, 0xb9, 0x18, 0xbf, - 0x92, 0xaf, 0x6f, 0x81, 0x39, 0x42, 0xdf, 0x8f, 0x9d, 0x6a, 0xbb, 0xda, 0x69, 0x75, 0x6f, 0xec, - 0x15, 0x41, 0x2c, 0xf4, 0x1c, 0xa0, 0xef, 0x33, 0x25, 0x65, 0xdf, 0x06, 0x2b, 0xc1, 0x79, 0xe4, - 0xf3, 0x04, 0x63, 0xa7, 0x26, 0x3f, 0xb1, 0x97, 0x9f, 0x9c, 0x6a, 0x16, 0x5b, 0x0a, 0xad, 0x99, - 0x62, 0xae, 0x9b, 0xe2, 0xfe, 0x56, 0x85, 0x6b, 0x2b, 0xd7, 0xd9, 0x5b, 0x60, 0x2c, 0xe4, 0xcb, - 0x4d, 0x66, 0x2c, 0x08, 0x65, 0xf2, 0xd5, 0x26, 0x33, 0x32, 0x42, 0x97, 0x32, 0x37, 0x4c, 0x66, - 0x5c, 0x12, 0x9a, 0xc9, 0x8c, 0x30, 0x99, 0x31, 0xb3, 0x3f, 0x80, 0xc6, 0xb7, 0x29, 0x0a, 0x0f, - 0x63, 0xc7, 0x94, 0xaf, 0x7b, 0x6d, 0xf9, 0xba, 0xa7, 0x29, 0x8a, 0x8c, 0xe5, 0x7c, 0xf2, 0x86, - 0xcc, 0x26, 0x95, 0x1a, 0xf2, 0x4c, 0xb4, 0x84, 0x32, 0xaf, 0xa1, 0x68, 0x74, 0xd6, 0x5e, 0x54, - 0xf9, 0x40, 0x5e, 0xfc, 0x14, 0x6a, 0x7c, 0x81, 0xb1, 0x63, 0x49, 0xfd, 0x6f, 0xbf, 0xc0, 0x61, - 0x7b, 0xbd, 0x05, 0xc6, 0x5f, 0x04, 0x89, 0xc8, 0x98, 0x14, 0xb7, 0xdf, 0x87, 0xfa, 0x28, 0xf4, - 0x43, 0x11, 0x3b, 0x70, 0xf5, 0x61, 0x07, 0x44, 0x67, 0x9a, 0x6d, 0x77, 0xa0, 0xee, 0xe3, 0x14, - 0x83, 0xb1, 0xcc, 0x8c, 0x56, 0x77, 0x67, 0x29, 0x78, 0x24, 0xe9, 0x4c, 0xf3, 0xed, 0x7b, 0xb0, - 0x95, 0xf0, 0xa1, 0x8f, 0xc7, 0x11, 0x79, 0x31, 0x96, 0x59, 0xd2, 0xea, 0x5e, 0x2f, 0xc5, 0xa3, - 0xc4, 0x65, 0x2b, 0xb2, 0x37, 0x1f, 0x82, 0x55, 0xbc, 0x90, 0x8a, 0xe4, 0x39, 0x66, 0xd2, 0xdf, - 0x16, 0xa3, 0xa3, 0xfd, 0x0e, 0x98, 0x17, 0xdc, 0x4f, 0x55, 0xae, 0xb4, 0xba, 0xdb, 0x4b, 0x9d, - 0xbd, 0x85, 0x17, 0x33, 0xc5, 0xbc, 0x57, 0xb9, 0x6b, 0xb8, 0xdf, 0x57, 0x60, 0xab, 0x7c, 0x8f, - 0xfd, 0x16, 0x40, 0xe2, 0xcd, 0xf1, 0x41, 0x28, 0xe6, 0x3c, 0xd1, 0x3a, 0x4b, 0x14, 0xfb, 0x43, - 0xd8, 0xb9, 0x40, 0x91, 0x78, 0x23, 0xee, 0x9f, 0x7a, 0x73, 0x24, 0x7d, 0xf2, 0x96, 0x26, 0x5b, - 0xa3, 0xdb, 0xb7, 0xa1, 0x1e, 0x87, 0x22, 0xd9, 0xcf, 0x64, 0xbc, 0x5b, 0x5d, 0x67, 0xf9, 0x0e, - 0x86, 0x01, 0x9f, 0xd3, 0xbd, 0x0f, 0x3c, 0xf4, 0xc7, 0x4c, 0xcb, 0x51, 0x0d, 0x5f, 0x0a, 0x1e, - 0x45, 0x5e, 0x30, 0xcd, 0xfb, 0x44, 0x8e, 0xed, 0xbb, 0x00, 0x13, 0x12, 0xa6, 0xc4, 0xcf, 0xf3, - 0xe3, 0xc5, 0x1a, 0x4b, 0xb2, 0xf6, 0x7b, 0xb0, 0x3d, 0xf1, 0x16, 0x0f, 0x3c, 0x11, 0x27, 0x07, - 0xa1, 0x9f, 0xce, 0x03, 0x99, 0x35, 0x4d, 0x76, 0x85, 0xea, 0x46, 0xb0, 0xbd, 0xaa, 0x85, 0xd2, - 0x3f, 0xbf, 0x40, 0xd6, 0x9e, 0xf2, 0xc7, 0x0a, 0xcd, 0x6e, 0x43, 0x6b, 0xec, 0xc5, 0x91, 0xcf, - 0xb3, 0x52, 0x79, 0x96, 0x49, 0xd4, 0x4d, 0x2e, 0xbc, 0xd8, 0x1b, 0xfa, 0xaa, 0x29, 0x36, 0x59, - 0x0e, 0xdd, 0x29, 0x98, 0x32, 0x7d, 0x4a, 0xc5, 0x6e, 0xe5, 0xc5, 0x2e, 0x9b, 0x68, 0xa5, 0xd4, - 0x44, 0x77, 0xa0, 0xfa, 0x25, 0x2e, 0x74, 0x5f, 0xa5, 0x63, 0xd1, 0x12, 0x6a, 0xa5, 0x96, 0xb0, - 0x0b, 0xe6, 0xb9, 0x8c, 0xbd, 0x2a, 0x55, 0x05, 0xdc, 0xfb, 0x50, 0x57, 0xe9, 0x57, 0x68, 0x36, - 0x4a, 0x9a, 0xdb, 0xd0, 0x3a, 0x16, 0x1e, 
0x06, 0x89, 0x2a, 0x72, 0x6d, 0x42, 0x89, 0xe4, 0xfe, - 0x62, 0x40, 0x4d, 0xc6, 0xd4, 0x85, 0x2d, 0x1f, 0xa7, 0x7c, 0x94, 0xed, 0x87, 0x69, 0x30, 0x8e, - 0x1d, 0xa3, 0x5d, 0xed, 0x54, 0xd9, 0x0a, 0xcd, 0xbe, 0x0e, 0xf5, 0xa1, 0xe2, 0x56, 0xda, 0xd5, - 0x8e, 0xc5, 0x34, 0xa2, 0xa7, 0xf9, 0x7c, 0x88, 0xbe, 0x36, 0x41, 0x01, 0x92, 0x8e, 0x04, 0x4e, - 0xbc, 0x85, 0x36, 0x43, 0x23, 0xa2, 0xc7, 0xe9, 0x84, 0xe8, 0xca, 0x12, 0x8d, 0xc8, 0x80, 0x21, - 0x8f, 0x8b, 0xca, 0xa7, 0x33, 0x69, 0x8e, 0x47, 0xdc, 0xcf, 0x4b, 0x5f, 0x01, 0xf7, 0x77, 0x83, - 0x46, 0x82, 0x6a, 0x65, 0x6b, 0x1e, 0x7e, 0x03, 0x9a, 0xd4, 0xe6, 0x9e, 0x5d, 0x70, 0xa1, 0x0d, - 0x6e, 0x10, 0x3e, 0xe7, 0xc2, 0xfe, 0x04, 0xea, 0xb2, 0x42, 0x36, 0xb4, 0xd5, 0x5c, 0x9d, 0xf4, - 0x2a, 0xd3, 0x62, 0x45, 0xe3, 0xa9, 0x95, 0x1a, 0x4f, 0x61, 0xac, 0x59, 0x36, 0xf6, 0x16, 0x98, - 0xd4, 0xc1, 0x32, 0xf9, 0xfa, 0x8d, 0x9a, 0x55, 0x9f, 0x53, 0x52, 0xee, 0x19, 0x5c, 0x5b, 0xb9, - 0xb1, 0xb8, 0xc9, 0x58, 0xbd, 0x69, 0x59, 0xed, 0x96, 0xae, 0x6e, 0x2a, 0xa5, 0x18, 0x7d, 0x1c, - 0x25, 0x38, 0xd6, 0x59, 0x57, 0x60, 0xf7, 0x47, 0x63, 0xa9, 0x57, 0xde, 0x47, 0x29, 0x3a, 0x0a, - 0xe7, 0x73, 0x1e, 0x8c, 0xb5, 0xea, 0x1c, 0x92, 0xdf, 0xc6, 0x43, 0xad, 0xba, 0x32, 0x1e, 0x12, - 0x16, 0x91, 0x8e, 0x60, 0x45, 0x44, 0x94, 0x3b, 0x73, 0xe4, 0x71, 0x2a, 0x70, 0x8e, 0x41, 0xa2, - 0x5d, 0x50, 0x26, 0xd9, 0x37, 0xa0, 0x91, 0xf0, 0xe9, 0x33, 0xea, 0x51, 0x3a, 0x92, 0x09, 0x9f, - 0x3e, 0xc2, 0xcc, 0x7e, 0x13, 0x2c, 0x59, 0xa5, 0x92, 0xa5, 0xc2, 0xd9, 0x94, 0x84, 0x47, 0x98, - 0xb9, 0x7f, 0x1b, 0x50, 0x1f, 0xa0, 0xb8, 0x40, 0xf1, 0x4a, 0x93, 0xb0, 0xbc, 0x61, 0x54, 0x5f, - 0xb2, 0x61, 0xd4, 0x36, 0x6f, 0x18, 0xe6, 0x72, 0xc3, 0xd8, 0x05, 0x73, 0x20, 0x46, 0x87, 0x7d, - 0xf9, 0xa2, 0x2a, 0x53, 0x80, 0xb2, 0xb1, 0x37, 0x4a, 0xbc, 0x0b, 0xd4, 0x6b, 0x87, 0x46, 0x6b, - 0x03, 0xb2, 0xb9, 0x61, 0xd6, 0xff, 0xcb, 0xed, 0xc3, 0xfd, 0xc1, 0x80, 0xfa, 0x11, 0xcf, 0xc2, - 0x34, 0x59, 0xcb, 0xda, 0x36, 0xb4, 0x7a, 0x51, 0xe4, 0x7b, 0xa3, 0x95, 0x4a, 0x2d, 0x91, 0x48, - 0xe2, 0x71, 0x29, 0x1e, 0xca, 0x17, 0x65, 0x12, 0x4d, 0x87, 0x03, 0xb9, 0x34, 0xa8, 0x0d, 0xa0, - 0x34, 0x1d, 0xd4, 0xae, 0x20, 0x99, 0xe4, 0xb4, 0x5e, 0x9a, 0x84, 0x13, 0x3f, 0xbc, 0x94, 0xde, - 0x69, 0xb2, 0x02, 0xbb, 0x7f, 0x54, 0xa0, 0xf6, 0x7f, 0x0d, 0xfa, 0x2d, 0x30, 0x3c, 0x9d, 0x1c, - 0x86, 0x57, 0x8c, 0xfd, 0x46, 0x69, 0xec, 0x3b, 0xd0, 0xc8, 0x04, 0x0f, 0xa6, 0x18, 0x3b, 0x4d, - 0xd9, 0x8d, 0x72, 0x28, 0x39, 0xb2, 0xee, 0xd4, 0xbc, 0xb7, 0x58, 0x0e, 0x8b, 0x3a, 0x82, 0x52, - 0x1d, 0x7d, 0xac, 0x57, 0x83, 0xd6, 0xd5, 0xd1, 0xb2, 0x69, 0x23, 0xf8, 0xef, 0x46, 0xf0, 0x5f, - 0x06, 0x98, 0x45, 0x11, 0x1e, 0xac, 0x16, 0xe1, 0xc1, 0xb2, 0x08, 0xfb, 0xfb, 0x79, 0x11, 0xf6, - 0xf7, 0x09, 0xb3, 0x93, 0xbc, 0x08, 0xd9, 0x09, 0x05, 0xeb, 0xa1, 0x08, 0xd3, 0x68, 0x3f, 0x53, - 0x51, 0xb5, 0x58, 0x81, 0x29, 0x73, 0xbf, 0x9e, 0xa1, 0xd0, 0xae, 0xb6, 0x98, 0x46, 0x94, 0xe7, - 0x47, 0xb2, 0x41, 0x29, 0xe7, 0x2a, 0x60, 0xbf, 0x0b, 0x26, 0x23, 0xe7, 0x49, 0x0f, 0xaf, 0xc4, - 0x45, 0x92, 0x99, 0xe2, 0x92, 0x52, 0xf5, 0x4b, 0xa0, 0x13, 0x3e, 0xff, 0x41, 0xf8, 0x08, 0xea, - 0x83, 0x99, 0x37, 0x49, 0xf2, 0x05, 0xeb, 0xf5, 0x52, 0x83, 0xf3, 0xe6, 0x28, 0x79, 0x4c, 0x8b, - 0xb8, 0x4f, 0xc1, 0x2a, 0x88, 0xcb, 0xe7, 0x18, 0xe5, 0xe7, 0xd8, 0x50, 0x3b, 0x0b, 0xbc, 0x24, - 0x2f, 0x75, 0x3a, 0x93, 0xb1, 0x4f, 0x53, 0x1e, 0x24, 0x5e, 0x92, 0xe5, 0xa5, 0x9e, 0x63, 0xf7, - 0x8e, 0x7e, 0x3e, 0xa9, 0x3b, 0x8b, 0x22, 0x14, 0xba, 0x6d, 0x28, 0x20, 0x2f, 0x09, 0x2f, 0x51, - 0x75, 0xfc, 0x2a, 0x53, 0xc0, 0xfd, 0x06, 0xac, 0x9e, 0x8f, 0x22, 
0x61, 0xa9, 0x8f, 0x9b, 0x26, - 0xf1, 0x57, 0x83, 0xe3, 0x27, 0xf9, 0x0b, 0xe8, 0xbc, 0x6c, 0x11, 0xd5, 0x2b, 0x2d, 0xe2, 0x11, - 0x8f, 0xf8, 0x61, 0x5f, 0xe6, 0x79, 0x95, 0x69, 0xe4, 0xfe, 0x64, 0x40, 0x8d, 0x7a, 0x51, 0x49, - 0x75, 0xed, 0x65, 0x7d, 0xec, 0x44, 0x84, 0x17, 0xde, 0x18, 0x45, 0x6e, 0x5c, 0x8e, 0xa5, 0xd3, - 0x47, 0x33, 0x2c, 0x06, 0xbe, 0x46, 0x94, 0x6b, 0xf4, 0xff, 0x90, 0xd7, 0x52, 0x29, 0xd7, 0x88, - 0xcc, 0x14, 0x93, 0x36, 0xbb, 0x41, 0x1a, 0xa1, 0xe8, 0x8d, 0xe7, 0x5e, 0xbe, 0x01, 0x95, 0x28, - 0xee, 0x7d, 0xf5, 0x47, 0xb2, 0xd6, 0xd1, 0x8c, 0xcd, 0x7f, 0x2f, 0x57, 0x5f, 0xee, 0xfe, 0x6c, - 0x40, 0xe3, 0xb1, 0xde, 0xd5, 0xca, 0x56, 0x18, 0x2f, 0xb4, 0xa2, 0xb2, 0x62, 0x45, 0x17, 0x76, - 0x73, 0x99, 0x95, 0xfb, 0x95, 0x17, 0x36, 0xf2, 0xb4, 0x47, 0x6b, 0x45, 0xb0, 0x5e, 0xe5, 0x77, - 0xe5, 0x74, 0x55, 0x66, 0x53, 0xc0, 0xd7, 0xa2, 0xd2, 0x86, 0x96, 0xfe, 0xcd, 0x94, 0x3f, 0x6d, - 0xba, 0xa9, 0x96, 0x48, 0x6e, 0x17, 0xea, 0x07, 0x61, 0x30, 0xf1, 0xa6, 0x76, 0x07, 0x6a, 0xbd, - 0x34, 0x99, 0x49, 0x8d, 0xad, 0xee, 0x6e, 0xa9, 0xf0, 0xd3, 0x64, 0xa6, 0x64, 0x98, 0x94, 0x70, - 0x3f, 0x03, 0x58, 0xd2, 0x68, 0x4a, 0x2c, 0xa3, 0xf1, 0x04, 0x2f, 0x29, 0x65, 0x62, 0xa9, 0xa5, - 0xc9, 0x36, 0x70, 0xdc, 0xcf, 0xc1, 0xda, 0x4f, 0x3d, 0x7f, 0x7c, 0x18, 0x4c, 0x42, 0x6a, 0x1d, - 0xe7, 0x28, 0xe2, 0x65, 0xbc, 0x72, 0x48, 0xee, 0xa6, 0x2e, 0x52, 0xd4, 0x90, 0x46, 0xc3, 0xba, - 0xfc, 0xcd, 0xbf, 0xf3, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xda, 0x7c, 0x0d, 0xab, 0xf8, 0x0f, - 0x00, 0x00, -} diff --git a/chronograf/bolt/client.go b/chronograf/bolt/client.go deleted file mode 100644 index 481e8a9cbd7..00000000000 --- a/chronograf/bolt/client.go +++ /dev/null @@ -1,278 +0,0 @@ -package bolt - -import ( - "context" - "fmt" - "io" - "os" - "path" - "time" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/id" - bolt "go.etcd.io/bbolt" -) - -// Client is a client for the boltDB data store. -type Client struct { - Path string - db *bolt.DB - logger chronograf.Logger - isNew bool - Now func() time.Time - LayoutIDs chronograf.ID - - BuildStore *BuildStore - SourcesStore *SourcesStore - ServersStore *ServersStore - LayoutsStore *LayoutsStore - DashboardsStore *DashboardsStore - UsersStore *UsersStore - OrganizationsStore *OrganizationsStore - ConfigStore *ConfigStore - MappingsStore *MappingsStore - OrganizationConfigStore *OrganizationConfigStore -} - -// NewClient initializes all stores -func NewClient() *Client { - c := &Client{Now: time.Now} - c.BuildStore = &BuildStore{client: c} - c.SourcesStore = &SourcesStore{client: c} - c.ServersStore = &ServersStore{client: c} - c.LayoutsStore = &LayoutsStore{ - client: c, - IDs: &id.UUID{}, - } - c.DashboardsStore = &DashboardsStore{ - client: c, - IDs: &id.UUID{}, - } - c.UsersStore = &UsersStore{client: c} - c.OrganizationsStore = &OrganizationsStore{client: c} - c.ConfigStore = &ConfigStore{client: c} - c.MappingsStore = &MappingsStore{client: c} - c.OrganizationConfigStore = &OrganizationConfigStore{client: c} - return c -} - -// WithDB sets the boltdb database for a client. It should not be called -// after a call to Open. 
-func (c *Client) WithDB(db *bolt.DB) { - c.db = db -} - -// Option to change behavior of Open() -type Option interface { - Backup() bool -} - -// WithBackup returns a Backup -func WithBackup() Option { - return Backup{} -} - -// Backup tells Open to perform a backup prior to initialization -type Backup struct { -} - -// Backup returns true -func (b Backup) Backup() bool { - return true -} - -// Open / create boltDB file. -func (c *Client) Open(ctx context.Context, logger chronograf.Logger, build chronograf.BuildInfo, opts ...Option) error { - if c.db == nil { - if _, err := os.Stat(c.Path); os.IsNotExist(err) { - c.isNew = true - } else if err != nil { - return err - } - - // Open database file. - db, err := bolt.Open(c.Path, 0600, &bolt.Options{Timeout: 1 * time.Second}) - if err != nil { - return fmt.Errorf("unable to open boltdb; is there a chronograf already running? %v", err) - } - c.db = db - c.logger = logger - - for _, opt := range opts { - if opt.Backup() { - if err = c.backup(ctx, build); err != nil { - return fmt.Errorf("unable to backup your database prior to migrations: %v", err) - } - } - } - } - - if err := c.initialize(ctx); err != nil { - return fmt.Errorf("unable to boot boltdb: %v", err) - } - if err := c.migrate(ctx, build); err != nil { - return fmt.Errorf("unable to migrate boltdb: %v", err) - } - - return nil -} - -// initialize creates Buckets that are missing -func (c *Client) initialize(ctx context.Context) error { - if err := c.db.Update(func(tx *bolt.Tx) error { - // Always create SchemaVersions bucket. - if _, err := tx.CreateBucketIfNotExists(SchemaVersionBucket); err != nil { - return err - } - // Always create Organizations bucket. - if _, err := tx.CreateBucketIfNotExists(OrganizationsBucket); err != nil { - return err - } - // Always create Sources bucket. - if _, err := tx.CreateBucketIfNotExists(SourcesBucket); err != nil { - return err - } - // Always create Servers bucket. - if _, err := tx.CreateBucketIfNotExists(ServersBucket); err != nil { - return err - } - // Always create Layouts bucket. - if _, err := tx.CreateBucketIfNotExists(LayoutsBucket); err != nil { - return err - } - // Always create Dashboards bucket. - if _, err := tx.CreateBucketIfNotExists(DashboardsBucket); err != nil { - return err - } - // Always create Users bucket. - if _, err := tx.CreateBucketIfNotExists(UsersBucket); err != nil { - return err - } - // Always create Config bucket. - if _, err := tx.CreateBucketIfNotExists(ConfigBucket); err != nil { - return err - } - // Always create Build bucket. - if _, err := tx.CreateBucketIfNotExists(BuildBucket); err != nil { - return err - } - // Always create Mapping bucket. - if _, err := tx.CreateBucketIfNotExists(MappingsBucket); err != nil { - return err - } - // Always create OrganizationConfig bucket. 
- if _, err := tx.CreateBucketIfNotExists(OrganizationConfigBucket); err != nil { - return err - } - return nil - }); err != nil { - return err - } - - return nil -} - -// migrate moves data from an old schema to a new schema in each Store -func (c *Client) migrate(ctx context.Context, build chronograf.BuildInfo) error { - if c.db != nil { - // Runtime migrations - if err := c.OrganizationsStore.Migrate(ctx); err != nil { - return err - } - if err := c.SourcesStore.Migrate(ctx); err != nil { - return err - } - if err := c.ServersStore.Migrate(ctx); err != nil { - return err - } - if err := c.LayoutsStore.Migrate(ctx); err != nil { - return err - } - if err := c.DashboardsStore.Migrate(ctx); err != nil { - return err - } - if err := c.ConfigStore.Migrate(ctx); err != nil { - return err - } - if err := c.BuildStore.Migrate(ctx, build); err != nil { - return err - } - if err := c.MappingsStore.Migrate(ctx); err != nil { - return err - } - if err := c.OrganizationConfigStore.Migrate(ctx); err != nil { - return err - } - - if err := MigrateAll(c); err != nil { - return err - } - } - return nil -} - -// Close the connection to the bolt database -func (c *Client) Close() error { - if c.db != nil { - return c.db.Close() - } - return nil -} - -// copy creates a copy of the database in toFile -func (c *Client) copy(ctx context.Context, version string) error { - backupDir := path.Join(path.Dir(c.Path), "backup") - if _, err := os.Stat(backupDir); os.IsNotExist(err) { - if err = os.Mkdir(backupDir, 0700); err != nil { - return err - } - } else if err != nil { - return err - } - - fromFile, err := os.Open(c.Path) - if err != nil { - return err - } - defer fromFile.Close() - - toName := fmt.Sprintf("%s.%s", path.Base(c.Path), version) - toPath := path.Join(backupDir, toName) - toFile, err := os.OpenFile(toPath, os.O_RDWR|os.O_CREATE, 0600) - if err != nil { - return err - } - defer toFile.Close() - - _, err = io.Copy(toFile, fromFile) - if err != nil { - return err - } - - c.logger.Info("Successfully created ", toPath) - - return nil -} - -// backup makes a copy of the database to the backup/ directory, if necessary: -// - If this is a fresh install, don't create a backup and store the current version -// - If we are on the same version, don't create a backup -// - If the version has changed, create a backup and store the current version -func (c *Client) backup(ctx context.Context, build chronograf.BuildInfo) error { - lastBuild, err := c.BuildStore.Get(ctx) - if err != nil { - return err - } - if lastBuild.Version == build.Version { - return nil - } - if c.isNew { - return nil - } - - // The database was pre-existing and the version has changed, - // so create a backup - - c.logger.Info("Moving from version ", lastBuild.Version) - c.logger.Info("Moving to version ", build.Version) - - return c.copy(ctx, lastBuild.Version) -} diff --git a/chronograf/bolt/config.go b/chronograf/bolt/config.go deleted file mode 100644 index fd3043edfa5..00000000000 --- a/chronograf/bolt/config.go +++ /dev/null @@ -1,71 +0,0 @@ -package bolt - -import ( - "context" - "fmt" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/bolt/internal" - bolt "go.etcd.io/bbolt" -) - -// Ensure ConfigStore implements chronograf.ConfigStore.
-var _ chronograf.ConfigStore = &ConfigStore{} - -// ConfigBucket is used to store chronograf application state -var ConfigBucket = []byte("ConfigV1") - -// configID is the boltDB key where the configuration object is stored -var configID = []byte("config/v1") - -// ConfigStore uses bolt to store and retrieve global -// application configuration -type ConfigStore struct { - client *Client -} - -func (s *ConfigStore) Migrate(ctx context.Context) error { - if _, err := s.Get(ctx); err != nil { - return s.Initialize(ctx) - } - return nil -} - -func (s *ConfigStore) Initialize(ctx context.Context) error { - cfg := chronograf.Config{ - Auth: chronograf.AuthConfig{ - SuperAdminNewUsers: false, - }, - } - return s.Update(ctx, &cfg) -} - -func (s *ConfigStore) Get(ctx context.Context) (*chronograf.Config, error) { - var cfg chronograf.Config - err := s.client.db.View(func(tx *bolt.Tx) error { - v := tx.Bucket(ConfigBucket).Get(configID) - if v == nil { - return chronograf.ErrConfigNotFound - } - return internal.UnmarshalConfig(v, &cfg) - }) - - if err != nil { - return nil, err - } - return &cfg, nil -} - -func (s *ConfigStore) Update(ctx context.Context, cfg *chronograf.Config) error { - if cfg == nil { - return fmt.Errorf("config provided was nil") - } - return s.client.db.Update(func(tx *bolt.Tx) error { - if v, err := internal.MarshalConfig(cfg); err != nil { - return err - } else if err := tx.Bucket(ConfigBucket).Put(configID, v); err != nil { - return err - } - return nil - }) -} diff --git a/chronograf/bolt/config_test.go b/chronograf/bolt/config_test.go deleted file mode 100644 index 3982bfe0bfa..00000000000 --- a/chronograf/bolt/config_test.go +++ /dev/null @@ -1,105 +0,0 @@ -package bolt_test - -import ( - "context" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/chronograf" -) - -func TestConfig_Get(t *testing.T) { - type wants struct { - config *chronograf.Config - err error - } - tests := []struct { - name string - wants wants - }{ - { - name: "Get config", - wants: wants{ - config: &chronograf.Config{ - Auth: chronograf.AuthConfig{ - SuperAdminNewUsers: false, - }, - }, - }, - }, - } - for _, tt := range tests { - client, err := NewTestClient() - if err != nil { - t.Fatal(err) - } - defer client.Close() - - s := client.ConfigStore - got, err := s.Get(context.Background()) - if (tt.wants.err != nil) != (err != nil) { - t.Errorf("%q. ConfigStore.Get() error = %v, wantErr %v", tt.name, err, tt.wants.err) - continue - } - if diff := cmp.Diff(got, tt.wants.config); diff != "" { - t.Errorf("%q. ConfigStore.Get():\n-got/+want\ndiff %s", tt.name, diff) - } - } -} - -func TestConfig_Update(t *testing.T) { - type args struct { - config *chronograf.Config - } - type wants struct { - config *chronograf.Config - err error - } - tests := []struct { - name string - args args - wants wants - }{ - { - name: "Set config", - args: args{ - config: &chronograf.Config{ - Auth: chronograf.AuthConfig{ - SuperAdminNewUsers: false, - }, - }, - }, - wants: wants{ - config: &chronograf.Config{ - Auth: chronograf.AuthConfig{ - SuperAdminNewUsers: false, - }, - }, - }, - }, - } - for _, tt := range tests { - client, err := NewTestClient() - if err != nil { - t.Fatal(err) - } - defer client.Close() - - s := client.ConfigStore - err = s.Update(context.Background(), tt.args.config) - if (tt.wants.err != nil) != (err != nil) { - t.Errorf("%q. 
ConfigStore.Update() error = %v, wantErr %v", tt.name, err, tt.wants.err) - continue - } - - got, err := s.Get(context.Background()) - if (tt.wants.err != nil) != (err != nil) { - t.Errorf("%q. ConfigStore.Get() error = %v, wantErr %v", tt.name, err, tt.wants.err) - continue - } - - if diff := cmp.Diff(got, tt.wants.config); diff != "" { - t.Errorf("%q. ConfigStore.Get():\n-got/+want\ndiff %s", tt.name, diff) - } - } -} diff --git a/chronograf/bolt/dashboards.go b/chronograf/bolt/dashboards.go deleted file mode 100644 index b37cf8f0872..00000000000 --- a/chronograf/bolt/dashboards.go +++ /dev/null @@ -1,194 +0,0 @@ -package bolt - -import ( - "context" - "strconv" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/bolt/internal" - bolt "go.etcd.io/bbolt" -) - -// Ensure DashboardsStore implements chronograf.DashboardsStore. -var _ chronograf.DashboardsStore = &DashboardsStore{} - -// DashboardsBucket is the bolt bucket dashboards are stored in -var DashboardsBucket = []byte("Dashoard") // N.B. leave the misspelling for backwards-compat! - -// DashboardsStore is the bolt implementation of storing dashboards -type DashboardsStore struct { - client *Client - IDs chronograf.ID -} - -// AddIDs is a migration function that adds ID information to existing dashboards -func (d *DashboardsStore) AddIDs(ctx context.Context, boards []chronograf.Dashboard) error { - for _, board := range boards { - update := false - for i, cell := range board.Cells { - // If there is no id set, we generate one and update the dashboard - if cell.ID == "" { - id, err := d.IDs.Generate() - if err != nil { - return err - } - cell.ID = id - board.Cells[i] = cell - update = true - } - } - if !update { - continue - } - if err := d.Update(ctx, board); err != nil { - return err - } - } - return nil -} - -// Migrate updates the dashboards at runtime -func (d *DashboardsStore) Migrate(ctx context.Context) error { - // 1.
Add UUIDs to cells without one - boards, err := d.All(ctx) - if err != nil { - return err - } - if err := d.AddIDs(ctx, boards); err != nil { - return err - } - - defaultOrg, err := d.client.OrganizationsStore.DefaultOrganization(ctx) - if err != nil { - return err - } - - for _, board := range boards { - if board.Organization == "" { - board.Organization = defaultOrg.ID - if err := d.Update(ctx, board); err != nil { - return err - } - } - } - - return nil -} - -// All returns all known dashboards -func (d *DashboardsStore) All(ctx context.Context) ([]chronograf.Dashboard, error) { - var srcs []chronograf.Dashboard - if err := d.client.db.View(func(tx *bolt.Tx) error { - if err := tx.Bucket(DashboardsBucket).ForEach(func(k, v []byte) error { - var src chronograf.Dashboard - if err := internal.UnmarshalDashboard(v, &src); err != nil { - return err - } - srcs = append(srcs, src) - return nil - }); err != nil { - return err - } - return nil - }); err != nil { - return nil, err - } - - return srcs, nil -} - -// Add creates a new Dashboard in the DashboardsStore -func (d *DashboardsStore) Add(ctx context.Context, src chronograf.Dashboard) (chronograf.Dashboard, error) { - if err := d.client.db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket(DashboardsBucket) - id, _ := b.NextSequence() - - src.ID = chronograf.DashboardID(id) - // TODO: use FormatInt - strID := strconv.Itoa(int(id)) - for i, cell := range src.Cells { - cid, err := d.IDs.Generate() - if err != nil { - return err - } - cell.ID = cid - src.Cells[i] = cell - } - v, err := internal.MarshalDashboard(src) - if err != nil { - return err - } - return b.Put([]byte(strID), v) - }); err != nil { - return chronograf.Dashboard{}, err - } - - return src, nil -} - -// Get returns a Dashboard if the id exists. -func (d *DashboardsStore) Get(ctx context.Context, id chronograf.DashboardID) (chronograf.Dashboard, error) { - var src chronograf.Dashboard - if err := d.client.db.View(func(tx *bolt.Tx) error { - strID := strconv.Itoa(int(id)) - if v := tx.Bucket(DashboardsBucket).Get([]byte(strID)); v == nil { - return chronograf.ErrDashboardNotFound - } else if err := internal.UnmarshalDashboard(v, &src); err != nil { - return err - } - return nil - }); err != nil { - return chronograf.Dashboard{}, err - } - - return src, nil -} - -// Delete the dashboard from DashboardsStore -func (d *DashboardsStore) Delete(ctx context.Context, dash chronograf.Dashboard) error { - if err := d.client.db.Update(func(tx *bolt.Tx) error { - strID := strconv.Itoa(int(dash.ID)) - if err := tx.Bucket(DashboardsBucket).Delete([]byte(strID)); err != nil { - return err - } - return nil - }); err != nil { - return err - } - - return nil -} - -// Update the dashboard in DashboardsStore -func (d *DashboardsStore) Update(ctx context.Context, dash chronograf.Dashboard) error { - if err := d.client.db.Update(func(tx *bolt.Tx) error { - // Get an existing dashboard with the same ID.
- b := tx.Bucket(DashboardsBucket) - strID := strconv.Itoa(int(dash.ID)) - if v := b.Get([]byte(strID)); v == nil { - return chronograf.ErrDashboardNotFound - } - - for i, cell := range dash.Cells { - if cell.ID != "" { - continue - } - cid, err := d.IDs.Generate() - if err != nil { - return err - } - cell.ID = cid - dash.Cells[i] = cell - } - if v, err := internal.MarshalDashboard(dash); err != nil { - return err - } else if err := b.Put([]byte(strID), v); err != nil { - return err - } - return nil - }); err != nil { - return err - } - - return nil -} diff --git a/chronograf/bolt/internal/internal.go b/chronograf/bolt/internal/internal.go deleted file mode 100644 index dafc95bd67a..00000000000 --- a/chronograf/bolt/internal/internal.go +++ /dev/null @@ -1,863 +0,0 @@ -package internal - -import ( - "encoding/json" - "fmt" - - "github.com/gogo/protobuf/proto" - "github.com/influxdata/influxdb/v2/chronograf" -) - -//go:generate protoc --plugin ../../../scripts/protoc-gen-gogo --gogo_out=. internal.proto - -// MarshalBuild encodes a build to binary protobuf format. -func MarshalBuild(b chronograf.BuildInfo) ([]byte, error) { - return proto.Marshal(&BuildInfo{ - Version: b.Version, - Commit: b.Commit, - }) -} - -// UnmarshalBuild decodes a build from binary protobuf data. -func UnmarshalBuild(data []byte, b *chronograf.BuildInfo) error { - var pb BuildInfo - if err := proto.Unmarshal(data, &pb); err != nil { - return err - } - - b.Version = pb.Version - b.Commit = pb.Commit - return nil -} - -// MarshalSource encodes a source to binary protobuf format. -func MarshalSource(s chronograf.Source) ([]byte, error) { - return proto.Marshal(&Source{ - ID: int64(s.ID), - Name: s.Name, - Type: s.Type, - Username: s.Username, - Password: s.Password, - SharedSecret: s.SharedSecret, - URL: s.URL, - MetaURL: s.MetaURL, - InsecureSkipVerify: s.InsecureSkipVerify, - Default: s.Default, - Telegraf: s.Telegraf, - Organization: s.Organization, - Role: s.Role, - DefaultRP: s.DefaultRP, - }) -} - -// UnmarshalSource decodes a source from binary protobuf data. -func UnmarshalSource(data []byte, s *chronograf.Source) error { - var pb Source - if err := proto.Unmarshal(data, &pb); err != nil { - return err - } - - s.ID = int(pb.ID) - s.Name = pb.Name - s.Type = pb.Type - s.Username = pb.Username - s.Password = pb.Password - s.SharedSecret = pb.SharedSecret - s.URL = pb.URL - s.MetaURL = pb.MetaURL - s.InsecureSkipVerify = pb.InsecureSkipVerify - s.Default = pb.Default - s.Telegraf = pb.Telegraf - s.Organization = pb.Organization - s.Role = pb.Role - s.DefaultRP = pb.DefaultRP - return nil -} - -// MarshalServer encodes a server to binary protobuf format. -func MarshalServer(s chronograf.Server) ([]byte, error) { - var ( - metadata []byte - err error - ) - metadata, err = json.Marshal(s.Metadata) - if err != nil { - return nil, err - } - return proto.Marshal(&Server{ - ID: int64(s.ID), - SrcID: int64(s.SrcID), - Name: s.Name, - Username: s.Username, - Password: s.Password, - URL: s.URL, - Active: s.Active, - Organization: s.Organization, - InsecureSkipVerify: s.InsecureSkipVerify, - Type: s.Type, - MetadataJSON: string(metadata), - }) -} - -// UnmarshalServer decodes a server from binary protobuf data. 
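The dashboards store above keys each record by the bucket's monotonically increasing sequence number, rendered as a decimal string (the TODO suggests a strconv format call instead of the Itoa(int(id)) detour; FormatUint fits, since NextSequence returns a uint64). A minimal bbolt sketch of that keying scheme, assuming a throwaway dashboards.db file:

```go
package main

import (
	"log"
	"strconv"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("dashboards.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Update(func(tx *bolt.Tx) error {
		// The bucket name keeps its historical misspelling on purpose.
		b, err := tx.CreateBucketIfNotExists([]byte("Dashoard"))
		if err != nil {
			return err
		}
		id, err := b.NextSequence() // per-bucket, monotonically increasing uint64
		if err != nil {
			return err
		}
		// Key by the decimal form of the sequence number.
		return b.Put([]byte(strconv.FormatUint(id, 10)), []byte(`{"name":"example"}`))
	}); err != nil {
		log.Fatal(err)
	}
}
```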
-func UnmarshalServer(data []byte, s *chronograf.Server) error { - var pb Server - if err := proto.Unmarshal(data, &pb); err != nil { - return err - } - - s.Metadata = make(map[string]interface{}) - if len(pb.MetadataJSON) > 0 { - if err := json.Unmarshal([]byte(pb.MetadataJSON), &s.Metadata); err != nil { - return err - } - } - - s.ID = int(pb.ID) - s.SrcID = int(pb.SrcID) - s.Name = pb.Name - s.Username = pb.Username - s.Password = pb.Password - s.URL = pb.URL - s.Active = pb.Active - s.Organization = pb.Organization - s.InsecureSkipVerify = pb.InsecureSkipVerify - s.Type = pb.Type - return nil -} - -// MarshalLayout encodes a layout to binary protobuf format. -func MarshalLayout(l chronograf.Layout) ([]byte, error) { - cells := make([]*Cell, len(l.Cells)) - for i, c := range l.Cells { - queries := make([]*Query, len(c.Queries)) - for j, q := range c.Queries { - r := new(Range) - if q.Range != nil { - r.Upper, r.Lower = q.Range.Upper, q.Range.Lower - } - queries[j] = &Query{ - Command: q.Command, - DB: q.DB, - RP: q.RP, - GroupBys: q.GroupBys, - Wheres: q.Wheres, - Label: q.Label, - Range: r, - } - } - - axes := make(map[string]*Axis, len(c.Axes)) - for a, r := range c.Axes { - axes[a] = &Axis{ - Bounds: r.Bounds, - Label: r.Label, - } - } - - cells[i] = &Cell{ - X: c.X, - Y: c.Y, - W: c.W, - H: c.H, - I: c.I, - Name: c.Name, - Queries: queries, - Type: c.Type, - Axes: axes, - } - } - return proto.Marshal(&Layout{ - ID: l.ID, - Measurement: l.Measurement, - Application: l.Application, - Autoflow: l.Autoflow, - Cells: cells, - }) -} - -// UnmarshalLayout decodes a layout from binary protobuf data. -func UnmarshalLayout(data []byte, l *chronograf.Layout) error { - var pb Layout - if err := proto.Unmarshal(data, &pb); err != nil { - return err - } - - l.ID = pb.ID - l.Measurement = pb.Measurement - l.Application = pb.Application - l.Autoflow = pb.Autoflow - cells := make([]chronograf.Cell, len(pb.Cells)) - for i, c := range pb.Cells { - queries := make([]chronograf.Query, len(c.Queries)) - for j, q := range c.Queries { - queries[j] = chronograf.Query{ - Command: q.Command, - DB: q.DB, - RP: q.RP, - GroupBys: q.GroupBys, - Wheres: q.Wheres, - Label: q.Label, - } - if q.Range.Upper != q.Range.Lower { - queries[j].Range = &chronograf.Range{ - Upper: q.Range.Upper, - Lower: q.Range.Lower, - } - } - } - axes := make(map[string]chronograf.Axis, len(c.Axes)) - for a, r := range c.Axes { - axes[a] = chronograf.Axis{ - Bounds: r.Bounds, - Label: r.Label, - } - } - - cells[i] = chronograf.Cell{ - X: c.X, - Y: c.Y, - W: c.W, - H: c.H, - I: c.I, - Name: c.Name, - Queries: queries, - Type: c.Type, - Axes: axes, - } - } - l.Cells = cells - return nil -} - -// MarshalDashboard encodes a dashboard to binary protobuf format. 
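The server codec above flattens the free-form Metadata map into a single MetadataJSON string field, so the .proto schema never has to model arbitrary keys. A self-contained sketch of that round trip; serverPB is a hand-written stand-in for the generated message:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// serverPB stands in for the generated Server message: arbitrary
// metadata is carried as a JSON string in a single scalar field.
type serverPB struct {
	Name         string
	MetadataJSON string
}

func marshalMeta(name string, meta map[string]interface{}) (serverPB, error) {
	raw, err := json.Marshal(meta)
	if err != nil {
		return serverPB{}, err
	}
	return serverPB{Name: name, MetadataJSON: string(raw)}, nil
}

func unmarshalMeta(pb serverPB) (map[string]interface{}, error) {
	meta := make(map[string]interface{})
	// An empty string means "no metadata"; decoding it would fail.
	if len(pb.MetadataJSON) > 0 {
		if err := json.Unmarshal([]byte(pb.MetadataJSON), &meta); err != nil {
			return nil, err
		}
	}
	return meta, nil
}

func main() {
	pb, _ := marshalMeta("kapacitor", map[string]interface{}{"active": true})
	meta, _ := unmarshalMeta(pb)
	fmt.Println(pb.MetadataJSON, meta["active"])
}
```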
-func MarshalDashboard(d chronograf.Dashboard) ([]byte, error) { - cells := make([]*DashboardCell, len(d.Cells)) - for i, c := range d.Cells { - queries := make([]*Query, len(c.Queries)) - for j, q := range c.Queries { - r := new(Range) - if q.Range != nil { - r.Upper, r.Lower = q.Range.Upper, q.Range.Lower - } - q.Shifts = q.QueryConfig.Shifts - queries[j] = &Query{ - Command: q.Command, - Label: q.Label, - Range: r, - Source: q.Source, - Type: q.Type, - } - - shifts := make([]*TimeShift, len(q.Shifts)) - for k := range q.Shifts { - shift := &TimeShift{ - Label: q.Shifts[k].Label, - Unit: q.Shifts[k].Unit, - Quantity: q.Shifts[k].Quantity, - } - - shifts[k] = shift - } - - queries[j].Shifts = shifts - } - - colors := make([]*Color, len(c.CellColors)) - for j, color := range c.CellColors { - colors[j] = &Color{ - ID: color.ID, - Type: color.Type, - Hex: color.Hex, - Name: color.Name, - Value: color.Value, - } - } - - axes := make(map[string]*Axis, len(c.Axes)) - for a, r := range c.Axes { - axes[a] = &Axis{ - Bounds: r.Bounds, - Label: r.Label, - Prefix: r.Prefix, - Suffix: r.Suffix, - Base: r.Base, - Scale: r.Scale, - } - } - - sortBy := &RenamableField{ - InternalName: c.TableOptions.SortBy.InternalName, - DisplayName: c.TableOptions.SortBy.DisplayName, - Visible: c.TableOptions.SortBy.Visible, - } - - tableOptions := &TableOptions{ - VerticalTimeAxis: c.TableOptions.VerticalTimeAxis, - SortBy: sortBy, - Wrapping: c.TableOptions.Wrapping, - FixFirstColumn: c.TableOptions.FixFirstColumn, - } - - decimalPlaces := &DecimalPlaces{ - IsEnforced: c.DecimalPlaces.IsEnforced, - Digits: c.DecimalPlaces.Digits, - } - - fieldOptions := make([]*RenamableField, len(c.FieldOptions)) - for i, field := range c.FieldOptions { - fieldOptions[i] = &RenamableField{ - InternalName: field.InternalName, - DisplayName: field.DisplayName, - Visible: field.Visible, - } - } - - cells[i] = &DashboardCell{ - ID: c.ID, - X: c.X, - Y: c.Y, - W: c.W, - H: c.H, - Name: c.Name, - Queries: queries, - Type: c.Type, - Axes: axes, - Colors: colors, - Legend: &Legend{ - Type: c.Legend.Type, - Orientation: c.Legend.Orientation, - }, - TableOptions: tableOptions, - FieldOptions: fieldOptions, - TimeFormat: c.TimeFormat, - DecimalPlaces: decimalPlaces, - } - } - templates := make([]*Template, len(d.Templates)) - for i, t := range d.Templates { - vals := make([]*TemplateValue, len(t.Values)) - for j, v := range t.Values { - vals[j] = &TemplateValue{ - Selected: v.Selected, - Type: v.Type, - Value: v.Value, - Key: v.Key, - } - } - - template := &Template{ - ID: string(t.ID), - TempVar: t.Var, - Values: vals, - Type: t.Type, - Label: t.Label, - } - if t.Query != nil { - template.Query = &TemplateQuery{ - Command: t.Query.Command, - Db: t.Query.DB, - Rp: t.Query.RP, - Measurement: t.Query.Measurement, - TagKey: t.Query.TagKey, - FieldKey: t.Query.FieldKey, - } - } - templates[i] = template - } - return proto.Marshal(&Dashboard{ - ID: int64(d.ID), - Cells: cells, - Templates: templates, - Name: d.Name, - Organization: d.Organization, - }) -} - -// UnmarshalDashboard decodes a layout from binary protobuf data. 
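Both the layout and dashboard codecs share a Range convention: a nil Range is written as the zero-valued message, and decoders only rebuild a Range when Upper != Lower. A sketch of that convention with illustrative types, including the one case it cannot represent:

```go
package rangeconv

// Range mirrors the shape of the domain type; int64 bounds assumed.
type Range struct{ Upper, Lower int64 }

// toPB encodes a possibly-nil Range as a pair of bounds; nil becomes
// the zero pair, matching how the marshalers above fill a zero message.
func toPB(r *Range) (upper, lower int64) {
	if r != nil {
		return r.Upper, r.Lower
	}
	return 0, 0
}

// fromPB reverses the encoding: zero-width ranges (including the
// encoded nil) are dropped, which makes a genuine Upper == Lower
// range unrepresentable after a round trip.
func fromPB(upper, lower int64) *Range {
	if upper == lower {
		return nil
	}
	return &Range{Upper: upper, Lower: lower}
}
```

The convention keeps the wire format simple at the cost of that one blind spot.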
-func UnmarshalDashboard(data []byte, d *chronograf.Dashboard) error { - var pb Dashboard - if err := proto.Unmarshal(data, &pb); err != nil { - return err - } - - cells := make([]chronograf.DashboardCell, len(pb.Cells)) - for i, c := range pb.Cells { - queries := make([]chronograf.DashboardQuery, len(c.Queries)) - for j, q := range c.Queries { - queries[j] = chronograf.DashboardQuery{ - Command: q.Command, - Label: q.Label, - Source: q.Source, - Type: q.Type, - } - - if q.Range.Upper != q.Range.Lower { - queries[j].Range = &chronograf.Range{ - Upper: q.Range.Upper, - Lower: q.Range.Lower, - } - } - - shifts := make([]chronograf.TimeShift, len(q.Shifts)) - for k := range q.Shifts { - shift := chronograf.TimeShift{ - Label: q.Shifts[k].Label, - Unit: q.Shifts[k].Unit, - Quantity: q.Shifts[k].Quantity, - } - - shifts[k] = shift - } - - queries[j].Shifts = shifts - } - - colors := make([]chronograf.CellColor, len(c.Colors)) - for j, color := range c.Colors { - colors[j] = chronograf.CellColor{ - ID: color.ID, - Type: color.Type, - Hex: color.Hex, - Name: color.Name, - Value: color.Value, - } - } - - axes := make(map[string]chronograf.Axis, len(c.Axes)) - for a, r := range c.Axes { - // axis base defaults to 10 - if r.Base == "" { - r.Base = "10" - } - - if r.Scale == "" { - r.Scale = "linear" - } - - if r.Bounds != nil { - axes[a] = chronograf.Axis{ - Bounds: r.Bounds, - Label: r.Label, - Prefix: r.Prefix, - Suffix: r.Suffix, - Base: r.Base, - Scale: r.Scale, - } - - } else { - axes[a] = chronograf.Axis{ - Bounds: []string{}, - Base: r.Base, - Scale: r.Scale, - } - } - } - - legend := chronograf.Legend{} - if c.Legend != nil { - legend.Type = c.Legend.Type - legend.Orientation = c.Legend.Orientation - } - - tableOptions := chronograf.TableOptions{} - if c.TableOptions != nil { - sortBy := chronograf.RenamableField{} - if c.TableOptions.SortBy != nil { - sortBy.InternalName = c.TableOptions.SortBy.InternalName - sortBy.DisplayName = c.TableOptions.SortBy.DisplayName - sortBy.Visible = c.TableOptions.SortBy.Visible - } - tableOptions.SortBy = sortBy - tableOptions.VerticalTimeAxis = c.TableOptions.VerticalTimeAxis - tableOptions.Wrapping = c.TableOptions.Wrapping - tableOptions.FixFirstColumn = c.TableOptions.FixFirstColumn - } - - fieldOptions := make([]chronograf.RenamableField, len(c.FieldOptions)) - for i, field := range c.FieldOptions { - fieldOptions[i] = chronograf.RenamableField{} - fieldOptions[i].InternalName = field.InternalName - fieldOptions[i].DisplayName = field.DisplayName - fieldOptions[i].Visible = field.Visible - } - - decimalPlaces := chronograf.DecimalPlaces{} - if c.DecimalPlaces != nil { - decimalPlaces.IsEnforced = c.DecimalPlaces.IsEnforced - decimalPlaces.Digits = c.DecimalPlaces.Digits - } else { - decimalPlaces.IsEnforced = true - decimalPlaces.Digits = 2 - } - - // FIXME: this is merely for legacy cells and - // should be removed as soon as possible - cellType := c.Type - if cellType == "" { - cellType = "line" - } - - cells[i] = chronograf.DashboardCell{ - ID: c.ID, - X: c.X, - Y: c.Y, - W: c.W, - H: c.H, - Name: c.Name, - Queries: queries, - Type: cellType, - Axes: axes, - CellColors: colors, - Legend: legend, - TableOptions: tableOptions, - FieldOptions: fieldOptions, - TimeFormat: c.TimeFormat, - DecimalPlaces: decimalPlaces, - } - } - - templates := make([]chronograf.Template, len(pb.Templates)) - for i, t := range pb.Templates { - vals := make([]chronograf.TemplateValue, len(t.Values)) - for j, v := range t.Values { - vals[j] = chronograf.TemplateValue{ - 
Selected: v.Selected, - Type: v.Type, - Value: v.Value, - Key: v.Key, - } - } - - template := chronograf.Template{ - ID: chronograf.TemplateID(t.ID), - TemplateVar: chronograf.TemplateVar{ - Var: t.TempVar, - Values: vals, - }, - Type: t.Type, - Label: t.Label, - } - - if t.Query != nil { - template.Query = &chronograf.TemplateQuery{ - Command: t.Query.Command, - DB: t.Query.Db, - RP: t.Query.Rp, - Measurement: t.Query.Measurement, - TagKey: t.Query.TagKey, - FieldKey: t.Query.FieldKey, - } - } - templates[i] = template - } - - d.ID = chronograf.DashboardID(pb.ID) - d.Cells = cells - d.Templates = templates - d.Name = pb.Name - d.Organization = pb.Organization - return nil -} - -// ScopedAlert contains the source and the kapacitor id -type ScopedAlert struct { - chronograf.AlertRule - SrcID int - KapaID int -} - -// MarshalAlertRule encodes an alert rule to binary protobuf format. -func MarshalAlertRule(r *ScopedAlert) ([]byte, error) { - j, err := json.Marshal(r.AlertRule) - if err != nil { - return nil, err - } - return proto.Marshal(&AlertRule{ - ID: r.ID, - SrcID: int64(r.SrcID), - KapaID: int64(r.KapaID), - JSON: string(j), - }) -} - -// UnmarshalAlertRule decodes an alert rule from binary protobuf data. -func UnmarshalAlertRule(data []byte, r *ScopedAlert) error { - var pb AlertRule - if err := proto.Unmarshal(data, &pb); err != nil { - return err - } - - err := json.Unmarshal([]byte(pb.JSON), &r.AlertRule) - if err != nil { - return err - } - r.SrcID = int(pb.SrcID) - r.KapaID = int(pb.KapaID) - return nil -} - -// MarshalUser encodes a user to binary protobuf format. -// We are ignoring the password for now. -func MarshalUser(u *chronograf.User) ([]byte, error) { - roles := make([]*Role, len(u.Roles)) - for i, role := range u.Roles { - roles[i] = &Role{ - Organization: role.Organization, - Name: role.Name, - } - } - return MarshalUserPB(&User{ - ID: u.ID, - Name: u.Name, - Provider: u.Provider, - Scheme: u.Scheme, - Roles: roles, - SuperAdmin: u.SuperAdmin, - }) -} - -// MarshalUserPB encodes a user to binary protobuf format. -// We are ignoring the password for now. -func MarshalUserPB(u *User) ([]byte, error) { - return proto.Marshal(u) -} - -// UnmarshalUser decodes a user from binary protobuf data. -// We are ignoring the password for now. -func UnmarshalUser(data []byte, u *chronograf.User) error { - var pb User - if err := UnmarshalUserPB(data, &pb); err != nil { - return err - } - roles := make([]chronograf.Role, len(pb.Roles)) - for i, role := range pb.Roles { - roles[i] = chronograf.Role{ - Organization: role.Organization, - Name: role.Name, - } - } - u.ID = pb.ID - u.Name = pb.Name - u.Provider = pb.Provider - u.Scheme = pb.Scheme - u.SuperAdmin = pb.SuperAdmin - u.Roles = roles - - return nil -} - -// UnmarshalUserPB decodes a user from binary protobuf data. -// We are ignoring the password for now. -func UnmarshalUserPB(data []byte, u *User) error { - return proto.Unmarshal(data, u) -} - -// MarshalRole encodes a role to binary protobuf format. -func MarshalRole(r *chronograf.Role) ([]byte, error) { - return MarshalRolePB(&Role{ - Organization: r.Organization, - Name: r.Name, - }) -} - -// MarshalRolePB encodes a role to binary protobuf format. -func MarshalRolePB(r *Role) ([]byte, error) { - return proto.Marshal(r) -} - -// UnmarshalRole decodes a role from binary protobuf data. 
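The user codec above converts between domain roles and generated protobuf roles with plain element-wise copies, and, as its comments note, deliberately leaves the password out of the serialized form. A minimal sketch of the mapping with stand-in types:

```go
package userconv

// domainRole and pbRole stand in for chronograf.Role and the generated
// Role message; they share the same shape, so each direction is a
// straight per-element copy.
type domainRole struct{ Organization, Name string }
type pbRole struct{ Organization, Name string }

func toPB(in []domainRole) []*pbRole {
	out := make([]*pbRole, len(in))
	for i, r := range in {
		out[i] = &pbRole{Organization: r.Organization, Name: r.Name}
	}
	return out
}

func fromPB(in []*pbRole) []domainRole {
	out := make([]domainRole, len(in))
	for i, r := range in {
		out[i] = domainRole{Organization: r.Organization, Name: r.Name}
	}
	return out
}
```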
-func UnmarshalRole(data []byte, r *chronograf.Role) error { - var pb Role - if err := UnmarshalRolePB(data, &pb); err != nil { - return err - } - r.Organization = pb.Organization - r.Name = pb.Name - - return nil -} - -// UnmarshalRolePB decodes a role from binary protobuf data. -func UnmarshalRolePB(data []byte, r *Role) error { - return proto.Unmarshal(data, r) -} - -// MarshalOrganization encodes an organization to binary protobuf format. -func MarshalOrganization(o *chronograf.Organization) ([]byte, error) { - - return MarshalOrganizationPB(&Organization{ - ID: o.ID, - Name: o.Name, - DefaultRole: o.DefaultRole, - }) -} - -// MarshalOrganizationPB encodes an organization to binary protobuf format. -func MarshalOrganizationPB(o *Organization) ([]byte, error) { - return proto.Marshal(o) -} - -// UnmarshalOrganization decodes an organization from binary protobuf data. -func UnmarshalOrganization(data []byte, o *chronograf.Organization) error { - var pb Organization - if err := UnmarshalOrganizationPB(data, &pb); err != nil { - return err - } - o.ID = pb.ID - o.Name = pb.Name - o.DefaultRole = pb.DefaultRole - - return nil -} - -// UnmarshalOrganizationPB decodes an organization from binary protobuf data. -func UnmarshalOrganizationPB(data []byte, o *Organization) error { - return proto.Unmarshal(data, o) -} - -// MarshalConfig encodes a config to binary protobuf format. -func MarshalConfig(c *chronograf.Config) ([]byte, error) { - return MarshalConfigPB(&Config{ - Auth: &AuthConfig{ - SuperAdminNewUsers: c.Auth.SuperAdminNewUsers, - }, - }) -} - -// MarshalConfigPB encodes a config to binary protobuf format. -func MarshalConfigPB(c *Config) ([]byte, error) { - return proto.Marshal(c) -} - -// UnmarshalConfig decodes a config from binary protobuf data. -func UnmarshalConfig(data []byte, c *chronograf.Config) error { - var pb Config - if err := UnmarshalConfigPB(data, &pb); err != nil { - return err - } - if pb.Auth == nil { - return fmt.Errorf("auth config is nil") - } - c.Auth.SuperAdminNewUsers = pb.Auth.SuperAdminNewUsers - - return nil -} - -// UnmarshalConfigPB decodes a config from binary protobuf data. -func UnmarshalConfigPB(data []byte, c *Config) error { - return proto.Unmarshal(data, c) -} - -// MarshalOrganizationConfig encodes a config to binary protobuf format. -func MarshalOrganizationConfig(c *chronograf.OrganizationConfig) ([]byte, error) { - columns := make([]*LogViewerColumn, len(c.LogViewer.Columns)) - - for i, column := range c.LogViewer.Columns { - encodings := make([]*ColumnEncoding, len(column.Encodings)) - - for j, e := range column.Encodings { - encodings[j] = &ColumnEncoding{ - Type: e.Type, - Value: e.Value, - Name: e.Name, - } - } - - columns[i] = &LogViewerColumn{ - Name: column.Name, - Position: column.Position, - Encodings: encodings, - } - } - - return MarshalOrganizationConfigPB(&OrganizationConfig{ - OrganizationID: c.OrganizationID, - LogViewer: &LogViewerConfig{ - Columns: columns, - }, - }) -} - -// MarshalOrganizationConfigPB encodes a config to binary protobuf format. -func MarshalOrganizationConfigPB(c *OrganizationConfig) ([]byte, error) { - return proto.Marshal(c) -} - -// UnmarshalOrganizationConfig decodes a config from binary protobuf data.
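UnmarshalConfig above refuses a payload whose Auth submessage is missing, and UnmarshalOrganizationConfig below does the same for LogViewer: generated submessages are pointers, so an absent field arrives as nil and has to be caught before it is dereferenced. A compact sketch of that guard with stand-in types:

```go
package orgconfig

import "errors"

// logViewerPB and orgConfigPB stand in for the generated messages;
// the submessage is a pointer, so "not present" decodes as nil.
type logViewerPB struct{ Columns []string }

type orgConfigPB struct {
	OrganizationID string
	LogViewer      *logViewerPB
}

// decode rejects a missing required submessage up front instead of
// panicking on a nil dereference further in.
func decode(pb *orgConfigPB) ([]string, error) {
	if pb.LogViewer == nil {
		return nil, errors.New("log viewer config is nil")
	}
	return pb.LogViewer.Columns, nil
}
```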
-func UnmarshalOrganizationConfig(data []byte, c *chronograf.OrganizationConfig) error { - var pb OrganizationConfig - - if err := UnmarshalOrganizationConfigPB(data, &pb); err != nil { - return err - } - - if pb.LogViewer == nil { - return fmt.Errorf("log Viewer config is nil") - } - - c.OrganizationID = pb.OrganizationID - - columns := make([]chronograf.LogViewerColumn, len(pb.LogViewer.Columns)) - - for i, c := range pb.LogViewer.Columns { - columns[i].Name = c.Name - columns[i].Position = c.Position - - encodings := make([]chronograf.ColumnEncoding, len(c.Encodings)) - for j, e := range c.Encodings { - encodings[j].Type = e.Type - encodings[j].Value = e.Value - encodings[j].Name = e.Name - } - - columns[i].Encodings = encodings - } - - c.LogViewer.Columns = columns - - return nil -} - -// UnmarshalOrganizationConfigPB decodes a config from binary protobuf data. -func UnmarshalOrganizationConfigPB(data []byte, c *OrganizationConfig) error { - return proto.Unmarshal(data, c) -} - -// MarshalMapping encodes a mapping to binary protobuf format. -func MarshalMapping(m *chronograf.Mapping) ([]byte, error) { - - return MarshalMappingPB(&Mapping{ - Provider: m.Provider, - Scheme: m.Scheme, - ProviderOrganization: m.ProviderOrganization, - ID: m.ID, - Organization: m.Organization, - }) -} - -// MarshalMappingPB encodes a mapping to binary protobuf format. -func MarshalMappingPB(m *Mapping) ([]byte, error) { - return proto.Marshal(m) -} - -// UnmarshalMapping decodes a mapping from binary protobuf data. -func UnmarshalMapping(data []byte, m *chronograf.Mapping) error { - var pb Mapping - if err := UnmarshalMappingPB(data, &pb); err != nil { - return err - } - - m.Provider = pb.Provider - m.Scheme = pb.Scheme - m.ProviderOrganization = pb.ProviderOrganization - m.Organization = pb.Organization - m.ID = pb.ID - - return nil -} - -// UnmarshalMappingPB decodes a mapping from binary protobuf data. -func UnmarshalMappingPB(data []byte, m *Mapping) error { - return proto.Unmarshal(data, m) -} diff --git a/chronograf/bolt/internal/internal.pb.go b/chronograf/bolt/internal/internal.pb.go deleted file mode 100644 index d79abfc7847..00000000000 --- a/chronograf/bolt/internal/internal.pb.go +++ /dev/null @@ -1,2260 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: internal.proto - -package internal - -import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
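The `const _ = proto.GoGoProtoPackageIsVersion3` pin that follows turns a generator/runtime version mismatch into a build failure rather than a runtime surprise; the hand-written files above use the same idea for interfaces with `var _ chronograf.ConfigStore = &ConfigStore{}`. A generic sketch of both assertion forms, using io.Writer and a hypothetical dependency constant as stand-ins:

```go
package compat

import "io"

// Interface form: the blank var fails to compile if nopWriter's method
// set ever drifts away from io.Writer, mirroring
// var _ chronograf.ConfigStore = &ConfigStore{} above.
type nopWriter struct{}

func (nopWriter) Write(p []byte) (int, error) { return len(p), nil }

var _ io.Writer = nopWriter{}

// Constant form: referencing a symbol that only exists in compatible
// versions of a dependency breaks the build early. apiVersion3 stands
// in for a constant exported by that dependency.
const apiVersion3 = 3

const _ = apiVersion3
```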
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type Source struct { - ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - Name string `protobuf:"bytes,2,opt,name=Name,proto3" json:"Name,omitempty"` - Type string `protobuf:"bytes,3,opt,name=Type,proto3" json:"Type,omitempty"` - Username string `protobuf:"bytes,4,opt,name=Username,proto3" json:"Username,omitempty"` - Password string `protobuf:"bytes,5,opt,name=Password,proto3" json:"Password,omitempty"` - URL string `protobuf:"bytes,6,opt,name=URL,proto3" json:"URL,omitempty"` - Default bool `protobuf:"varint,7,opt,name=Default,proto3" json:"Default,omitempty"` - Telegraf string `protobuf:"bytes,8,opt,name=Telegraf,proto3" json:"Telegraf,omitempty"` - InsecureSkipVerify bool `protobuf:"varint,9,opt,name=InsecureSkipVerify,proto3" json:"InsecureSkipVerify,omitempty"` - MetaURL string `protobuf:"bytes,10,opt,name=MetaURL,proto3" json:"MetaURL,omitempty"` - SharedSecret string `protobuf:"bytes,11,opt,name=SharedSecret,proto3" json:"SharedSecret,omitempty"` - Organization string `protobuf:"bytes,12,opt,name=Organization,proto3" json:"Organization,omitempty"` - Role string `protobuf:"bytes,13,opt,name=Role,proto3" json:"Role,omitempty"` - DefaultRP string `protobuf:"bytes,14,opt,name=DefaultRP,proto3" json:"DefaultRP,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Source) Reset() { *m = Source{} } -func (m *Source) String() string { return proto.CompactTextString(m) } -func (*Source) ProtoMessage() {} -func (*Source) Descriptor() ([]byte, []int) { - return fileDescriptor_41f4a519b878ee3b, []int{0} -} -func (m *Source) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Source.Unmarshal(m, b) -} -func (m *Source) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Source.Marshal(b, m, deterministic) -} -func (m *Source) XXX_Merge(src proto.Message) { - xxx_messageInfo_Source.Merge(m, src) -} -func (m *Source) XXX_Size() int { - return xxx_messageInfo_Source.Size(m) -} -func (m *Source) XXX_DiscardUnknown() { - xxx_messageInfo_Source.DiscardUnknown(m) -} - -var xxx_messageInfo_Source proto.InternalMessageInfo - -func (m *Source) GetID() int64 { - if m != nil { - return m.ID - } - return 0 -} - -func (m *Source) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Source) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *Source) GetUsername() string { - if m != nil { - return m.Username - } - return "" -} - -func (m *Source) GetPassword() string { - if m != nil { - return m.Password - } - return "" -} - -func (m *Source) GetURL() string { - if m != nil { - return m.URL - } - return "" -} - -func (m *Source) GetDefault() bool { - if m != nil { - return m.Default - } - return false -} - -func (m *Source) GetTelegraf() string { - if m != nil { - return m.Telegraf - } - return "" -} - -func (m *Source) GetInsecureSkipVerify() bool { - if m != nil { - return m.InsecureSkipVerify - } - return false -} - -func (m *Source) GetMetaURL() string { - if m != nil { - return m.MetaURL - } - return "" -} - -func (m *Source) GetSharedSecret() string { - if m != nil { - return m.SharedSecret - } - return "" -} - -func (m *Source) GetOrganization() string { - if m != nil { - return m.Organization - } - return "" -} - -func (m *Source) GetRole() string { - if m != nil { - return m.Role - } - return "" -} - -func (m *Source) 
GetDefaultRP() string { - if m != nil { - return m.DefaultRP - } - return "" -} - -type Dashboard struct { - ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - Name string `protobuf:"bytes,2,opt,name=Name,proto3" json:"Name,omitempty"` - Cells []*DashboardCell `protobuf:"bytes,3,rep,name=cells,proto3" json:"cells,omitempty"` - Templates []*Template `protobuf:"bytes,4,rep,name=templates,proto3" json:"templates,omitempty"` - Organization string `protobuf:"bytes,5,opt,name=Organization,proto3" json:"Organization,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Dashboard) Reset() { *m = Dashboard{} } -func (m *Dashboard) String() string { return proto.CompactTextString(m) } -func (*Dashboard) ProtoMessage() {} -func (*Dashboard) Descriptor() ([]byte, []int) { - return fileDescriptor_41f4a519b878ee3b, []int{1} -} -func (m *Dashboard) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Dashboard.Unmarshal(m, b) -} -func (m *Dashboard) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Dashboard.Marshal(b, m, deterministic) -} -func (m *Dashboard) XXX_Merge(src proto.Message) { - xxx_messageInfo_Dashboard.Merge(m, src) -} -func (m *Dashboard) XXX_Size() int { - return xxx_messageInfo_Dashboard.Size(m) -} -func (m *Dashboard) XXX_DiscardUnknown() { - xxx_messageInfo_Dashboard.DiscardUnknown(m) -} - -var xxx_messageInfo_Dashboard proto.InternalMessageInfo - -func (m *Dashboard) GetID() int64 { - if m != nil { - return m.ID - } - return 0 -} - -func (m *Dashboard) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Dashboard) GetCells() []*DashboardCell { - if m != nil { - return m.Cells - } - return nil -} - -func (m *Dashboard) GetTemplates() []*Template { - if m != nil { - return m.Templates - } - return nil -} - -func (m *Dashboard) GetOrganization() string { - if m != nil { - return m.Organization - } - return "" -} - -type DashboardCell struct { - X int32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"` - Y int32 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"` - W int32 `protobuf:"varint,3,opt,name=w,proto3" json:"w,omitempty"` - H int32 `protobuf:"varint,4,opt,name=h,proto3" json:"h,omitempty"` - Queries []*Query `protobuf:"bytes,5,rep,name=queries,proto3" json:"queries,omitempty"` - Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` - Type string `protobuf:"bytes,7,opt,name=type,proto3" json:"type,omitempty"` - ID string `protobuf:"bytes,8,opt,name=ID,proto3" json:"ID,omitempty"` - Axes map[string]*Axis `protobuf:"bytes,9,rep,name=axes,proto3" json:"axes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Colors []*Color `protobuf:"bytes,10,rep,name=colors,proto3" json:"colors,omitempty"` - Legend *Legend `protobuf:"bytes,11,opt,name=legend,proto3" json:"legend,omitempty"` - TableOptions *TableOptions `protobuf:"bytes,12,opt,name=tableOptions,proto3" json:"tableOptions,omitempty"` - FieldOptions []*RenamableField `protobuf:"bytes,13,rep,name=fieldOptions,proto3" json:"fieldOptions,omitempty"` - TimeFormat string `protobuf:"bytes,14,opt,name=timeFormat,proto3" json:"timeFormat,omitempty"` - DecimalPlaces *DecimalPlaces `protobuf:"bytes,15,opt,name=decimalPlaces,proto3" json:"decimalPlaces,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - 
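Every generated getter in this file checks for a nil receiver, which is what lets decoding code chain through optional submessages (for example c.GetLegend().GetType()) without panicking. A sketch of the convention with stand-in types:

```go
package getters

// legendPB and cellPB stand in for generated messages.
type legendPB struct{ Type string }

// GetType is safe to call on a nil *legendPB: it returns the zero
// value instead of dereferencing the receiver.
func (m *legendPB) GetType() string {
	if m != nil {
		return m.Type
	}
	return ""
}

type cellPB struct{ Legend *legendPB }

func (m *cellPB) GetLegend() *legendPB {
	if m != nil {
		return m.Legend
	}
	return nil
}

// demo chains through two possibly-nil levels without a nil check;
// a nil message simply yields "".
func demo() string {
	var c *cellPB
	return c.GetLegend().GetType()
}
```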
-func (m *DashboardCell) Reset() { *m = DashboardCell{} } -func (m *DashboardCell) String() string { return proto.CompactTextString(m) } -func (*DashboardCell) ProtoMessage() {} -func (*DashboardCell) Descriptor() ([]byte, []int) { - return fileDescriptor_41f4a519b878ee3b, []int{2} -} -func (m *DashboardCell) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DashboardCell.Unmarshal(m, b) -} -func (m *DashboardCell) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DashboardCell.Marshal(b, m, deterministic) -} -func (m *DashboardCell) XXX_Merge(src proto.Message) { - xxx_messageInfo_DashboardCell.Merge(m, src) -} -func (m *DashboardCell) XXX_Size() int { - return xxx_messageInfo_DashboardCell.Size(m) -} -func (m *DashboardCell) XXX_DiscardUnknown() { - xxx_messageInfo_DashboardCell.DiscardUnknown(m) -} - -var xxx_messageInfo_DashboardCell proto.InternalMessageInfo - -func (m *DashboardCell) GetX() int32 { - if m != nil { - return m.X - } - return 0 -} - -func (m *DashboardCell) GetY() int32 { - if m != nil { - return m.Y - } - return 0 -} - -func (m *DashboardCell) GetW() int32 { - if m != nil { - return m.W - } - return 0 -} - -func (m *DashboardCell) GetH() int32 { - if m != nil { - return m.H - } - return 0 -} - -func (m *DashboardCell) GetQueries() []*Query { - if m != nil { - return m.Queries - } - return nil -} - -func (m *DashboardCell) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *DashboardCell) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *DashboardCell) GetID() string { - if m != nil { - return m.ID - } - return "" -} - -func (m *DashboardCell) GetAxes() map[string]*Axis { - if m != nil { - return m.Axes - } - return nil -} - -func (m *DashboardCell) GetColors() []*Color { - if m != nil { - return m.Colors - } - return nil -} - -func (m *DashboardCell) GetLegend() *Legend { - if m != nil { - return m.Legend - } - return nil -} - -func (m *DashboardCell) GetTableOptions() *TableOptions { - if m != nil { - return m.TableOptions - } - return nil -} - -func (m *DashboardCell) GetFieldOptions() []*RenamableField { - if m != nil { - return m.FieldOptions - } - return nil -} - -func (m *DashboardCell) GetTimeFormat() string { - if m != nil { - return m.TimeFormat - } - return "" -} - -func (m *DashboardCell) GetDecimalPlaces() *DecimalPlaces { - if m != nil { - return m.DecimalPlaces - } - return nil -} - -type DecimalPlaces struct { - IsEnforced bool `protobuf:"varint,1,opt,name=isEnforced,proto3" json:"isEnforced,omitempty"` - Digits int32 `protobuf:"varint,2,opt,name=digits,proto3" json:"digits,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DecimalPlaces) Reset() { *m = DecimalPlaces{} } -func (m *DecimalPlaces) String() string { return proto.CompactTextString(m) } -func (*DecimalPlaces) ProtoMessage() {} -func (*DecimalPlaces) Descriptor() ([]byte, []int) { - return fileDescriptor_41f4a519b878ee3b, []int{3} -} -func (m *DecimalPlaces) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DecimalPlaces.Unmarshal(m, b) -} -func (m *DecimalPlaces) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DecimalPlaces.Marshal(b, m, deterministic) -} -func (m *DecimalPlaces) XXX_Merge(src proto.Message) { - xxx_messageInfo_DecimalPlaces.Merge(m, src) -} -func (m *DecimalPlaces) XXX_Size() int { - return xxx_messageInfo_DecimalPlaces.Size(m) -} -func (m 
*DecimalPlaces) XXX_DiscardUnknown() { - xxx_messageInfo_DecimalPlaces.DiscardUnknown(m) -} - -var xxx_messageInfo_DecimalPlaces proto.InternalMessageInfo - -func (m *DecimalPlaces) GetIsEnforced() bool { - if m != nil { - return m.IsEnforced - } - return false -} - -func (m *DecimalPlaces) GetDigits() int32 { - if m != nil { - return m.Digits - } - return 0 -} - -type TableOptions struct { - VerticalTimeAxis bool `protobuf:"varint,2,opt,name=verticalTimeAxis,proto3" json:"verticalTimeAxis,omitempty"` - SortBy *RenamableField `protobuf:"bytes,3,opt,name=sortBy,proto3" json:"sortBy,omitempty"` - Wrapping string `protobuf:"bytes,4,opt,name=wrapping,proto3" json:"wrapping,omitempty"` - FixFirstColumn bool `protobuf:"varint,6,opt,name=fixFirstColumn,proto3" json:"fixFirstColumn,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TableOptions) Reset() { *m = TableOptions{} } -func (m *TableOptions) String() string { return proto.CompactTextString(m) } -func (*TableOptions) ProtoMessage() {} -func (*TableOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_41f4a519b878ee3b, []int{4} -} -func (m *TableOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TableOptions.Unmarshal(m, b) -} -func (m *TableOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TableOptions.Marshal(b, m, deterministic) -} -func (m *TableOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_TableOptions.Merge(m, src) -} -func (m *TableOptions) XXX_Size() int { - return xxx_messageInfo_TableOptions.Size(m) -} -func (m *TableOptions) XXX_DiscardUnknown() { - xxx_messageInfo_TableOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_TableOptions proto.InternalMessageInfo - -func (m *TableOptions) GetVerticalTimeAxis() bool { - if m != nil { - return m.VerticalTimeAxis - } - return false -} - -func (m *TableOptions) GetSortBy() *RenamableField { - if m != nil { - return m.SortBy - } - return nil -} - -func (m *TableOptions) GetWrapping() string { - if m != nil { - return m.Wrapping - } - return "" -} - -func (m *TableOptions) GetFixFirstColumn() bool { - if m != nil { - return m.FixFirstColumn - } - return false -} - -type RenamableField struct { - InternalName string `protobuf:"bytes,1,opt,name=internalName,proto3" json:"internalName,omitempty"` - DisplayName string `protobuf:"bytes,2,opt,name=displayName,proto3" json:"displayName,omitempty"` - Visible bool `protobuf:"varint,3,opt,name=visible,proto3" json:"visible,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RenamableField) Reset() { *m = RenamableField{} } -func (m *RenamableField) String() string { return proto.CompactTextString(m) } -func (*RenamableField) ProtoMessage() {} -func (*RenamableField) Descriptor() ([]byte, []int) { - return fileDescriptor_41f4a519b878ee3b, []int{5} -} -func (m *RenamableField) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RenamableField.Unmarshal(m, b) -} -func (m *RenamableField) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RenamableField.Marshal(b, m, deterministic) -} -func (m *RenamableField) XXX_Merge(src proto.Message) { - xxx_messageInfo_RenamableField.Merge(m, src) -} -func (m *RenamableField) XXX_Size() int { - return xxx_messageInfo_RenamableField.Size(m) -} -func (m *RenamableField) XXX_DiscardUnknown() { - 
xxx_messageInfo_RenamableField.DiscardUnknown(m) -} - -var xxx_messageInfo_RenamableField proto.InternalMessageInfo - -func (m *RenamableField) GetInternalName() string { - if m != nil { - return m.InternalName - } - return "" -} - -func (m *RenamableField) GetDisplayName() string { - if m != nil { - return m.DisplayName - } - return "" -} - -func (m *RenamableField) GetVisible() bool { - if m != nil { - return m.Visible - } - return false -} - -type Color struct { - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - Type string `protobuf:"bytes,2,opt,name=Type,proto3" json:"Type,omitempty"` - Hex string `protobuf:"bytes,3,opt,name=Hex,proto3" json:"Hex,omitempty"` - Name string `protobuf:"bytes,4,opt,name=Name,proto3" json:"Name,omitempty"` - Value string `protobuf:"bytes,5,opt,name=Value,proto3" json:"Value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Color) Reset() { *m = Color{} } -func (m *Color) String() string { return proto.CompactTextString(m) } -func (*Color) ProtoMessage() {} -func (*Color) Descriptor() ([]byte, []int) { - return fileDescriptor_41f4a519b878ee3b, []int{6} -} -func (m *Color) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Color.Unmarshal(m, b) -} -func (m *Color) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Color.Marshal(b, m, deterministic) -} -func (m *Color) XXX_Merge(src proto.Message) { - xxx_messageInfo_Color.Merge(m, src) -} -func (m *Color) XXX_Size() int { - return xxx_messageInfo_Color.Size(m) -} -func (m *Color) XXX_DiscardUnknown() { - xxx_messageInfo_Color.DiscardUnknown(m) -} - -var xxx_messageInfo_Color proto.InternalMessageInfo - -func (m *Color) GetID() string { - if m != nil { - return m.ID - } - return "" -} - -func (m *Color) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *Color) GetHex() string { - if m != nil { - return m.Hex - } - return "" -} - -func (m *Color) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Color) GetValue() string { - if m != nil { - return m.Value - } - return "" -} - -type Legend struct { - Type string `protobuf:"bytes,1,opt,name=Type,proto3" json:"Type,omitempty"` - Orientation string `protobuf:"bytes,2,opt,name=Orientation,proto3" json:"Orientation,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Legend) Reset() { *m = Legend{} } -func (m *Legend) String() string { return proto.CompactTextString(m) } -func (*Legend) ProtoMessage() {} -func (*Legend) Descriptor() ([]byte, []int) { - return fileDescriptor_41f4a519b878ee3b, []int{7} -} -func (m *Legend) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Legend.Unmarshal(m, b) -} -func (m *Legend) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Legend.Marshal(b, m, deterministic) -} -func (m *Legend) XXX_Merge(src proto.Message) { - xxx_messageInfo_Legend.Merge(m, src) -} -func (m *Legend) XXX_Size() int { - return xxx_messageInfo_Legend.Size(m) -} -func (m *Legend) XXX_DiscardUnknown() { - xxx_messageInfo_Legend.DiscardUnknown(m) -} - -var xxx_messageInfo_Legend proto.InternalMessageInfo - -func (m *Legend) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *Legend) GetOrientation() string { - if m != nil { - return m.Orientation - } - return "" -} - -type Axis struct { - 
LegacyBounds []int64 `protobuf:"varint,1,rep,packed,name=legacyBounds,proto3" json:"legacyBounds,omitempty"` - Bounds []string `protobuf:"bytes,2,rep,name=bounds,proto3" json:"bounds,omitempty"` - Label string `protobuf:"bytes,3,opt,name=label,proto3" json:"label,omitempty"` - Prefix string `protobuf:"bytes,4,opt,name=prefix,proto3" json:"prefix,omitempty"` - Suffix string `protobuf:"bytes,5,opt,name=suffix,proto3" json:"suffix,omitempty"` - Base string `protobuf:"bytes,6,opt,name=base,proto3" json:"base,omitempty"` - Scale string `protobuf:"bytes,7,opt,name=scale,proto3" json:"scale,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Axis) Reset() { *m = Axis{} } -func (m *Axis) String() string { return proto.CompactTextString(m) } -func (*Axis) ProtoMessage() {} -func (*Axis) Descriptor() ([]byte, []int) { - return fileDescriptor_41f4a519b878ee3b, []int{8} -} -func (m *Axis) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Axis.Unmarshal(m, b) -} -func (m *Axis) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Axis.Marshal(b, m, deterministic) -} -func (m *Axis) XXX_Merge(src proto.Message) { - xxx_messageInfo_Axis.Merge(m, src) -} -func (m *Axis) XXX_Size() int { - return xxx_messageInfo_Axis.Size(m) -} -func (m *Axis) XXX_DiscardUnknown() { - xxx_messageInfo_Axis.DiscardUnknown(m) -} - -var xxx_messageInfo_Axis proto.InternalMessageInfo - -func (m *Axis) GetLegacyBounds() []int64 { - if m != nil { - return m.LegacyBounds - } - return nil -} - -func (m *Axis) GetBounds() []string { - if m != nil { - return m.Bounds - } - return nil -} - -func (m *Axis) GetLabel() string { - if m != nil { - return m.Label - } - return "" -} - -func (m *Axis) GetPrefix() string { - if m != nil { - return m.Prefix - } - return "" -} - -func (m *Axis) GetSuffix() string { - if m != nil { - return m.Suffix - } - return "" -} - -func (m *Axis) GetBase() string { - if m != nil { - return m.Base - } - return "" -} - -func (m *Axis) GetScale() string { - if m != nil { - return m.Scale - } - return "" -} - -type Template struct { - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - TempVar string `protobuf:"bytes,2,opt,name=temp_var,json=tempVar,proto3" json:"temp_var,omitempty"` - Values []*TemplateValue `protobuf:"bytes,3,rep,name=values,proto3" json:"values,omitempty"` - Type string `protobuf:"bytes,4,opt,name=type,proto3" json:"type,omitempty"` - Label string `protobuf:"bytes,5,opt,name=label,proto3" json:"label,omitempty"` - Query *TemplateQuery `protobuf:"bytes,6,opt,name=query,proto3" json:"query,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Template) Reset() { *m = Template{} } -func (m *Template) String() string { return proto.CompactTextString(m) } -func (*Template) ProtoMessage() {} -func (*Template) Descriptor() ([]byte, []int) { - return fileDescriptor_41f4a519b878ee3b, []int{9} -} -func (m *Template) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Template.Unmarshal(m, b) -} -func (m *Template) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Template.Marshal(b, m, deterministic) -} -func (m *Template) XXX_Merge(src proto.Message) { - xxx_messageInfo_Template.Merge(m, src) -} -func (m *Template) XXX_Size() int { - return xxx_messageInfo_Template.Size(m) -} -func (m *Template) XXX_DiscardUnknown() { - 
xxx_messageInfo_Template.DiscardUnknown(m) -} - -var xxx_messageInfo_Template proto.InternalMessageInfo - -func (m *Template) GetID() string { - if m != nil { - return m.ID - } - return "" -} - -func (m *Template) GetTempVar() string { - if m != nil { - return m.TempVar - } - return "" -} - -func (m *Template) GetValues() []*TemplateValue { - if m != nil { - return m.Values - } - return nil -} - -func (m *Template) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *Template) GetLabel() string { - if m != nil { - return m.Label - } - return "" -} - -func (m *Template) GetQuery() *TemplateQuery { - if m != nil { - return m.Query - } - return nil -} - -type TemplateValue struct { - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - Selected bool `protobuf:"varint,3,opt,name=selected,proto3" json:"selected,omitempty"` - Key string `protobuf:"bytes,4,opt,name=key,proto3" json:"key,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TemplateValue) Reset() { *m = TemplateValue{} } -func (m *TemplateValue) String() string { return proto.CompactTextString(m) } -func (*TemplateValue) ProtoMessage() {} -func (*TemplateValue) Descriptor() ([]byte, []int) { - return fileDescriptor_41f4a519b878ee3b, []int{10} -} -func (m *TemplateValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TemplateValue.Unmarshal(m, b) -} -func (m *TemplateValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TemplateValue.Marshal(b, m, deterministic) -} -func (m *TemplateValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_TemplateValue.Merge(m, src) -} -func (m *TemplateValue) XXX_Size() int { - return xxx_messageInfo_TemplateValue.Size(m) -} -func (m *TemplateValue) XXX_DiscardUnknown() { - xxx_messageInfo_TemplateValue.DiscardUnknown(m) -} - -var xxx_messageInfo_TemplateValue proto.InternalMessageInfo - -func (m *TemplateValue) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *TemplateValue) GetValue() string { - if m != nil { - return m.Value - } - return "" -} - -func (m *TemplateValue) GetSelected() bool { - if m != nil { - return m.Selected - } - return false -} - -func (m *TemplateValue) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -type TemplateQuery struct { - Command string `protobuf:"bytes,1,opt,name=command,proto3" json:"command,omitempty"` - Db string `protobuf:"bytes,2,opt,name=db,proto3" json:"db,omitempty"` - Rp string `protobuf:"bytes,3,opt,name=rp,proto3" json:"rp,omitempty"` - Measurement string `protobuf:"bytes,4,opt,name=measurement,proto3" json:"measurement,omitempty"` - TagKey string `protobuf:"bytes,5,opt,name=tag_key,json=tagKey,proto3" json:"tag_key,omitempty"` - FieldKey string `protobuf:"bytes,6,opt,name=field_key,json=fieldKey,proto3" json:"field_key,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TemplateQuery) Reset() { *m = TemplateQuery{} } -func (m *TemplateQuery) String() string { return proto.CompactTextString(m) } -func (*TemplateQuery) ProtoMessage() {} -func (*TemplateQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_41f4a519b878ee3b, []int{11} -} -func (m *TemplateQuery) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TemplateQuery.Unmarshal(m, 
b) -} -func (m *TemplateQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TemplateQuery.Marshal(b, m, deterministic) -} -func (m *TemplateQuery) XXX_Merge(src proto.Message) { - xxx_messageInfo_TemplateQuery.Merge(m, src) -} -func (m *TemplateQuery) XXX_Size() int { - return xxx_messageInfo_TemplateQuery.Size(m) -} -func (m *TemplateQuery) XXX_DiscardUnknown() { - xxx_messageInfo_TemplateQuery.DiscardUnknown(m) -} - -var xxx_messageInfo_TemplateQuery proto.InternalMessageInfo - -func (m *TemplateQuery) GetCommand() string { - if m != nil { - return m.Command - } - return "" -} - -func (m *TemplateQuery) GetDb() string { - if m != nil { - return m.Db - } - return "" -} - -func (m *TemplateQuery) GetRp() string { - if m != nil { - return m.Rp - } - return "" -} - -func (m *TemplateQuery) GetMeasurement() string { - if m != nil { - return m.Measurement - } - return "" -} - -func (m *TemplateQuery) GetTagKey() string { - if m != nil { - return m.TagKey - } - return "" -} - -func (m *TemplateQuery) GetFieldKey() string { - if m != nil { - return m.FieldKey - } - return "" -} - -type Server struct { - ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - Name string `protobuf:"bytes,2,opt,name=Name,proto3" json:"Name,omitempty"` - Username string `protobuf:"bytes,3,opt,name=Username,proto3" json:"Username,omitempty"` - Password string `protobuf:"bytes,4,opt,name=Password,proto3" json:"Password,omitempty"` - URL string `protobuf:"bytes,5,opt,name=URL,proto3" json:"URL,omitempty"` - SrcID int64 `protobuf:"varint,6,opt,name=SrcID,proto3" json:"SrcID,omitempty"` - Active bool `protobuf:"varint,7,opt,name=Active,proto3" json:"Active,omitempty"` - Organization string `protobuf:"bytes,8,opt,name=Organization,proto3" json:"Organization,omitempty"` - InsecureSkipVerify bool `protobuf:"varint,9,opt,name=InsecureSkipVerify,proto3" json:"InsecureSkipVerify,omitempty"` - Type string `protobuf:"bytes,10,opt,name=Type,proto3" json:"Type,omitempty"` - MetadataJSON string `protobuf:"bytes,11,opt,name=MetadataJSON,proto3" json:"MetadataJSON,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Server) Reset() { *m = Server{} } -func (m *Server) String() string { return proto.CompactTextString(m) } -func (*Server) ProtoMessage() {} -func (*Server) Descriptor() ([]byte, []int) { - return fileDescriptor_41f4a519b878ee3b, []int{12} -} -func (m *Server) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Server.Unmarshal(m, b) -} -func (m *Server) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Server.Marshal(b, m, deterministic) -} -func (m *Server) XXX_Merge(src proto.Message) { - xxx_messageInfo_Server.Merge(m, src) -} -func (m *Server) XXX_Size() int { - return xxx_messageInfo_Server.Size(m) -} -func (m *Server) XXX_DiscardUnknown() { - xxx_messageInfo_Server.DiscardUnknown(m) -} - -var xxx_messageInfo_Server proto.InternalMessageInfo - -func (m *Server) GetID() int64 { - if m != nil { - return m.ID - } - return 0 -} - -func (m *Server) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Server) GetUsername() string { - if m != nil { - return m.Username - } - return "" -} - -func (m *Server) GetPassword() string { - if m != nil { - return m.Password - } - return "" -} - -func (m *Server) GetURL() string { - if m != nil { - return m.URL - } - return "" -} - -func (m *Server) GetSrcID() int64 { - if m 
!= nil { - return m.SrcID - } - return 0 -} - -func (m *Server) GetActive() bool { - if m != nil { - return m.Active - } - return false -} - -func (m *Server) GetOrganization() string { - if m != nil { - return m.Organization - } - return "" -} - -func (m *Server) GetInsecureSkipVerify() bool { - if m != nil { - return m.InsecureSkipVerify - } - return false -} - -func (m *Server) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *Server) GetMetadataJSON() string { - if m != nil { - return m.MetadataJSON - } - return "" -} - -type Layout struct { - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - Application string `protobuf:"bytes,2,opt,name=Application,proto3" json:"Application,omitempty"` - Measurement string `protobuf:"bytes,3,opt,name=Measurement,proto3" json:"Measurement,omitempty"` - Cells []*Cell `protobuf:"bytes,4,rep,name=Cells,proto3" json:"Cells,omitempty"` - Autoflow bool `protobuf:"varint,5,opt,name=Autoflow,proto3" json:"Autoflow,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Layout) Reset() { *m = Layout{} } -func (m *Layout) String() string { return proto.CompactTextString(m) } -func (*Layout) ProtoMessage() {} -func (*Layout) Descriptor() ([]byte, []int) { - return fileDescriptor_41f4a519b878ee3b, []int{13} -} -func (m *Layout) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Layout.Unmarshal(m, b) -} -func (m *Layout) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Layout.Marshal(b, m, deterministic) -} -func (m *Layout) XXX_Merge(src proto.Message) { - xxx_messageInfo_Layout.Merge(m, src) -} -func (m *Layout) XXX_Size() int { - return xxx_messageInfo_Layout.Size(m) -} -func (m *Layout) XXX_DiscardUnknown() { - xxx_messageInfo_Layout.DiscardUnknown(m) -} - -var xxx_messageInfo_Layout proto.InternalMessageInfo - -func (m *Layout) GetID() string { - if m != nil { - return m.ID - } - return "" -} - -func (m *Layout) GetApplication() string { - if m != nil { - return m.Application - } - return "" -} - -func (m *Layout) GetMeasurement() string { - if m != nil { - return m.Measurement - } - return "" -} - -func (m *Layout) GetCells() []*Cell { - if m != nil { - return m.Cells - } - return nil -} - -func (m *Layout) GetAutoflow() bool { - if m != nil { - return m.Autoflow - } - return false -} - -type Cell struct { - X int32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"` - Y int32 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"` - W int32 `protobuf:"varint,3,opt,name=w,proto3" json:"w,omitempty"` - H int32 `protobuf:"varint,4,opt,name=h,proto3" json:"h,omitempty"` - Queries []*Query `protobuf:"bytes,5,rep,name=queries,proto3" json:"queries,omitempty"` - I string `protobuf:"bytes,6,opt,name=i,proto3" json:"i,omitempty"` - Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"` - Yranges []int64 `protobuf:"varint,8,rep,packed,name=yranges,proto3" json:"yranges,omitempty"` - Ylabels []string `protobuf:"bytes,9,rep,name=ylabels,proto3" json:"ylabels,omitempty"` - Type string `protobuf:"bytes,10,opt,name=type,proto3" json:"type,omitempty"` - Axes map[string]*Axis `protobuf:"bytes,11,rep,name=axes,proto3" json:"axes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Cell) Reset() 
{ *m = Cell{} } -func (m *Cell) String() string { return proto.CompactTextString(m) } -func (*Cell) ProtoMessage() {} -func (*Cell) Descriptor() ([]byte, []int) { - return fileDescriptor_41f4a519b878ee3b, []int{14} -} -func (m *Cell) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Cell.Unmarshal(m, b) -} -func (m *Cell) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Cell.Marshal(b, m, deterministic) -} -func (m *Cell) XXX_Merge(src proto.Message) { - xxx_messageInfo_Cell.Merge(m, src) -} -func (m *Cell) XXX_Size() int { - return xxx_messageInfo_Cell.Size(m) -} -func (m *Cell) XXX_DiscardUnknown() { - xxx_messageInfo_Cell.DiscardUnknown(m) -} - -var xxx_messageInfo_Cell proto.InternalMessageInfo - -func (m *Cell) GetX() int32 { - if m != nil { - return m.X - } - return 0 -} - -func (m *Cell) GetY() int32 { - if m != nil { - return m.Y - } - return 0 -} - -func (m *Cell) GetW() int32 { - if m != nil { - return m.W - } - return 0 -} - -func (m *Cell) GetH() int32 { - if m != nil { - return m.H - } - return 0 -} - -func (m *Cell) GetQueries() []*Query { - if m != nil { - return m.Queries - } - return nil -} - -func (m *Cell) GetI() string { - if m != nil { - return m.I - } - return "" -} - -func (m *Cell) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Cell) GetYranges() []int64 { - if m != nil { - return m.Yranges - } - return nil -} - -func (m *Cell) GetYlabels() []string { - if m != nil { - return m.Ylabels - } - return nil -} - -func (m *Cell) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *Cell) GetAxes() map[string]*Axis { - if m != nil { - return m.Axes - } - return nil -} - -type Query struct { - Command string `protobuf:"bytes,1,opt,name=Command,proto3" json:"Command,omitempty"` - DB string `protobuf:"bytes,2,opt,name=DB,proto3" json:"DB,omitempty"` - RP string `protobuf:"bytes,3,opt,name=RP,proto3" json:"RP,omitempty"` - GroupBys []string `protobuf:"bytes,4,rep,name=GroupBys,proto3" json:"GroupBys,omitempty"` - Wheres []string `protobuf:"bytes,5,rep,name=Wheres,proto3" json:"Wheres,omitempty"` - Label string `protobuf:"bytes,6,opt,name=Label,proto3" json:"Label,omitempty"` - Range *Range `protobuf:"bytes,7,opt,name=Range,proto3" json:"Range,omitempty"` - Source string `protobuf:"bytes,8,opt,name=Source,proto3" json:"Source,omitempty"` - Shifts []*TimeShift `protobuf:"bytes,9,rep,name=Shifts,proto3" json:"Shifts,omitempty"` - Type string `protobuf:"bytes,10,opt,name=Type,proto3" json:"Type,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Query) Reset() { *m = Query{} } -func (m *Query) String() string { return proto.CompactTextString(m) } -func (*Query) ProtoMessage() {} -func (*Query) Descriptor() ([]byte, []int) { - return fileDescriptor_41f4a519b878ee3b, []int{15} -} -func (m *Query) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Query.Unmarshal(m, b) -} -func (m *Query) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Query.Marshal(b, m, deterministic) -} -func (m *Query) XXX_Merge(src proto.Message) { - xxx_messageInfo_Query.Merge(m, src) -} -func (m *Query) XXX_Size() int { - return xxx_messageInfo_Query.Size(m) -} -func (m *Query) XXX_DiscardUnknown() { - xxx_messageInfo_Query.DiscardUnknown(m) -} - -var xxx_messageInfo_Query proto.InternalMessageInfo - -func (m *Query) GetCommand() string { - if m != nil { - return m.Command - } 
- return "" -} - -func (m *Query) GetDB() string { - if m != nil { - return m.DB - } - return "" -} - -func (m *Query) GetRP() string { - if m != nil { - return m.RP - } - return "" -} - -func (m *Query) GetGroupBys() []string { - if m != nil { - return m.GroupBys - } - return nil -} - -func (m *Query) GetWheres() []string { - if m != nil { - return m.Wheres - } - return nil -} - -func (m *Query) GetLabel() string { - if m != nil { - return m.Label - } - return "" -} - -func (m *Query) GetRange() *Range { - if m != nil { - return m.Range - } - return nil -} - -func (m *Query) GetSource() string { - if m != nil { - return m.Source - } - return "" -} - -func (m *Query) GetShifts() []*TimeShift { - if m != nil { - return m.Shifts - } - return nil -} - -func (m *Query) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -type TimeShift struct { - Label string `protobuf:"bytes,1,opt,name=Label,proto3" json:"Label,omitempty"` - Unit string `protobuf:"bytes,2,opt,name=Unit,proto3" json:"Unit,omitempty"` - Quantity string `protobuf:"bytes,3,opt,name=Quantity,proto3" json:"Quantity,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TimeShift) Reset() { *m = TimeShift{} } -func (m *TimeShift) String() string { return proto.CompactTextString(m) } -func (*TimeShift) ProtoMessage() {} -func (*TimeShift) Descriptor() ([]byte, []int) { - return fileDescriptor_41f4a519b878ee3b, []int{16} -} -func (m *TimeShift) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TimeShift.Unmarshal(m, b) -} -func (m *TimeShift) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TimeShift.Marshal(b, m, deterministic) -} -func (m *TimeShift) XXX_Merge(src proto.Message) { - xxx_messageInfo_TimeShift.Merge(m, src) -} -func (m *TimeShift) XXX_Size() int { - return xxx_messageInfo_TimeShift.Size(m) -} -func (m *TimeShift) XXX_DiscardUnknown() { - xxx_messageInfo_TimeShift.DiscardUnknown(m) -} - -var xxx_messageInfo_TimeShift proto.InternalMessageInfo - -func (m *TimeShift) GetLabel() string { - if m != nil { - return m.Label - } - return "" -} - -func (m *TimeShift) GetUnit() string { - if m != nil { - return m.Unit - } - return "" -} - -func (m *TimeShift) GetQuantity() string { - if m != nil { - return m.Quantity - } - return "" -} - -type Range struct { - Upper int64 `protobuf:"varint,1,opt,name=Upper,proto3" json:"Upper,omitempty"` - Lower int64 `protobuf:"varint,2,opt,name=Lower,proto3" json:"Lower,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Range) Reset() { *m = Range{} } -func (m *Range) String() string { return proto.CompactTextString(m) } -func (*Range) ProtoMessage() {} -func (*Range) Descriptor() ([]byte, []int) { - return fileDescriptor_41f4a519b878ee3b, []int{17} -} -func (m *Range) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Range.Unmarshal(m, b) -} -func (m *Range) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Range.Marshal(b, m, deterministic) -} -func (m *Range) XXX_Merge(src proto.Message) { - xxx_messageInfo_Range.Merge(m, src) -} -func (m *Range) XXX_Size() int { - return xxx_messageInfo_Range.Size(m) -} -func (m *Range) XXX_DiscardUnknown() { - xxx_messageInfo_Range.DiscardUnknown(m) -} - -var xxx_messageInfo_Range proto.InternalMessageInfo - -func (m *Range) GetUpper() int64 { - if m != nil { - return m.Upper - } - 
return 0 -} - -func (m *Range) GetLower() int64 { - if m != nil { - return m.Lower - } - return 0 -} - -type AlertRule struct { - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - JSON string `protobuf:"bytes,2,opt,name=JSON,proto3" json:"JSON,omitempty"` - SrcID int64 `protobuf:"varint,3,opt,name=SrcID,proto3" json:"SrcID,omitempty"` - KapaID int64 `protobuf:"varint,4,opt,name=KapaID,proto3" json:"KapaID,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AlertRule) Reset() { *m = AlertRule{} } -func (m *AlertRule) String() string { return proto.CompactTextString(m) } -func (*AlertRule) ProtoMessage() {} -func (*AlertRule) Descriptor() ([]byte, []int) { - return fileDescriptor_41f4a519b878ee3b, []int{18} -} -func (m *AlertRule) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AlertRule.Unmarshal(m, b) -} -func (m *AlertRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AlertRule.Marshal(b, m, deterministic) -} -func (m *AlertRule) XXX_Merge(src proto.Message) { - xxx_messageInfo_AlertRule.Merge(m, src) -} -func (m *AlertRule) XXX_Size() int { - return xxx_messageInfo_AlertRule.Size(m) -} -func (m *AlertRule) XXX_DiscardUnknown() { - xxx_messageInfo_AlertRule.DiscardUnknown(m) -} - -var xxx_messageInfo_AlertRule proto.InternalMessageInfo - -func (m *AlertRule) GetID() string { - if m != nil { - return m.ID - } - return "" -} - -func (m *AlertRule) GetJSON() string { - if m != nil { - return m.JSON - } - return "" -} - -func (m *AlertRule) GetSrcID() int64 { - if m != nil { - return m.SrcID - } - return 0 -} - -func (m *AlertRule) GetKapaID() int64 { - if m != nil { - return m.KapaID - } - return 0 -} - -type User struct { - ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - Name string `protobuf:"bytes,2,opt,name=Name,proto3" json:"Name,omitempty"` - Provider string `protobuf:"bytes,3,opt,name=Provider,proto3" json:"Provider,omitempty"` - Scheme string `protobuf:"bytes,4,opt,name=Scheme,proto3" json:"Scheme,omitempty"` - Roles []*Role `protobuf:"bytes,5,rep,name=Roles,proto3" json:"Roles,omitempty"` - SuperAdmin bool `protobuf:"varint,6,opt,name=SuperAdmin,proto3" json:"SuperAdmin,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *User) Reset() { *m = User{} } -func (m *User) String() string { return proto.CompactTextString(m) } -func (*User) ProtoMessage() {} -func (*User) Descriptor() ([]byte, []int) { - return fileDescriptor_41f4a519b878ee3b, []int{19} -} -func (m *User) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_User.Unmarshal(m, b) -} -func (m *User) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_User.Marshal(b, m, deterministic) -} -func (m *User) XXX_Merge(src proto.Message) { - xxx_messageInfo_User.Merge(m, src) -} -func (m *User) XXX_Size() int { - return xxx_messageInfo_User.Size(m) -} -func (m *User) XXX_DiscardUnknown() { - xxx_messageInfo_User.DiscardUnknown(m) -} - -var xxx_messageInfo_User proto.InternalMessageInfo - -func (m *User) GetID() uint64 { - if m != nil { - return m.ID - } - return 0 -} - -func (m *User) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *User) GetProvider() string { - if m != nil { - return m.Provider - } - return "" -} - -func (m *User) GetScheme() string { - if m != nil { - return m.Scheme - } - 
return "" -} - -func (m *User) GetRoles() []*Role { - if m != nil { - return m.Roles - } - return nil -} - -func (m *User) GetSuperAdmin() bool { - if m != nil { - return m.SuperAdmin - } - return false -} - -type Role struct { - Organization string `protobuf:"bytes,1,opt,name=Organization,proto3" json:"Organization,omitempty"` - Name string `protobuf:"bytes,2,opt,name=Name,proto3" json:"Name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Role) Reset() { *m = Role{} } -func (m *Role) String() string { return proto.CompactTextString(m) } -func (*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { - return fileDescriptor_41f4a519b878ee3b, []int{20} -} -func (m *Role) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Role.Unmarshal(m, b) -} -func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Role.Marshal(b, m, deterministic) -} -func (m *Role) XXX_Merge(src proto.Message) { - xxx_messageInfo_Role.Merge(m, src) -} -func (m *Role) XXX_Size() int { - return xxx_messageInfo_Role.Size(m) -} -func (m *Role) XXX_DiscardUnknown() { - xxx_messageInfo_Role.DiscardUnknown(m) -} - -var xxx_messageInfo_Role proto.InternalMessageInfo - -func (m *Role) GetOrganization() string { - if m != nil { - return m.Organization - } - return "" -} - -func (m *Role) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -type Mapping struct { - Provider string `protobuf:"bytes,1,opt,name=Provider,proto3" json:"Provider,omitempty"` - Scheme string `protobuf:"bytes,2,opt,name=Scheme,proto3" json:"Scheme,omitempty"` - ProviderOrganization string `protobuf:"bytes,3,opt,name=ProviderOrganization,proto3" json:"ProviderOrganization,omitempty"` - ID string `protobuf:"bytes,4,opt,name=ID,proto3" json:"ID,omitempty"` - Organization string `protobuf:"bytes,5,opt,name=Organization,proto3" json:"Organization,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Mapping) Reset() { *m = Mapping{} } -func (m *Mapping) String() string { return proto.CompactTextString(m) } -func (*Mapping) ProtoMessage() {} -func (*Mapping) Descriptor() ([]byte, []int) { - return fileDescriptor_41f4a519b878ee3b, []int{21} -} -func (m *Mapping) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Mapping.Unmarshal(m, b) -} -func (m *Mapping) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Mapping.Marshal(b, m, deterministic) -} -func (m *Mapping) XXX_Merge(src proto.Message) { - xxx_messageInfo_Mapping.Merge(m, src) -} -func (m *Mapping) XXX_Size() int { - return xxx_messageInfo_Mapping.Size(m) -} -func (m *Mapping) XXX_DiscardUnknown() { - xxx_messageInfo_Mapping.DiscardUnknown(m) -} - -var xxx_messageInfo_Mapping proto.InternalMessageInfo - -func (m *Mapping) GetProvider() string { - if m != nil { - return m.Provider - } - return "" -} - -func (m *Mapping) GetScheme() string { - if m != nil { - return m.Scheme - } - return "" -} - -func (m *Mapping) GetProviderOrganization() string { - if m != nil { - return m.ProviderOrganization - } - return "" -} - -func (m *Mapping) GetID() string { - if m != nil { - return m.ID - } - return "" -} - -func (m *Mapping) GetOrganization() string { - if m != nil { - return m.Organization - } - return "" -} - -type Organization struct { - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - Name 
string `protobuf:"bytes,2,opt,name=Name,proto3" json:"Name,omitempty"` - DefaultRole string `protobuf:"bytes,3,opt,name=DefaultRole,proto3" json:"DefaultRole,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Organization) Reset() { *m = Organization{} } -func (m *Organization) String() string { return proto.CompactTextString(m) } -func (*Organization) ProtoMessage() {} -func (*Organization) Descriptor() ([]byte, []int) { - return fileDescriptor_41f4a519b878ee3b, []int{22} -} -func (m *Organization) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Organization.Unmarshal(m, b) -} -func (m *Organization) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Organization.Marshal(b, m, deterministic) -} -func (m *Organization) XXX_Merge(src proto.Message) { - xxx_messageInfo_Organization.Merge(m, src) -} -func (m *Organization) XXX_Size() int { - return xxx_messageInfo_Organization.Size(m) -} -func (m *Organization) XXX_DiscardUnknown() { - xxx_messageInfo_Organization.DiscardUnknown(m) -} - -var xxx_messageInfo_Organization proto.InternalMessageInfo - -func (m *Organization) GetID() string { - if m != nil { - return m.ID - } - return "" -} - -func (m *Organization) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Organization) GetDefaultRole() string { - if m != nil { - return m.DefaultRole - } - return "" -} - -type Config struct { - Auth *AuthConfig `protobuf:"bytes,1,opt,name=Auth,proto3" json:"Auth,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Config) Reset() { *m = Config{} } -func (m *Config) String() string { return proto.CompactTextString(m) } -func (*Config) ProtoMessage() {} -func (*Config) Descriptor() ([]byte, []int) { - return fileDescriptor_41f4a519b878ee3b, []int{23} -} -func (m *Config) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Config.Unmarshal(m, b) -} -func (m *Config) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Config.Marshal(b, m, deterministic) -} -func (m *Config) XXX_Merge(src proto.Message) { - xxx_messageInfo_Config.Merge(m, src) -} -func (m *Config) XXX_Size() int { - return xxx_messageInfo_Config.Size(m) -} -func (m *Config) XXX_DiscardUnknown() { - xxx_messageInfo_Config.DiscardUnknown(m) -} - -var xxx_messageInfo_Config proto.InternalMessageInfo - -func (m *Config) GetAuth() *AuthConfig { - if m != nil { - return m.Auth - } - return nil -} - -type AuthConfig struct { - SuperAdminNewUsers bool `protobuf:"varint,1,opt,name=SuperAdminNewUsers,proto3" json:"SuperAdminNewUsers,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthConfig) Reset() { *m = AuthConfig{} } -func (m *AuthConfig) String() string { return proto.CompactTextString(m) } -func (*AuthConfig) ProtoMessage() {} -func (*AuthConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_41f4a519b878ee3b, []int{24} -} -func (m *AuthConfig) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AuthConfig.Unmarshal(m, b) -} -func (m *AuthConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AuthConfig.Marshal(b, m, deterministic) -} -func (m *AuthConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthConfig.Merge(m, src) -} -func (m *AuthConfig) XXX_Size() int { - 
return xxx_messageInfo_AuthConfig.Size(m) -} -func (m *AuthConfig) XXX_DiscardUnknown() { - xxx_messageInfo_AuthConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_AuthConfig proto.InternalMessageInfo - -func (m *AuthConfig) GetSuperAdminNewUsers() bool { - if m != nil { - return m.SuperAdminNewUsers - } - return false -} - -type OrganizationConfig struct { - OrganizationID string `protobuf:"bytes,1,opt,name=OrganizationID,proto3" json:"OrganizationID,omitempty"` - LogViewer *LogViewerConfig `protobuf:"bytes,2,opt,name=LogViewer,proto3" json:"LogViewer,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *OrganizationConfig) Reset() { *m = OrganizationConfig{} } -func (m *OrganizationConfig) String() string { return proto.CompactTextString(m) } -func (*OrganizationConfig) ProtoMessage() {} -func (*OrganizationConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_41f4a519b878ee3b, []int{25} -} -func (m *OrganizationConfig) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_OrganizationConfig.Unmarshal(m, b) -} -func (m *OrganizationConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_OrganizationConfig.Marshal(b, m, deterministic) -} -func (m *OrganizationConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_OrganizationConfig.Merge(m, src) -} -func (m *OrganizationConfig) XXX_Size() int { - return xxx_messageInfo_OrganizationConfig.Size(m) -} -func (m *OrganizationConfig) XXX_DiscardUnknown() { - xxx_messageInfo_OrganizationConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_OrganizationConfig proto.InternalMessageInfo - -func (m *OrganizationConfig) GetOrganizationID() string { - if m != nil { - return m.OrganizationID - } - return "" -} - -func (m *OrganizationConfig) GetLogViewer() *LogViewerConfig { - if m != nil { - return m.LogViewer - } - return nil -} - -type LogViewerConfig struct { - Columns []*LogViewerColumn `protobuf:"bytes,1,rep,name=Columns,proto3" json:"Columns,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogViewerConfig) Reset() { *m = LogViewerConfig{} } -func (m *LogViewerConfig) String() string { return proto.CompactTextString(m) } -func (*LogViewerConfig) ProtoMessage() {} -func (*LogViewerConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_41f4a519b878ee3b, []int{26} -} -func (m *LogViewerConfig) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogViewerConfig.Unmarshal(m, b) -} -func (m *LogViewerConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LogViewerConfig.Marshal(b, m, deterministic) -} -func (m *LogViewerConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogViewerConfig.Merge(m, src) -} -func (m *LogViewerConfig) XXX_Size() int { - return xxx_messageInfo_LogViewerConfig.Size(m) -} -func (m *LogViewerConfig) XXX_DiscardUnknown() { - xxx_messageInfo_LogViewerConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_LogViewerConfig proto.InternalMessageInfo - -func (m *LogViewerConfig) GetColumns() []*LogViewerColumn { - if m != nil { - return m.Columns - } - return nil -} - -type LogViewerColumn struct { - Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` - Position int32 `protobuf:"varint,2,opt,name=Position,proto3" json:"Position,omitempty"` - Encodings []*ColumnEncoding `protobuf:"bytes,3,rep,name=Encodings,proto3" 
json:"Encodings,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogViewerColumn) Reset() { *m = LogViewerColumn{} } -func (m *LogViewerColumn) String() string { return proto.CompactTextString(m) } -func (*LogViewerColumn) ProtoMessage() {} -func (*LogViewerColumn) Descriptor() ([]byte, []int) { - return fileDescriptor_41f4a519b878ee3b, []int{27} -} -func (m *LogViewerColumn) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogViewerColumn.Unmarshal(m, b) -} -func (m *LogViewerColumn) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LogViewerColumn.Marshal(b, m, deterministic) -} -func (m *LogViewerColumn) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogViewerColumn.Merge(m, src) -} -func (m *LogViewerColumn) XXX_Size() int { - return xxx_messageInfo_LogViewerColumn.Size(m) -} -func (m *LogViewerColumn) XXX_DiscardUnknown() { - xxx_messageInfo_LogViewerColumn.DiscardUnknown(m) -} - -var xxx_messageInfo_LogViewerColumn proto.InternalMessageInfo - -func (m *LogViewerColumn) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *LogViewerColumn) GetPosition() int32 { - if m != nil { - return m.Position - } - return 0 -} - -func (m *LogViewerColumn) GetEncodings() []*ColumnEncoding { - if m != nil { - return m.Encodings - } - return nil -} - -type ColumnEncoding struct { - Type string `protobuf:"bytes,1,opt,name=Type,proto3" json:"Type,omitempty"` - Value string `protobuf:"bytes,2,opt,name=Value,proto3" json:"Value,omitempty"` - Name string `protobuf:"bytes,3,opt,name=Name,proto3" json:"Name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ColumnEncoding) Reset() { *m = ColumnEncoding{} } -func (m *ColumnEncoding) String() string { return proto.CompactTextString(m) } -func (*ColumnEncoding) ProtoMessage() {} -func (*ColumnEncoding) Descriptor() ([]byte, []int) { - return fileDescriptor_41f4a519b878ee3b, []int{28} -} -func (m *ColumnEncoding) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ColumnEncoding.Unmarshal(m, b) -} -func (m *ColumnEncoding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ColumnEncoding.Marshal(b, m, deterministic) -} -func (m *ColumnEncoding) XXX_Merge(src proto.Message) { - xxx_messageInfo_ColumnEncoding.Merge(m, src) -} -func (m *ColumnEncoding) XXX_Size() int { - return xxx_messageInfo_ColumnEncoding.Size(m) -} -func (m *ColumnEncoding) XXX_DiscardUnknown() { - xxx_messageInfo_ColumnEncoding.DiscardUnknown(m) -} - -var xxx_messageInfo_ColumnEncoding proto.InternalMessageInfo - -func (m *ColumnEncoding) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *ColumnEncoding) GetValue() string { - if m != nil { - return m.Value - } - return "" -} - -func (m *ColumnEncoding) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -type BuildInfo struct { - Version string `protobuf:"bytes,1,opt,name=Version,proto3" json:"Version,omitempty"` - Commit string `protobuf:"bytes,2,opt,name=Commit,proto3" json:"Commit,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BuildInfo) Reset() { *m = BuildInfo{} } -func (m *BuildInfo) String() string { return proto.CompactTextString(m) } -func (*BuildInfo) ProtoMessage() {} -func (*BuildInfo) Descriptor() 
([]byte, []int) { - return fileDescriptor_41f4a519b878ee3b, []int{29} -} -func (m *BuildInfo) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BuildInfo.Unmarshal(m, b) -} -func (m *BuildInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BuildInfo.Marshal(b, m, deterministic) -} -func (m *BuildInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_BuildInfo.Merge(m, src) -} -func (m *BuildInfo) XXX_Size() int { - return xxx_messageInfo_BuildInfo.Size(m) -} -func (m *BuildInfo) XXX_DiscardUnknown() { - xxx_messageInfo_BuildInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_BuildInfo proto.InternalMessageInfo - -func (m *BuildInfo) GetVersion() string { - if m != nil { - return m.Version - } - return "" -} - -func (m *BuildInfo) GetCommit() string { - if m != nil { - return m.Commit - } - return "" -} - -func init() { - proto.RegisterType((*Source)(nil), "internal.Source") - proto.RegisterType((*Dashboard)(nil), "internal.Dashboard") - proto.RegisterType((*DashboardCell)(nil), "internal.DashboardCell") - proto.RegisterMapType((map[string]*Axis)(nil), "internal.DashboardCell.AxesEntry") - proto.RegisterType((*DecimalPlaces)(nil), "internal.DecimalPlaces") - proto.RegisterType((*TableOptions)(nil), "internal.TableOptions") - proto.RegisterType((*RenamableField)(nil), "internal.RenamableField") - proto.RegisterType((*Color)(nil), "internal.Color") - proto.RegisterType((*Legend)(nil), "internal.Legend") - proto.RegisterType((*Axis)(nil), "internal.Axis") - proto.RegisterType((*Template)(nil), "internal.Template") - proto.RegisterType((*TemplateValue)(nil), "internal.TemplateValue") - proto.RegisterType((*TemplateQuery)(nil), "internal.TemplateQuery") - proto.RegisterType((*Server)(nil), "internal.Server") - proto.RegisterType((*Layout)(nil), "internal.Layout") - proto.RegisterType((*Cell)(nil), "internal.Cell") - proto.RegisterMapType((map[string]*Axis)(nil), "internal.Cell.AxesEntry") - proto.RegisterType((*Query)(nil), "internal.Query") - proto.RegisterType((*TimeShift)(nil), "internal.TimeShift") - proto.RegisterType((*Range)(nil), "internal.Range") - proto.RegisterType((*AlertRule)(nil), "internal.AlertRule") - proto.RegisterType((*User)(nil), "internal.User") - proto.RegisterType((*Role)(nil), "internal.Role") - proto.RegisterType((*Mapping)(nil), "internal.Mapping") - proto.RegisterType((*Organization)(nil), "internal.Organization") - proto.RegisterType((*Config)(nil), "internal.Config") - proto.RegisterType((*AuthConfig)(nil), "internal.AuthConfig") - proto.RegisterType((*OrganizationConfig)(nil), "internal.OrganizationConfig") - proto.RegisterType((*LogViewerConfig)(nil), "internal.LogViewerConfig") - proto.RegisterType((*LogViewerColumn)(nil), "internal.LogViewerColumn") - proto.RegisterType((*ColumnEncoding)(nil), "internal.ColumnEncoding") - proto.RegisterType((*BuildInfo)(nil), "internal.BuildInfo") -} - -func init() { proto.RegisterFile("internal.proto", fileDescriptor_41f4a519b878ee3b) } - -var fileDescriptor_41f4a519b878ee3b = []byte{ - // 1810 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4b, 0x6f, 0xdc, 0xc8, - 0x11, 0x06, 0x67, 0x86, 0xa3, 0x61, 0x8d, 0x24, 0x0b, 0x1d, 0x63, 0x97, 0xbb, 0x09, 0x82, 0x09, - 0x91, 0x6c, 0x94, 0xc7, 0x3a, 0x0b, 0x19, 0x79, 0x60, 0xb1, 0xbb, 0x80, 0x1e, 0xb6, 0x23, 0x5b, - 0xb6, 0xe5, 0x96, 0xac, 0x9c, 0x82, 0x45, 0x8b, 0xec, 0x99, 0x69, 0x98, 0x43, 0x32, 0x4d, 0x52, - 0x12, 0x73, 0xce, 0x2d, 0xff, 0x21, 0x40, 0x80, 0xe4, 0x1e, 0x04, 
0x39, 0x06, 0xc8, 0x3d, 0x3f, - 0x20, 0xbf, 0x27, 0xa8, 0x7e, 0x90, 0x4d, 0x69, 0x6c, 0x38, 0x40, 0xb0, 0xb7, 0xfe, 0xaa, 0x6a, - 0xaa, 0xab, 0xab, 0xab, 0xbe, 0x2e, 0x0e, 0x6c, 0x8b, 0xac, 0xe2, 0x32, 0x63, 0xe9, 0x83, 0x42, - 0xe6, 0x55, 0x4e, 0x26, 0x16, 0x47, 0x7f, 0x18, 0xc2, 0xf8, 0x2c, 0xaf, 0x65, 0xcc, 0xc9, 0x36, - 0x0c, 0x8e, 0x8f, 0x42, 0x6f, 0xe6, 0xed, 0x0e, 0xe9, 0xe0, 0xf8, 0x88, 0x10, 0x18, 0xbd, 0x60, - 0x2b, 0x1e, 0x0e, 0x66, 0xde, 0x6e, 0x40, 0xd5, 0x1a, 0x65, 0xe7, 0x4d, 0xc1, 0xc3, 0xa1, 0x96, - 0xe1, 0x9a, 0x7c, 0x0c, 0x93, 0xd7, 0x25, 0x7a, 0x5b, 0xf1, 0x70, 0xa4, 0xe4, 0x2d, 0x46, 0xdd, - 0x29, 0x2b, 0xcb, 0xeb, 0x5c, 0x26, 0xa1, 0xaf, 0x75, 0x16, 0x93, 0x1d, 0x18, 0xbe, 0xa6, 0x27, - 0xe1, 0x58, 0x89, 0x71, 0x49, 0x42, 0xd8, 0x38, 0xe2, 0x73, 0x56, 0xa7, 0x55, 0xb8, 0x31, 0xf3, - 0x76, 0x27, 0xd4, 0x42, 0xf4, 0x73, 0xce, 0x53, 0xbe, 0x90, 0x6c, 0x1e, 0x4e, 0xb4, 0x1f, 0x8b, - 0xc9, 0x03, 0x20, 0xc7, 0x59, 0xc9, 0xe3, 0x5a, 0xf2, 0xb3, 0x37, 0xa2, 0xb8, 0xe0, 0x52, 0xcc, - 0x9b, 0x30, 0x50, 0x0e, 0xd6, 0x68, 0x70, 0x97, 0xe7, 0xbc, 0x62, 0xb8, 0x37, 0x28, 0x57, 0x16, - 0x92, 0x08, 0x36, 0xcf, 0x96, 0x4c, 0xf2, 0xe4, 0x8c, 0xc7, 0x92, 0x57, 0xe1, 0x54, 0xa9, 0x7b, - 0x32, 0xb4, 0x79, 0x29, 0x17, 0x2c, 0x13, 0xbf, 0x67, 0x95, 0xc8, 0xb3, 0x70, 0x53, 0xdb, 0xb8, - 0x32, 0xcc, 0x12, 0xcd, 0x53, 0x1e, 0x6e, 0xe9, 0x2c, 0xe1, 0x9a, 0x7c, 0x07, 0x02, 0x73, 0x18, - 0x7a, 0x1a, 0x6e, 0x2b, 0x45, 0x27, 0x88, 0xfe, 0xe1, 0x41, 0x70, 0xc4, 0xca, 0xe5, 0x65, 0xce, - 0x64, 0xf2, 0x5e, 0x37, 0xf1, 0x29, 0xf8, 0x31, 0x4f, 0xd3, 0x32, 0x1c, 0xce, 0x86, 0xbb, 0xd3, - 0xbd, 0x0f, 0x1f, 0xb4, 0x57, 0xdc, 0xfa, 0x39, 0xe4, 0x69, 0x4a, 0xb5, 0x15, 0xf9, 0x0c, 0x82, - 0x8a, 0xaf, 0x8a, 0x94, 0x55, 0xbc, 0x0c, 0x47, 0xea, 0x27, 0xa4, 0xfb, 0xc9, 0xb9, 0x51, 0xd1, - 0xce, 0xe8, 0xce, 0x41, 0xfd, 0xbb, 0x07, 0x8d, 0xfe, 0x33, 0x82, 0xad, 0xde, 0x76, 0x64, 0x13, - 0xbc, 0x1b, 0x15, 0xb9, 0x4f, 0xbd, 0x1b, 0x44, 0x8d, 0x8a, 0xda, 0xa7, 0x5e, 0x83, 0xe8, 0x5a, - 0x55, 0x8e, 0x4f, 0xbd, 0x6b, 0x44, 0x4b, 0x55, 0x2f, 0x3e, 0xf5, 0x96, 0xe4, 0x47, 0xb0, 0xf1, - 0xbb, 0x9a, 0x4b, 0xc1, 0xcb, 0xd0, 0x57, 0xd1, 0xdd, 0xeb, 0xa2, 0x7b, 0x55, 0x73, 0xd9, 0x50, - 0xab, 0xc7, 0x6c, 0xa8, 0x5a, 0xd3, 0x85, 0xa3, 0xd6, 0x28, 0xab, 0xb0, 0x2e, 0x37, 0xb4, 0x0c, - 0xd7, 0x26, 0x8b, 0xba, 0x5a, 0x30, 0x8b, 0x3f, 0x87, 0x11, 0xbb, 0xe1, 0x65, 0x18, 0x28, 0xff, - 0xdf, 0x7b, 0x4b, 0xc2, 0x1e, 0xec, 0xdf, 0xf0, 0xf2, 0x51, 0x56, 0xc9, 0x86, 0x2a, 0x73, 0xf2, - 0x43, 0x18, 0xc7, 0x79, 0x9a, 0xcb, 0x32, 0x84, 0xdb, 0x81, 0x1d, 0xa2, 0x9c, 0x1a, 0x35, 0xd9, - 0x85, 0x71, 0xca, 0x17, 0x3c, 0x4b, 0x54, 0xdd, 0x4c, 0xf7, 0x76, 0x3a, 0xc3, 0x13, 0x25, 0xa7, - 0x46, 0x4f, 0x3e, 0x87, 0xcd, 0x8a, 0x5d, 0xa6, 0xfc, 0x65, 0x81, 0x59, 0x2c, 0x55, 0x0d, 0x4d, - 0xf7, 0x3e, 0x70, 0xee, 0xc3, 0xd1, 0xd2, 0x9e, 0x2d, 0xf9, 0x02, 0x36, 0xe7, 0x82, 0xa7, 0x89, - 0xfd, 0xed, 0x96, 0x0a, 0x2a, 0xec, 0x7e, 0x4b, 0x79, 0xc6, 0x56, 0xf8, 0x8b, 0xc7, 0x68, 0x46, - 0x7b, 0xd6, 0xe4, 0xbb, 0x00, 0x95, 0x58, 0xf1, 0xc7, 0xb9, 0x5c, 0xb1, 0xca, 0x94, 0xa1, 0x23, - 0x21, 0x5f, 0xc2, 0x56, 0xc2, 0x63, 0xb1, 0x62, 0xe9, 0x69, 0xca, 0x62, 0x5e, 0x86, 0xf7, 0x54, - 0x68, 0x6e, 0x75, 0xb9, 0x6a, 0xda, 0xb7, 0xfe, 0xf8, 0x09, 0x04, 0x6d, 0xfa, 0xb0, 0xbf, 0xdf, - 0xf0, 0x46, 0x15, 0x43, 0x40, 0x71, 0x49, 0xbe, 0x0f, 0xfe, 0x15, 0x4b, 0x6b, 0x5d, 0xc8, 0xd3, - 0xbd, 0xed, 0xce, 0xeb, 0xfe, 0x8d, 0x28, 0xa9, 0x56, 0x7e, 0x3e, 0xf8, 0x95, 0x17, 0x3d, 0x81, - 0xad, 0xde, 0x46, 0x18, 0xb8, 0x28, 0x1f, 0x65, 0xf3, 0x5c, 0xc6, 0x3c, 0x51, 0x3e, 0x27, 
0xd4, - 0x91, 0x90, 0x0f, 0x60, 0x9c, 0x88, 0x85, 0xa8, 0x4a, 0x53, 0x6e, 0x06, 0x45, 0xff, 0xf4, 0x60, - 0xd3, 0xcd, 0x26, 0xf9, 0x31, 0xec, 0x5c, 0x71, 0x59, 0x89, 0x98, 0xa5, 0xe7, 0x62, 0xc5, 0x71, - 0x63, 0xf5, 0x93, 0x09, 0xbd, 0x23, 0x27, 0x9f, 0xc1, 0xb8, 0xcc, 0x65, 0x75, 0xd0, 0xa8, 0xaa, - 0x7d, 0x57, 0x96, 0x8d, 0x1d, 0xf2, 0xd4, 0xb5, 0x64, 0x45, 0x21, 0xb2, 0x85, 0xe5, 0x42, 0x8b, - 0xc9, 0x27, 0xb0, 0x3d, 0x17, 0x37, 0x8f, 0x85, 0x2c, 0xab, 0xc3, 0x3c, 0xad, 0x57, 0x99, 0xaa, - 0xe0, 0x09, 0xbd, 0x25, 0x7d, 0x3a, 0x9a, 0x78, 0x3b, 0x83, 0xa7, 0xa3, 0x89, 0xbf, 0x33, 0x8e, - 0x0a, 0xd8, 0xee, 0xef, 0x84, 0x6d, 0x69, 0x83, 0x50, 0x9c, 0xa0, 0xd3, 0xdb, 0x93, 0x91, 0x19, - 0x4c, 0x13, 0x51, 0x16, 0x29, 0x6b, 0x1c, 0xda, 0x70, 0x45, 0xc8, 0x81, 0x57, 0xa2, 0x14, 0x97, - 0xa9, 0xa6, 0xf2, 0x09, 0xb5, 0x30, 0x5a, 0x80, 0xaf, 0xca, 0xda, 0x21, 0xa1, 0xc0, 0x92, 0x90, - 0xa2, 0xfe, 0x81, 0x43, 0xfd, 0x3b, 0x30, 0xfc, 0x35, 0xbf, 0x31, 0xaf, 0x01, 0x2e, 0x5b, 0xaa, - 0x1a, 0x39, 0x54, 0x75, 0x1f, 0xfc, 0x0b, 0x75, 0xed, 0x9a, 0x42, 0x34, 0x88, 0xbe, 0x82, 0xb1, - 0x6e, 0x8b, 0xd6, 0xb3, 0xe7, 0x78, 0x9e, 0xc1, 0xf4, 0xa5, 0x14, 0x3c, 0xab, 0x34, 0xf9, 0x98, - 0x23, 0x38, 0xa2, 0xe8, 0xef, 0x1e, 0x8c, 0xd4, 0x2d, 0x45, 0xb0, 0x99, 0xf2, 0x05, 0x8b, 0x9b, - 0x83, 0xbc, 0xce, 0x92, 0x32, 0xf4, 0x66, 0xc3, 0xdd, 0x21, 0xed, 0xc9, 0xb0, 0x3c, 0x2e, 0xb5, - 0x76, 0x30, 0x1b, 0xee, 0x06, 0xd4, 0x20, 0x0c, 0x2d, 0x65, 0x97, 0x3c, 0x35, 0x47, 0xd0, 0x00, - 0xad, 0x0b, 0xc9, 0xe7, 0xe2, 0xc6, 0x1c, 0xc3, 0x20, 0x94, 0x97, 0xf5, 0x1c, 0xe5, 0xfa, 0x24, - 0x06, 0xe1, 0x01, 0x2e, 0x59, 0xd9, 0x32, 0x12, 0xae, 0xd1, 0x73, 0x19, 0xb3, 0xd4, 0x52, 0x92, - 0x06, 0xd1, 0xbf, 0x3c, 0x7c, 0xc8, 0x34, 0xc5, 0xde, 0xc9, 0xf0, 0x47, 0x30, 0x41, 0xfa, 0xfd, - 0xfa, 0x8a, 0x49, 0x73, 0xe0, 0x0d, 0xc4, 0x17, 0x4c, 0x92, 0x9f, 0xc1, 0x58, 0x35, 0xc7, 0x1a, - 0xba, 0xb7, 0xee, 0x54, 0x56, 0xa9, 0x31, 0x6b, 0x09, 0x71, 0xe4, 0x10, 0x62, 0x7b, 0x58, 0xdf, - 0x3d, 0xec, 0xa7, 0xe0, 0x23, 0xb3, 0x36, 0x2a, 0xfa, 0xb5, 0x9e, 0x35, 0xff, 0x6a, 0xab, 0x68, - 0x01, 0x5b, 0xbd, 0x1d, 0xdb, 0x9d, 0xbc, 0xfe, 0x4e, 0x5d, 0xa3, 0x07, 0xa6, 0xb1, 0xb1, 0x39, - 0x4a, 0x9e, 0xf2, 0xb8, 0xe2, 0x89, 0xa9, 0xba, 0x16, 0x5b, 0xb2, 0x18, 0xb5, 0x64, 0x11, 0xfd, - 0xd9, 0xeb, 0x76, 0x52, 0x11, 0x60, 0xd1, 0xc6, 0xf9, 0x6a, 0xc5, 0xb2, 0xc4, 0x6c, 0x66, 0x21, - 0x66, 0x32, 0xb9, 0x34, 0x9b, 0x0d, 0x92, 0x4b, 0xc4, 0xb2, 0x30, 0x77, 0x3a, 0x90, 0x05, 0x56, - 0xd3, 0x8a, 0xb3, 0xb2, 0x96, 0x7c, 0xc5, 0xb3, 0xca, 0xec, 0xe2, 0x8a, 0xc8, 0x87, 0xb0, 0x51, - 0xb1, 0xc5, 0xd7, 0x18, 0x83, 0xb9, 0xdb, 0x8a, 0x2d, 0x9e, 0xf1, 0x86, 0x7c, 0x1b, 0x02, 0xc5, - 0xa0, 0x4a, 0xa5, 0x2f, 0x78, 0xa2, 0x04, 0xcf, 0x78, 0x13, 0xfd, 0x6d, 0x00, 0xe3, 0x33, 0x2e, - 0xaf, 0xb8, 0x7c, 0xaf, 0x37, 0xdb, 0x9d, 0x94, 0x86, 0xef, 0x98, 0x94, 0x46, 0xeb, 0x27, 0x25, - 0xbf, 0x9b, 0x94, 0xee, 0x83, 0x7f, 0x26, 0xe3, 0xe3, 0x23, 0x15, 0xd1, 0x90, 0x6a, 0x80, 0xf5, - 0xb9, 0x1f, 0x57, 0xe2, 0x8a, 0x9b, 0xf1, 0xc9, 0xa0, 0x3b, 0x4f, 0xf9, 0x64, 0xcd, 0xcc, 0xf2, - 0xbf, 0x4e, 0x51, 0xb6, 0x69, 0xc1, 0x69, 0xda, 0x08, 0x36, 0x71, 0x94, 0x4a, 0x58, 0xc5, 0x9e, - 0x9e, 0xbd, 0x7c, 0x61, 0xe7, 0x27, 0x57, 0x16, 0xfd, 0xc9, 0x83, 0xf1, 0x09, 0x6b, 0xf2, 0xba, - 0xba, 0x53, 0xff, 0x33, 0x98, 0xee, 0x17, 0x45, 0x2a, 0xe2, 0x5e, 0xcf, 0x3b, 0x22, 0xb4, 0x78, - 0xee, 0xdc, 0xa3, 0xce, 0xa1, 0x2b, 0xc2, 0x27, 0xe6, 0x50, 0x8d, 0x45, 0x7a, 0xc6, 0x71, 0x9e, - 0x18, 0x3d, 0x0d, 0x29, 0x25, 0x26, 0x7b, 0xbf, 0xae, 0xf2, 0x79, 0x9a, 0x5f, 0xab, 0xac, 0x4e, - 0x68, 0x8b, 0xa3, 
0x7f, 0x0f, 0x60, 0xf4, 0x4d, 0x8d, 0x32, 0x9b, 0xe0, 0x09, 0x53, 0x54, 0x9e, - 0x68, 0x07, 0x9b, 0x0d, 0x67, 0xb0, 0x09, 0x61, 0xa3, 0x91, 0x2c, 0x5b, 0xf0, 0x32, 0x9c, 0x28, - 0x5e, 0xb3, 0x50, 0x69, 0x54, 0x07, 0xeb, 0x89, 0x26, 0xa0, 0x16, 0xb6, 0x1d, 0x09, 0x4e, 0x47, - 0xfe, 0xd4, 0x0c, 0x3f, 0xd3, 0xdb, 0xe3, 0xc2, 0xba, 0x99, 0xe7, 0xff, 0xf7, 0x8e, 0xff, 0x71, - 0x00, 0x7e, 0xdb, 0xbc, 0x87, 0xfd, 0xe6, 0x3d, 0xec, 0x9a, 0xf7, 0xe8, 0xc0, 0x36, 0xef, 0xd1, - 0x01, 0x62, 0x7a, 0x6a, 0x9b, 0x97, 0x9e, 0xe2, 0x65, 0x3d, 0x91, 0x79, 0x5d, 0x1c, 0x34, 0xfa, - 0x56, 0x03, 0xda, 0x62, 0xac, 0xf8, 0xdf, 0x2c, 0xb9, 0x34, 0xa9, 0x0e, 0xa8, 0x41, 0xd8, 0x1f, - 0x27, 0x8a, 0xea, 0x74, 0x72, 0x35, 0x20, 0x3f, 0x00, 0x9f, 0x62, 0xf2, 0x54, 0x86, 0x7b, 0xf7, - 0xa2, 0xc4, 0x54, 0x6b, 0xd1, 0xa9, 0xfe, 0x24, 0x32, 0x8d, 0x62, 0x3f, 0x90, 0x7e, 0x02, 0xe3, - 0xb3, 0xa5, 0x98, 0x57, 0x76, 0x84, 0xfc, 0x96, 0x43, 0x95, 0x62, 0xc5, 0x95, 0x8e, 0x1a, 0x93, - 0x75, 0xfd, 0x11, 0xbd, 0x82, 0xa0, 0x35, 0xec, 0x42, 0xf4, 0xdc, 0x10, 0x09, 0x8c, 0x5e, 0x67, - 0xa2, 0xb2, 0xb4, 0x81, 0x6b, 0x4c, 0xc0, 0xab, 0x9a, 0x65, 0x95, 0xa8, 0x1a, 0x4b, 0x1b, 0x16, - 0x47, 0x0f, 0xcd, 0x91, 0xd0, 0xdd, 0xeb, 0xa2, 0xe0, 0xd2, 0x50, 0x90, 0x06, 0x6a, 0x93, 0xfc, - 0x9a, 0xeb, 0xf7, 0x64, 0x48, 0x35, 0x88, 0x7e, 0x0b, 0xc1, 0x7e, 0xca, 0x65, 0x45, 0xeb, 0x94, - 0xaf, 0x7b, 0xe7, 0x55, 0xf3, 0x9a, 0x08, 0x70, 0xdd, 0xd1, 0xcd, 0xf0, 0x16, 0xdd, 0x3c, 0x63, - 0x05, 0x3b, 0x3e, 0x52, 0xb5, 0x3f, 0xa4, 0x06, 0x45, 0x7f, 0xf1, 0x60, 0x84, 0xbc, 0xe6, 0xb8, - 0x1e, 0xbd, 0x8b, 0x13, 0x4f, 0x65, 0x7e, 0x25, 0x12, 0x2e, 0xed, 0xe1, 0x2c, 0x56, 0x17, 0x11, - 0x2f, 0x79, 0x3b, 0x4e, 0x18, 0x84, 0xf5, 0x87, 0xdf, 0x54, 0xb6, 0xbf, 0x9c, 0xfa, 0x43, 0x31, - 0xd5, 0x4a, 0x1c, 0x19, 0xcf, 0xea, 0x82, 0xcb, 0xfd, 0x64, 0x25, 0xec, 0xac, 0xe5, 0x48, 0xa2, - 0xaf, 0xf4, 0x57, 0xda, 0x1d, 0x76, 0xf4, 0xd6, 0x7f, 0xd1, 0xdd, 0x8e, 0x3c, 0xfa, 0xab, 0x07, - 0x1b, 0xcf, 0xcd, 0x6c, 0xe7, 0x9e, 0xc2, 0x7b, 0xeb, 0x29, 0x06, 0xbd, 0x53, 0xec, 0xc1, 0x7d, - 0x6b, 0xd3, 0xdb, 0x5f, 0x67, 0x61, 0xad, 0xce, 0x64, 0x74, 0xd4, 0x5e, 0xd6, 0xfb, 0x7c, 0xa4, - 0x9d, 0xf7, 0x6d, 0xd6, 0x5d, 0xf8, 0x9d, 0x5b, 0x99, 0xc1, 0xd4, 0x7e, 0x9c, 0xe6, 0xa9, 0x7d, - 0xac, 0x5c, 0x51, 0xb4, 0x07, 0xe3, 0xc3, 0x3c, 0x9b, 0x8b, 0x05, 0xd9, 0x85, 0xd1, 0x7e, 0x5d, - 0x2d, 0x95, 0xc7, 0xe9, 0xde, 0x7d, 0x87, 0x0c, 0xea, 0x6a, 0xa9, 0x6d, 0xa8, 0xb2, 0x88, 0xbe, - 0x00, 0xe8, 0x64, 0xf8, 0xe2, 0x74, 0xb7, 0xf1, 0x82, 0x5f, 0x63, 0xc9, 0x94, 0x66, 0xb4, 0x5f, - 0xa3, 0x89, 0x6a, 0x20, 0xee, 0x39, 0x8c, 0x97, 0x4f, 0x60, 0xdb, 0x95, 0xb6, 0x27, 0xbb, 0x25, - 0x25, 0xbf, 0x84, 0xe0, 0x24, 0x5f, 0x5c, 0x08, 0x6e, 0xbb, 0x61, 0xba, 0xf7, 0x91, 0xf3, 0x81, - 0x66, 0x55, 0x26, 0xde, 0xce, 0x36, 0x7a, 0x0c, 0xf7, 0x6e, 0x69, 0xc9, 0x43, 0xe4, 0x32, 0x9c, - 0xd5, 0xf5, 0xb0, 0xf9, 0x36, 0x4f, 0x68, 0x41, 0xad, 0x65, 0xd4, 0xf4, 0xfc, 0xa0, 0xac, 0xcd, - 0xbc, 0x77, 0xab, 0x1f, 0xf2, 0x52, 0xb4, 0x2f, 0xa0, 0x4f, 0x5b, 0x4c, 0x7e, 0x01, 0xc1, 0xa3, - 0x2c, 0xce, 0x13, 0x91, 0x2d, 0xec, 0x20, 0x18, 0xf6, 0xbe, 0x46, 0xeb, 0x55, 0x66, 0x0d, 0x68, - 0x67, 0x1a, 0xbd, 0x80, 0xed, 0xbe, 0x72, 0xed, 0xc8, 0xdd, 0x8e, 0xe9, 0x03, 0x67, 0x4c, 0x6f, - 0x63, 0x1c, 0x3a, 0x95, 0xff, 0x25, 0x04, 0x07, 0xb5, 0x48, 0x93, 0xe3, 0x6c, 0x9e, 0x23, 0xb1, - 0x5f, 0x70, 0x59, 0x76, 0x9d, 0x63, 0x21, 0x16, 0x3e, 0x72, 0x7c, 0xcb, 0x66, 0x06, 0x5d, 0x8e, - 0xd5, 0x9f, 0x50, 0x0f, 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xff, 0x9a, 0x3e, 0x9f, 0x96, 0x12, - 0x00, 0x00, -} diff --git 
a/chronograf/bolt/internal/internal.proto b/chronograf/bolt/internal/internal.proto deleted file mode 100644 index b8d94c3761c..00000000000 --- a/chronograf/bolt/internal/internal.proto +++ /dev/null @@ -1,247 +0,0 @@ -syntax = "proto3"; -package internal; - -message Source { - int64 ID = 1; // ID is the unique ID of the source - string Name = 2; // Name is the user-defined name for the source - string Type = 3; // Type specifies which kind of source this is (enterprise vs oss) - string Username = 4; // Username is the username to connect to the source - string Password = 5; - string URL = 6; // URL is the connection URL for the source - bool Default = 7; // Flags a source as the default. - string Telegraf = 8; // Telegraf is the database that Telegraf data is written to. By default it is "telegraf" - bool InsecureSkipVerify = 9; // InsecureSkipVerify accepts any certificate from the influx server - string MetaURL = 10; // MetaURL is the connection URL for the meta node. - string SharedSecret = 11; // SharedSecret signs the optional InfluxDB JWT Authorization - string Organization = 12; // Organization is the organization ID that the resource belongs to - string Role = 13; // Role is the name of the minimum role that a user must possess to access the resource - string DefaultRP = 14; // DefaultRP is the default retention policy used in database queries to this source -} - -message Dashboard { - int64 ID = 1; // ID is the unique ID of the dashboard - string Name = 2; // Name is the user-defined name of the dashboard - repeated DashboardCell cells = 3; // a representation of all visual data required for rendering the dashboard - repeated Template templates = 4; // Templates replace template variables within InfluxQL - string Organization = 5; // Organization is the organization ID that the resource belongs to -} - -message DashboardCell { - int32 x = 1; // X-coordinate of Cell in the Dashboard - int32 y = 2; // Y-coordinate of Cell in the Dashboard - int32 w = 3; // Width of Cell in the Dashboard - int32 h = 4; // Height of Cell in the Dashboard - repeated Query queries = 5; // Time-series data queries for Dashboard - string name = 6; // User-facing name for this Dashboard - string type = 7; // Dashboard visualization type - string ID = 8; // ID is the unique ID of the dashboard cell.
MIGRATED FIELD added in 1.2.0-beta6 - map<string, Axis> axes = 9; // Axes represent the graphical viewport for a cell's visualizations - repeated Color colors = 10; // Colors represent encoding data values to color - Legend legend = 11; // Legend is summary information for a cell - TableOptions tableOptions = 12; // TableOptions for visualization of cell with type 'table' - repeated RenamableField fieldOptions = 13; // Options for each of the fields returned in a cell - string timeFormat = 14; // format for time - DecimalPlaces decimalPlaces = 15; // Represents how precise the values of this field should be -} - -message DecimalPlaces { - bool isEnforced = 1; // whether decimal places should be enforced - int32 digits = 2; // the number of digits to display after the decimal point -} - -message TableOptions { - reserved 1; - bool verticalTimeAxis = 2; // time axis should be a column, not a row - RenamableField sortBy = 3; // which column a table should be sorted by - string wrapping = 4; // option for text wrapping - reserved 5; - bool fixFirstColumn = 6; // first column should be fixed/frozen -} - -message RenamableField { - string internalName = 1; // name of column - string displayName = 2; // what column is renamed to - bool visible = 3; // Represents whether RenamableField is visible -} - -message Color { - string ID = 1; // ID is the unique id of the cell color - string Type = 2; // Type is how the color is used. Accepted (min,max,threshold) - string Hex = 3; // Hex is the hex number of the color - string Name = 4; // Name is the user-facing name of the hex color - string Value = 5; // Value is the data value mapped to this color -} - -message Legend { - string Type = 1; // Type is how the legend is used - string Orientation = 2; // Orientation is the location of the legend on the cell -} - -message Axis { - repeated int64 legacyBounds = 1; // legacyBounds are an ordered 2-tuple consisting of lower and upper axis extents, respectively - repeated string bounds = 2; // bounds are an arbitrary list of client-defined bounds.
- string label = 3; // label is a description of this axis - string prefix = 4; // specifies the prefix for axis values - string suffix = 5; // specifies the suffix for axis values - string base = 6; // defines the base for axis values - string scale = 7; // represents the magnitude of the numbers on this axis -} - -message Template { - string ID = 1; // ID is the unique ID associated with this template - string temp_var = 2; - repeated TemplateValue values = 3; - string type = 4; // Type can be fieldKeys, tagKeys, tagValues, CSV, constant, query, measurements, databases - string label = 5; // Label is a user-facing description of the Template - TemplateQuery query = 6; // Query is used to generate the choices for a template -} - -message TemplateValue { - string type = 1; // Type can be tagKey, tagValue, fieldKey, csv, map, measurement, database, constant - string value = 2; // Value is the specific value used to replace a template in an InfluxQL query - bool selected = 3; // Selected states that this variable has been picked to use for replacement - string key = 4; // Key is the key for a specific Value if the Template Type is map (optional) -} - -message TemplateQuery { - string command = 1; // Command is the query itself - string db = 2; // DB is the database for the query (optional) - string rp = 3; // RP is the retention policy for the query (optional) - string measurement = 4; // Measurement is the optionally selected measurement for the query - string tag_key = 5; // TagKey is the optionally selected tag key for the query - string field_key = 6; // FieldKey is the optionally selected field key for the query -} - -message Server { - int64 ID = 1; // ID is the unique ID of the server - string Name = 2; // Name is the user-defined name for the server - string Username = 3; // Username is the username to connect to the server - string Password = 4; - string URL = 5; // URL is the path to the server - int64 SrcID = 6; // SrcID is the ID of the data source - bool Active = 7; // is this the currently active server for the source - string Organization = 8; // Organization is the organization ID that the resource belongs to - bool InsecureSkipVerify = 9; // InsecureSkipVerify accepts any certificate from the client - string Type = 10; // Type is the kind of the server (e.g. flux) - string MetadataJSON = 11; // JSON byte representation of the metadata -} - -message Layout { - string ID = 1; // ID is the unique ID of the layout. - string Application = 2; // Application is the user-facing name of this Layout. - string Measurement = 3; // Measurement is the descriptive name of the time series data. - repeated Cell Cells = 4; // Cells are the individual visualization elements. - bool Autoflow = 5; // Autoflow indicates whether the frontend should lay out the cells automatically. -} - -message Cell { - int32 x = 1; // X-coordinate of Cell in the Layout - int32 y = 2; // Y-coordinate of Cell in the Layout - int32 w = 3; // Width of Cell in the Layout - int32 h = 4; // Height of Cell in the Layout - repeated Query queries = 5; // Time-series data queries for Cell.
- string i = 6; // Unique identifier for the cell - string name = 7; // User-facing name for this cell - repeated int64 yranges = 8; // Limits of the y-axes - repeated string ylabels = 9; // Labels of the y-axes - string type = 10; // Cell visualization type - map<string, Axis> axes = 11; // Axes represent the graphical viewport for a cell's visualizations -} - -message Query { - string Command = 1; // Command is the query itself - string DB = 2; // DB is the database for the query (optional) - string RP = 3; // RP is the retention policy for the query (optional) - repeated string GroupBys = 4; // GroupBys define the groups to combine in the query - repeated string Wheres = 5; // Wheres define the restrictions on the query - string Label = 6; // Label is the name of the Y-Axis - Range Range = 7; // Range is the upper and lower bound of the Y-Axis - string Source = 8; // Source is the optional URI to the data source - repeated TimeShift Shifts = 9; // TimeShift represents a shift to apply to an InfluxQL query's time range - string Type = 10; -} - -message TimeShift { - string Label = 1; // Label is the user-facing description - string Unit = 2; // Unit is the InfluxQL time unit representation, e.g. ms, s, m, h, d - string Quantity = 3; // Quantity is the number of units -} - -message Range { - int64 Upper = 1; // Upper is the upper bound of the range - int64 Lower = 2; // Lower is the lower bound of the range -} - -message AlertRule { - string ID = 1; // ID is the unique ID of this alert rule - string JSON = 2; // JSON byte representation of the alert - int64 SrcID = 3; // SrcID is the id of the source this alert is associated with - int64 KapaID = 4; // KapaID is the id of the kapacitor this alert is associated with -} - -message User { - uint64 ID = 1; // ID is the unique ID of this user - string Name = 2; // Name is the user's login name - string Provider = 3; // Provider is the provider that certifies and issues this user's authentication, e.g. GitHub - string Scheme = 4; // Scheme is the scheme used to perform this user's authentication, e.g. OAuth2 or LDAP - repeated Role Roles = 5; // Roles is the set of roles a user has - bool SuperAdmin = 6; // SuperAdmin specifies whether a user is a super admin -} - -message Role { - string Organization = 1; // Organization is the ID of the organization that this user has a role in - string Name = 2; // Name is the name of the role of this user in the respective organization -} - -message Mapping { - string Provider = 1; // Provider is the provider that certifies and issues this user's authentication, e.g. GitHub - string Scheme = 2; // Scheme is the scheme used to perform this user's authentication, e.g.
OAuth2 or LDAP - string ProviderOrganization = 3; // ProviderOrganization is the group or organization that the user is a part of in the auth provider - string ID = 4; // ID is the unique ID for the mapping - string Organization = 5; // Organization is the organization ID that the resource belongs to -} - -message Organization { - string ID = 1; // ID is the unique ID of the organization - string Name = 2; // Name is the organization's name - string DefaultRole = 3; // DefaultRole is the name of the role that is the default for any users added to the organization -} - -message Config { - AuthConfig Auth = 1; // Auth is the configuration for auth-related options -} - -message AuthConfig { - bool SuperAdminNewUsers = 1; // SuperAdminNewUsers is the configuration option that specifies whether new users automatically become super admins -} - -message OrganizationConfig { - string OrganizationID = 1; // OrganizationID is the ID of the organization this config belongs to - LogViewerConfig LogViewer = 2; // LogViewer is the organization configuration for the log viewer -} - -message LogViewerConfig { - repeated LogViewerColumn Columns = 1; // Columns is the array of columns in the log viewer -} - -message LogViewerColumn { - string Name = 1; // Name is the unique identifier of the log viewer column - int32 Position = 2; // Position is the position of the column in the log viewer's array of columns - repeated ColumnEncoding Encodings = 3; // Encodings is the array of encoded properties associated with a log viewer column -} - -message ColumnEncoding { - string Type = 1; // Type is the purpose of the encoding, for example: severity color - string Value = 2; // Value is what the encoding corresponds to - string Name = 3; // Name is the optional encoding name -} - -message BuildInfo { - string Version = 1; // Version is a descriptive git SHA identifier - string Commit = 2; // Commit is an abbreviated SHA -} - -// The following is a vim modeline; it autoconfigures vim to have the -// appropriate tabbing and whitespace management to edit this file -// -// vim: ai:ts=4:noet:sts=4 diff --git a/chronograf/bolt/internal/internal_test.go b/chronograf/bolt/internal/internal_test.go deleted file mode 100644 index 363ed42533d..00000000000 --- a/chronograf/bolt/internal/internal_test.go +++ /dev/null @@ -1,488 +0,0 @@ -package internal_test - -import ( - "reflect" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/bolt/internal" -) - -func TestMarshalSource(t *testing.T) { - v := chronograf.Source{ - ID: 12, - Name: "Fountain of Truth", - Type: "influx", - Username: "docbrown", - Password: "1 point twenty-one g1g@w@tts", - URL: "http://twin-pines.mall.io:8086", - MetaURL: "http://twin-pines.meta.io:8086", - Default: true, - Telegraf: "telegraf", - } - - var vv chronograf.Source - if buf, err := internal.MarshalSource(v); err != nil { - t.Fatal(err) - } else if err := internal.UnmarshalSource(buf, &vv); err != nil { - t.Fatal(err) - } else if !reflect.DeepEqual(v, vv) { - t.Fatalf("source protobuf copy error: got %#v, expected %#v", vv, v) - } - - // Test if the new insecureskipverify works - v.InsecureSkipVerify = true - if buf, err := internal.MarshalSource(v); err != nil { - t.Fatal(err) - } else if err := internal.UnmarshalSource(buf, &vv); err != nil { - t.Fatal(err) - } else if !reflect.DeepEqual(v, vv) { - t.Fatalf("source protobuf copy error: got %#v, expected %#v", vv, v) - } -} -func TestMarshalSourceWithSecret(t
*testing.T) { - v := chronograf.Source{ - ID: 12, - Name: "Fountain of Truth", - Type: "influx", - Username: "docbrown", - SharedSecret: "hunter2s", - URL: "http://twin-pines.mall.io:8086", - MetaURL: "http://twin-pines.meta.io:8086", - Default: true, - Telegraf: "telegraf", - } - - var vv chronograf.Source - if buf, err := internal.MarshalSource(v); err != nil { - t.Fatal(err) - } else if err := internal.UnmarshalSource(buf, &vv); err != nil { - t.Fatal(err) - } else if !reflect.DeepEqual(v, vv) { - t.Fatalf("source protobuf copy error: got %#v, expected %#v", vv, v) - } - - // Test if the new insecureskipverify works - v.InsecureSkipVerify = true - if buf, err := internal.MarshalSource(v); err != nil { - t.Fatal(err) - } else if err := internal.UnmarshalSource(buf, &vv); err != nil { - t.Fatal(err) - } else if !reflect.DeepEqual(v, vv) { - t.Fatalf("source protobuf copy error: got %#v, expected %#v", vv, v) - } -} - -func TestMarshalServer(t *testing.T) { - v := chronograf.Server{ - ID: 12, - SrcID: 2, - Name: "Fountain of Truth", - Username: "docbrown", - Password: "1 point twenty-one g1g@w@tts", - URL: "http://oldmanpeabody.mall.io:9092", - InsecureSkipVerify: true, - } - - var vv chronograf.Server - if buf, err := internal.MarshalServer(v); err != nil { - t.Fatal(err) - } else if err := internal.UnmarshalServer(buf, &vv); err != nil { - t.Fatal(err) - } else if !reflect.DeepEqual(v, vv) { - t.Fatalf("source protobuf copy error: got %#v, expected %#v", vv, v) - } -} - -func TestMarshalLayout(t *testing.T) { - layout := chronograf.Layout{ - ID: "id", - Measurement: "measurement", - Application: "app", - Cells: []chronograf.Cell{ - { - X: 1, - Y: 1, - W: 4, - H: 4, - I: "anotherid", - Type: "line", - Name: "cell1", - Axes: map[string]chronograf.Axis{ - "y": chronograf.Axis{ - Bounds: []string{"0", "100"}, - Label: "foo", - }, - }, - Queries: []chronograf.Query{ - { - Range: &chronograf.Range{ - Lower: 1, - Upper: 2, - }, - Label: "y1", - Command: "select mean(usage_user) as usage_user from cpu", - Wheres: []string{ - `"host"="myhost"`, - }, - GroupBys: []string{ - `"cpu"`, - }, - }, - }, - }, - }, - } - - var vv chronograf.Layout - if buf, err := internal.MarshalLayout(layout); err != nil { - t.Fatal(err) - } else if err := internal.UnmarshalLayout(buf, &vv); err != nil { - t.Fatal(err) - } else if !cmp.Equal(layout, vv) { - t.Fatal("source protobuf copy error: diff:\n", cmp.Diff(layout, vv)) - } -} - -func Test_MarshalDashboard(t *testing.T) { - dashboard := chronograf.Dashboard{ - ID: 1, - Cells: []chronograf.DashboardCell{ - { - ID: "9b5367de-c552-4322-a9e8-7f384cbd235c", - X: 0, - Y: 0, - W: 4, - H: 4, - Name: "Super awesome query", - Queries: []chronograf.DashboardQuery{ - { - Command: "select * from cpu", - Label: "CPU Utilization", - Range: &chronograf.Range{ - Upper: int64(100), - }, - Source: "/chronograf/v1/sources/1", - Shifts: []chronograf.TimeShift{}, - }, - }, - Axes: map[string]chronograf.Axis{ - "y": chronograf.Axis{ - Bounds: []string{"0", "3", "1-7", "foo"}, - Label: "foo", - Prefix: "M", - Suffix: "m", - Base: "2", - Scale: "roflscale", - }, - }, - Type: "line", - CellColors: []chronograf.CellColor{ - { - ID: "myid", - Type: "min", - Hex: "#234567", - Name: "Laser", - Value: "0", - }, - { - ID: "id2", - Type: "max", - Hex: "#876543", - Name: "Solitude", - Value: "100", - }, - }, - TableOptions: chronograf.TableOptions{}, - FieldOptions: []chronograf.RenamableField{}, - TimeFormat: "", - }, - }, - Templates: []chronograf.Template{}, - Name: "Dashboard", - } - - var 
actual chronograf.Dashboard - if buf, err := internal.MarshalDashboard(dashboard); err != nil { - t.Fatal("Error marshaling dashboard: err", err) - } else if err := internal.UnmarshalDashboard(buf, &actual); err != nil { - t.Fatal("Error unmarshalling dashboard: err:", err) - } else if !cmp.Equal(dashboard, actual) { - t.Fatalf("Dashboard protobuf copy error: diff follows:\n%s", cmp.Diff(dashboard, actual)) - } -} - -func Test_MarshalDashboard_WithLegacyBounds(t *testing.T) { - dashboard := chronograf.Dashboard{ - ID: 1, - Cells: []chronograf.DashboardCell{ - { - ID: "9b5367de-c552-4322-a9e8-7f384cbd235c", - X: 0, - Y: 0, - W: 4, - H: 4, - Name: "Super awesome query", - Queries: []chronograf.DashboardQuery{ - { - Command: "select * from cpu", - Label: "CPU Utilization", - Range: &chronograf.Range{ - Upper: int64(100), - }, - Shifts: []chronograf.TimeShift{}, - }, - }, - Axes: map[string]chronograf.Axis{ - "y": chronograf.Axis{ - LegacyBounds: [2]int64{0, 5}, - }, - }, - CellColors: []chronograf.CellColor{ - { - ID: "myid", - Type: "min", - Hex: "#234567", - Name: "Laser", - Value: "0", - }, - { - ID: "id2", - Type: "max", - Hex: "#876543", - Name: "Solitude", - Value: "100", - }, - }, - Legend: chronograf.Legend{ - Type: "static", - Orientation: "bottom", - }, - TableOptions: chronograf.TableOptions{}, - TimeFormat: "MM:DD:YYYY", - FieldOptions: []chronograf.RenamableField{}, - Type: "line", - }, - }, - Templates: []chronograf.Template{}, - Name: "Dashboard", - } - - expected := chronograf.Dashboard{ - ID: 1, - Cells: []chronograf.DashboardCell{ - { - ID: "9b5367de-c552-4322-a9e8-7f384cbd235c", - X: 0, - Y: 0, - W: 4, - H: 4, - Name: "Super awesome query", - Queries: []chronograf.DashboardQuery{ - { - Command: "select * from cpu", - Label: "CPU Utilization", - Range: &chronograf.Range{ - Upper: int64(100), - }, - Shifts: []chronograf.TimeShift{}, - }, - }, - Axes: map[string]chronograf.Axis{ - "y": chronograf.Axis{ - Bounds: []string{}, - Base: "10", - Scale: "linear", - }, - }, - CellColors: []chronograf.CellColor{ - { - ID: "myid", - Type: "min", - Hex: "#234567", - Name: "Laser", - Value: "0", - }, - { - ID: "id2", - Type: "max", - Hex: "#876543", - Name: "Solitude", - Value: "100", - }, - }, - Legend: chronograf.Legend{ - Type: "static", - Orientation: "bottom", - }, - TableOptions: chronograf.TableOptions{}, - FieldOptions: []chronograf.RenamableField{}, - TimeFormat: "MM:DD:YYYY", - Type: "line", - }, - }, - Templates: []chronograf.Template{}, - Name: "Dashboard", - } - - var actual chronograf.Dashboard - if buf, err := internal.MarshalDashboard(dashboard); err != nil { - t.Fatal("Error marshaling dashboard: err", err) - } else if err := internal.UnmarshalDashboard(buf, &actual); err != nil { - t.Fatal("Error unmarshalling dashboard: err:", err) - } else if !cmp.Equal(expected, actual) { - t.Fatalf("Dashboard protobuf copy error: diff follows:\n%s", cmp.Diff(expected, actual)) - } -} - -func Test_MarshalDashboard_WithEmptyLegacyBounds(t *testing.T) { - dashboard := chronograf.Dashboard{ - ID: 1, - Cells: []chronograf.DashboardCell{ - { - ID: "9b5367de-c552-4322-a9e8-7f384cbd235c", - X: 0, - Y: 0, - W: 4, - H: 4, - Name: "Super awesome query", - Queries: []chronograf.DashboardQuery{ - { - Command: "select * from cpu", - Label: "CPU Utilization", - Range: &chronograf.Range{ - Upper: int64(100), - }, - Shifts: []chronograf.TimeShift{}, - }, - }, - Axes: map[string]chronograf.Axis{ - "y": chronograf.Axis{ - LegacyBounds: [2]int64{}, - }, - }, - CellColors: []chronograf.CellColor{ - { - 
ID: "myid", - Type: "min", - Hex: "#234567", - Name: "Laser", - Value: "0", - }, - { - ID: "id2", - Type: "max", - Hex: "#876543", - Name: "Solitude", - Value: "100", - }, - }, - Type: "line", - TableOptions: chronograf.TableOptions{}, - FieldOptions: []chronograf.RenamableField{}, - TimeFormat: "MM:DD:YYYY", - }, - }, - Templates: []chronograf.Template{}, - Name: "Dashboard", - } - - expected := chronograf.Dashboard{ - ID: 1, - Cells: []chronograf.DashboardCell{ - { - ID: "9b5367de-c552-4322-a9e8-7f384cbd235c", - X: 0, - Y: 0, - W: 4, - H: 4, - Name: "Super awesome query", - Queries: []chronograf.DashboardQuery{ - { - Command: "select * from cpu", - Label: "CPU Utilization", - Range: &chronograf.Range{ - Upper: int64(100), - }, - Shifts: []chronograf.TimeShift{}, - }, - }, - Axes: map[string]chronograf.Axis{ - "y": chronograf.Axis{ - Bounds: []string{}, - Base: "10", - Scale: "linear", - }, - }, - CellColors: []chronograf.CellColor{ - { - ID: "myid", - Type: "min", - Hex: "#234567", - Name: "Laser", - Value: "0", - }, - { - ID: "id2", - Type: "max", - Hex: "#876543", - Name: "Solitude", - Value: "100", - }, - }, - TableOptions: chronograf.TableOptions{}, - FieldOptions: []chronograf.RenamableField{}, - TimeFormat: "MM:DD:YYYY", - Type: "line", - }, - }, - Templates: []chronograf.Template{}, - Name: "Dashboard", - } - - var actual chronograf.Dashboard - if buf, err := internal.MarshalDashboard(dashboard); err != nil { - t.Fatal("Error marshaling dashboard: err", err) - } else if err := internal.UnmarshalDashboard(buf, &actual); err != nil { - t.Fatal("Error unmarshalling dashboard: err:", err) - } else if !cmp.Equal(expected, actual) { - t.Fatalf("Dashboard protobuf copy error: diff follows:\n%s", cmp.Diff(expected, actual)) - } -} - -func Test_MarshalDashboard_WithEmptyCellType(t *testing.T) { - dashboard := chronograf.Dashboard{ - ID: 1, - Cells: []chronograf.DashboardCell{ - { - ID: "9b5367de-c552-4322-a9e8-7f384cbd235c", - }, - }, - } - - expected := chronograf.Dashboard{ - ID: 1, - Cells: []chronograf.DashboardCell{ - { - ID: "9b5367de-c552-4322-a9e8-7f384cbd235c", - Type: "line", - Queries: []chronograf.DashboardQuery{}, - Axes: map[string]chronograf.Axis{}, - CellColors: []chronograf.CellColor{}, - TableOptions: chronograf.TableOptions{}, - FieldOptions: []chronograf.RenamableField{}, - }, - }, - Templates: []chronograf.Template{}, - } - - var actual chronograf.Dashboard - if buf, err := internal.MarshalDashboard(dashboard); err != nil { - t.Fatal("Error marshaling dashboard: err", err) - } else if err := internal.UnmarshalDashboard(buf, &actual); err != nil { - t.Fatal("Error unmarshalling dashboard: err:", err) - } else if !cmp.Equal(expected, actual) { - t.Fatalf("Dashboard protobuf copy error: diff follows:\n%s", cmp.Diff(expected, actual)) - } -} diff --git a/chronograf/bolt/layouts.go b/chronograf/bolt/layouts.go deleted file mode 100644 index 81ad5e0cf88..00000000000 --- a/chronograf/bolt/layouts.go +++ /dev/null @@ -1,128 +0,0 @@ -package bolt - -import ( - "context" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/bolt/internal" - bolt "go.etcd.io/bbolt" -) - -// Ensure LayoutsStore implements chronograf.LayoutsStore. 
-var _ chronograf.LayoutsStore = &LayoutsStore{} - -// LayoutsBucket is the bolt bucket layouts are stored in -var LayoutsBucket = []byte("Layout") - -// LayoutsStore is the bolt implementation to store layouts -type LayoutsStore struct { - client *Client - IDs chronograf.ID -} - -func (s *LayoutsStore) Migrate(ctx context.Context) error { - return nil -} - -// All returns all known layouts -func (s *LayoutsStore) All(ctx context.Context) ([]chronograf.Layout, error) { - var srcs []chronograf.Layout - if err := s.client.db.View(func(tx *bolt.Tx) error { - if err := tx.Bucket(LayoutsBucket).ForEach(func(k, v []byte) error { - var src chronograf.Layout - if err := internal.UnmarshalLayout(v, &src); err != nil { - return err - } - srcs = append(srcs, src) - return nil - }); err != nil { - return err - } - return nil - }); err != nil { - return nil, err - } - - return srcs, nil - -} - -// Add creates a new Layout in the LayoutsStore. -func (s *LayoutsStore) Add(ctx context.Context, src chronograf.Layout) (chronograf.Layout, error) { - if err := s.client.db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket(LayoutsBucket) - id, err := s.IDs.Generate() - if err != nil { - return err - } - - src.ID = id - if v, err := internal.MarshalLayout(src); err != nil { - return err - } else if err := b.Put([]byte(src.ID), v); err != nil { - return err - } - return nil - }); err != nil { - return chronograf.Layout{}, err - } - - return src, nil -} - -// Delete removes the Layout from the LayoutsStore -func (s *LayoutsStore) Delete(ctx context.Context, src chronograf.Layout) error { - _, err := s.Get(ctx, src.ID) - if err != nil { - return err - } - if err := s.client.db.Update(func(tx *bolt.Tx) error { - if err := tx.Bucket(LayoutsBucket).Delete([]byte(src.ID)); err != nil { - return err - } - return nil - }); err != nil { - return err - } - - return nil -} - -// Get returns a Layout if the id exists. -func (s *LayoutsStore) Get(ctx context.Context, id string) (chronograf.Layout, error) { - var src chronograf.Layout - if err := s.client.db.View(func(tx *bolt.Tx) error { - if v := tx.Bucket(LayoutsBucket).Get([]byte(id)); v == nil { - return chronograf.ErrLayoutNotFound - } else if err := internal.UnmarshalLayout(v, &src); err != nil { - return err - } - return nil - }); err != nil { - return chronograf.Layout{}, err - } - - return src, nil -} - -// Update a Layout -func (s *LayoutsStore) Update(ctx context.Context, src chronograf.Layout) error { - if err := s.client.db.Update(func(tx *bolt.Tx) error { - // Get an existing layout with the same ID. - b := tx.Bucket(LayoutsBucket) - if v := b.Get([]byte(src.ID)); v == nil { - return chronograf.ErrLayoutNotFound - } - - if v, err := internal.MarshalLayout(src); err != nil { - return err - } else if err := b.Put([]byte(src.ID), v); err != nil { - return err - } - return nil - }); err != nil { - return err - } - - return nil -} diff --git a/chronograf/bolt/mapping.go b/chronograf/bolt/mapping.go deleted file mode 100644 index 6cc224bd4db..00000000000 --- a/chronograf/bolt/mapping.go +++ /dev/null @@ -1,128 +0,0 @@ -package bolt - -import ( - "context" - "fmt" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/bolt/internal" - bolt "go.etcd.io/bbolt" -) - -// Ensure MappingsStore implements chronograf.MappingsStore. -var _ chronograf.MappingsStore = &MappingsStore{} - -var ( - // MappingsBucket is the bucket where organizations are stored. 
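The `MappingsStore` declared next keeps its records in the dedicated `MappingsV1` bucket. For readers less familiar with bbolt, here is a minimal sketch of creating such a bucket and writing to it; the `demo.db` path and the placeholder value are illustrative only:

```go
package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

var mappingsBucket = []byte("MappingsV1")

func main() {
	// Throwaway database path, for illustration only.
	db, err := bolt.Open("demo.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Buckets must exist before Put/Get will find them, and creation is
	// only legal inside a writable (Update) transaction.
	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists(mappingsBucket)
		if err != nil {
			return err
		}
		return b.Put([]byte("1"), []byte("serialized mapping"))
	})
	if err != nil {
		log.Fatal(err)
	}
}
```

The deleted stores assume their buckets already exist, presumably created once during client initialization, which is outside this patch.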
- MappingsBucket = []byte("MappingsV1") -) - -// MappingsStore uses bolt to store and retrieve Mappings -type MappingsStore struct { - client *Client -} - -// Migrate sets the default organization at runtime -func (s *MappingsStore) Migrate(ctx context.Context) error { - return nil -} - -// Add creates a new Mapping in the MappingsStore -func (s *MappingsStore) Add(ctx context.Context, o *chronograf.Mapping) (*chronograf.Mapping, error) { - err := s.client.db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket(MappingsBucket) - seq, err := b.NextSequence() - if err != nil { - return err - } - o.ID = fmt.Sprintf("%d", seq) - - v, err := internal.MarshalMapping(o) - if err != nil { - return err - } - - return b.Put([]byte(o.ID), v) - }) - - if err != nil { - return nil, err - } - - return o, nil -} - -// All returns all known organizations -func (s *MappingsStore) All(ctx context.Context) ([]chronograf.Mapping, error) { - var mappings []chronograf.Mapping - err := s.each(ctx, func(m *chronograf.Mapping) { - mappings = append(mappings, *m) - }) - - if err != nil { - return nil, err - } - - return mappings, nil -} - -// Delete the organization from MappingsStore -func (s *MappingsStore) Delete(ctx context.Context, o *chronograf.Mapping) error { - _, err := s.get(ctx, o.ID) - if err != nil { - return err - } - if err := s.client.db.Update(func(tx *bolt.Tx) error { - return tx.Bucket(MappingsBucket).Delete([]byte(o.ID)) - }); err != nil { - return err - } - return nil -} - -func (s *MappingsStore) get(ctx context.Context, id string) (*chronograf.Mapping, error) { - var o chronograf.Mapping - err := s.client.db.View(func(tx *bolt.Tx) error { - v := tx.Bucket(MappingsBucket).Get([]byte(id)) - if v == nil { - return chronograf.ErrMappingNotFound - } - return internal.UnmarshalMapping(v, &o) - }) - - if err != nil { - return nil, err - } - - return &o, nil -} - -func (s *MappingsStore) each(ctx context.Context, fn func(*chronograf.Mapping)) error { - return s.client.db.View(func(tx *bolt.Tx) error { - return tx.Bucket(MappingsBucket).ForEach(func(k, v []byte) error { - var m chronograf.Mapping - if err := internal.UnmarshalMapping(v, &m); err != nil { - return err - } - fn(&m) - return nil - }) - }) -} - -// Get returns a Mapping if the id exists. 
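`Add` above allocates string IDs by formatting the bucket's monotonic sequence number, a pattern the organizations store repeats later in this patch. A compact sketch of that allocation, assuming only bbolt (the function and bucket names here are illustrative):

```go
package main

import (
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

// nextID mirrors the shape of the deleted Add methods: bbolt's per-bucket
// sequence yields a monotonically increasing uint64, which the chronograf
// stores format as a decimal string key.
func nextID(db *bolt.DB, bucket []byte) (string, error) {
	var id string
	err := db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists(bucket)
		if err != nil {
			return err
		}
		seq, err := b.NextSequence() // only valid in a writable transaction
		if err != nil {
			return err
		}
		id = fmt.Sprintf("%d", seq)
		return nil
	})
	return id, err
}

func main() {
	db, err := bolt.Open("demo.db", 0600, nil) // throwaway path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	id, err := nextID(db, []byte("MappingsV1"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("allocated ID:", id)
}
```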
-func (s *MappingsStore) Get(ctx context.Context, id string) (*chronograf.Mapping, error) { - return s.get(ctx, id) -} - -// Update the organization in MappingsStore -func (s *MappingsStore) Update(ctx context.Context, o *chronograf.Mapping) error { - return s.client.db.Update(func(tx *bolt.Tx) error { - if v, err := internal.MarshalMapping(o); err != nil { - return err - } else if err := tx.Bucket(MappingsBucket).Put([]byte(o.ID), v); err != nil { - return err - } - return nil - }) -} diff --git a/chronograf/bolt/mapping_test.go b/chronograf/bolt/mapping_test.go deleted file mode 100644 index 27e695990f3..00000000000 --- a/chronograf/bolt/mapping_test.go +++ /dev/null @@ -1,480 +0,0 @@ -package bolt_test - -import ( - "context" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/influxdata/influxdb/v2/chronograf" -) - -var mappingCmpOptions = cmp.Options{ - cmpopts.IgnoreFields(chronograf.Mapping{}, "ID"), - cmpopts.EquateEmpty(), -} - -func TestMappingStore_Add(t *testing.T) { - type fields struct { - mappings []*chronograf.Mapping - } - type args struct { - mapping *chronograf.Mapping - } - type wants struct { - mapping *chronograf.Mapping - err error - } - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "default with wildcards", - args: args{ - mapping: &chronograf.Mapping{ - Organization: "default", - Provider: "*", - Scheme: "*", - ProviderOrganization: "*", - }, - }, - wants: wants{ - mapping: &chronograf.Mapping{ - Organization: "default", - Provider: "*", - Scheme: "*", - ProviderOrganization: "*", - }, - }, - }, - { - name: "simple", - args: args{ - mapping: &chronograf.Mapping{ - Organization: "default", - Provider: "github", - Scheme: "oauth2", - ProviderOrganization: "idk", - }, - }, - wants: wants{ - mapping: &chronograf.Mapping{ - Organization: "default", - Provider: "github", - Scheme: "oauth2", - ProviderOrganization: "idk", - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - client, err := NewTestClient() - if err != nil { - t.Fatal(err) - } - defer client.Close() - - s := client.MappingsStore - ctx := context.Background() - - for _, mapping := range tt.fields.mappings { - // YOLO database prepopulation - _, _ = s.Add(ctx, mapping) - } - - tt.args.mapping, err = s.Add(ctx, tt.args.mapping) - - if (err != nil) != (tt.wants.err != nil) { - t.Errorf("MappingsStore.Add() error = %v, want error %v", err, tt.wants.err) - return - } - - got, err := s.Get(ctx, tt.args.mapping.ID) - if err != nil { - t.Fatalf("failed to get mapping: %v", err) - return - } - if diff := cmp.Diff(got, tt.wants.mapping, mappingCmpOptions...); diff != "" { - t.Errorf("MappingStore.Add():\n-got/+want\ndiff %s", diff) - return - } - }) - } -} - -func TestMappingStore_All(t *testing.T) { - type fields struct { - mappings []*chronograf.Mapping - } - type wants struct { - mappings []chronograf.Mapping - err error - } - tests := []struct { - name string - fields fields - wants wants - }{ - { - name: "simple", - fields: fields{ - mappings: []*chronograf.Mapping{ - &chronograf.Mapping{ - Organization: "0", - Provider: "google", - Scheme: "ldap", - ProviderOrganization: "*", - }, - }, - }, - wants: wants{ - mappings: []chronograf.Mapping{ - chronograf.Mapping{ - Organization: "0", - Provider: "google", - Scheme: "ldap", - ProviderOrganization: "*", - }, - chronograf.Mapping{ - Organization: "default", - Provider: "*", - Scheme: "*", - ProviderOrganization: 
"*", - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - client, err := NewTestClient() - if err != nil { - t.Fatal(err) - } - defer client.Close() - - s := client.MappingsStore - ctx := context.Background() - - for _, mapping := range tt.fields.mappings { - // YOLO database prepopulation - _, _ = s.Add(ctx, mapping) - } - - got, err := s.All(ctx) - - if (err != nil) != (tt.wants.err != nil) { - t.Errorf("MappingsStore.All() error = %v, want error %v", err, tt.wants.err) - return - } - - if diff := cmp.Diff(got, tt.wants.mappings, mappingCmpOptions...); diff != "" { - t.Errorf("MappingStore.All():\n-got/+want\ndiff %s", diff) - return - } - }) - } -} - -func TestMappingStore_Delete(t *testing.T) { - type fields struct { - mappings []*chronograf.Mapping - } - type args struct { - mapping *chronograf.Mapping - } - type wants struct { - err error - } - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "simple", - fields: fields{ - mappings: []*chronograf.Mapping{ - &chronograf.Mapping{ - Organization: "default", - Provider: "*", - Scheme: "*", - ProviderOrganization: "*", - }, - &chronograf.Mapping{ - Organization: "0", - Provider: "google", - Scheme: "ldap", - ProviderOrganization: "*", - }, - }, - }, - args: args{ - mapping: &chronograf.Mapping{ - ID: "1", - Organization: "default", - Provider: "*", - Scheme: "*", - ProviderOrganization: "*", - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "mapping not found", - fields: fields{ - mappings: []*chronograf.Mapping{ - &chronograf.Mapping{ - Organization: "default", - Provider: "*", - Scheme: "*", - ProviderOrganization: "*", - }, - &chronograf.Mapping{ - Organization: "0", - Provider: "google", - Scheme: "ldap", - ProviderOrganization: "*", - }, - }, - }, - args: args{ - mapping: &chronograf.Mapping{ - ID: "0", - Organization: "default", - Provider: "*", - Scheme: "*", - ProviderOrganization: "*", - }, - }, - wants: wants{ - err: chronograf.ErrMappingNotFound, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - client, err := NewTestClient() - if err != nil { - t.Fatal(err) - } - defer client.Close() - - s := client.MappingsStore - ctx := context.Background() - - for _, mapping := range tt.fields.mappings { - // YOLO database prepopulation - _, _ = s.Add(ctx, mapping) - } - - err = s.Delete(ctx, tt.args.mapping) - - if (err != nil) != (tt.wants.err != nil) { - t.Errorf("MappingsStore.Delete() error = %v, want error %v", err, tt.wants.err) - return - } - }) - } -} - -func TestMappingStore_Get(t *testing.T) { - type fields struct { - mappings []*chronograf.Mapping - } - type args struct { - mappingID string - } - type wants struct { - mapping *chronograf.Mapping - err error - } - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "simple", - fields: fields{ - mappings: []*chronograf.Mapping{ - &chronograf.Mapping{ - Organization: "default", - Provider: "*", - Scheme: "*", - ProviderOrganization: "*", - }, - &chronograf.Mapping{ - Organization: "0", - Provider: "google", - Scheme: "ldap", - ProviderOrganization: "*", - }, - }, - }, - args: args{ - mappingID: "1", - }, - wants: wants{ - mapping: &chronograf.Mapping{ - ID: "1", - Organization: "default", - Provider: "*", - Scheme: "*", - ProviderOrganization: "*", - }, - err: nil, - }, - }, - { - name: "mapping not found", - fields: fields{ - mappings: []*chronograf.Mapping{ - &chronograf.Mapping{ - Organization: "default", - 
Provider: "*", - Scheme: "*", - ProviderOrganization: "*", - }, - &chronograf.Mapping{ - Organization: "0", - Provider: "google", - Scheme: "ldap", - ProviderOrganization: "*", - }, - }, - }, - args: args{ - mappingID: "0", - }, - wants: wants{ - err: chronograf.ErrMappingNotFound, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - client, err := NewTestClient() - if err != nil { - t.Fatal(err) - } - defer client.Close() - - s := client.MappingsStore - ctx := context.Background() - - for _, mapping := range tt.fields.mappings { - // YOLO database prepopulation - _, _ = s.Add(ctx, mapping) - } - - got, err := s.Get(ctx, tt.args.mappingID) - if (err != nil) != (tt.wants.err != nil) { - t.Errorf("MappingsStore.Get() error = %v, want error %v", err, tt.wants.err) - return - } - if diff := cmp.Diff(got, tt.wants.mapping, mappingCmpOptions...); diff != "" { - t.Errorf("MappingStore.Get():\n-got/+want\ndiff %s", diff) - return - } - }) - } -} - -func TestMappingStore_Update(t *testing.T) { - type fields struct { - mappings []*chronograf.Mapping - } - type args struct { - mapping *chronograf.Mapping - } - type wants struct { - mapping *chronograf.Mapping - err error - } - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "simple", - fields: fields{ - mappings: []*chronograf.Mapping{ - &chronograf.Mapping{ - Organization: "default", - Provider: "*", - Scheme: "*", - ProviderOrganization: "*", - }, - &chronograf.Mapping{ - Organization: "0", - Provider: "google", - Scheme: "ldap", - ProviderOrganization: "*", - }, - }, - }, - args: args{ - mapping: &chronograf.Mapping{ - ID: "1", - Organization: "default", - Provider: "cool", - Scheme: "it", - ProviderOrganization: "works", - }, - }, - wants: wants{ - mapping: &chronograf.Mapping{ - ID: "1", - Organization: "default", - Provider: "cool", - Scheme: "it", - ProviderOrganization: "works", - }, - err: nil, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - client, err := NewTestClient() - if err != nil { - t.Fatal(err) - } - defer client.Close() - - s := client.MappingsStore - ctx := context.Background() - - for _, mapping := range tt.fields.mappings { - // YOLO database prepopulation - _, _ = s.Add(ctx, mapping) - } - - err = s.Update(ctx, tt.args.mapping) - if (err != nil) != (tt.wants.err != nil) { - t.Errorf("MappingsStore.Update() error = %v, want error %v", err, tt.wants.err) - return - } - if diff := cmp.Diff(tt.args.mapping, tt.wants.mapping, mappingCmpOptions...); diff != "" { - t.Errorf("MappingStore.Update():\n-got/+want\ndiff %s", diff) - return - } - }) - } -} diff --git a/chronograf/bolt/org_config.go b/chronograf/bolt/org_config.go deleted file mode 100644 index 855324c54d6..00000000000 --- a/chronograf/bolt/org_config.go +++ /dev/null @@ -1,236 +0,0 @@ -package bolt - -import ( - "context" - "fmt" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/bolt/internal" - bolt "go.etcd.io/bbolt" -) - -// Ensure OrganizationConfigStore implements chronograf.OrganizationConfigStore. 
-var _ chronograf.OrganizationConfigStore = &OrganizationConfigStore{} - -// OrganizationConfigBucket is used to store chronograf organization configurations -var OrganizationConfigBucket = []byte("OrganizationConfigV1") - -// OrganizationConfigStore uses bolt to store and retrieve organization configurations -type OrganizationConfigStore struct { - client *Client -} - -func (s *OrganizationConfigStore) Migrate(ctx context.Context) error { - return nil -} - -// Get retrieves an OrganizationConfig from the store -func (s *OrganizationConfigStore) Get(ctx context.Context, orgID string) (*chronograf.OrganizationConfig, error) { - var c chronograf.OrganizationConfig - - err := s.client.db.View(func(tx *bolt.Tx) error { - return s.get(ctx, tx, orgID, &c) - }) - - if err != nil { - return nil, err - } - - return &c, nil -} - -func (s *OrganizationConfigStore) get(ctx context.Context, tx *bolt.Tx, orgID string, c *chronograf.OrganizationConfig) error { - v := tx.Bucket(OrganizationConfigBucket).Get([]byte(orgID)) - if len(v) == 0 { - return chronograf.ErrOrganizationConfigNotFound - } - return internal.UnmarshalOrganizationConfig(v, c) -} - -// FindOrCreate gets an OrganizationConfig from the store or creates one if none exists for this organization -func (s *OrganizationConfigStore) FindOrCreate(ctx context.Context, orgID string) (*chronograf.OrganizationConfig, error) { - var c chronograf.OrganizationConfig - err := s.client.db.Update(func(tx *bolt.Tx) error { - err := s.get(ctx, tx, orgID, &c) - if err == chronograf.ErrOrganizationConfigNotFound { - c = newOrganizationConfig(orgID) - return s.put(ctx, tx, &c) - } - return err - }) - - if err != nil { - return nil, err - } - return &c, nil -} - -// Put replaces the OrganizationConfig in the store -func (s *OrganizationConfigStore) Put(ctx context.Context, c *chronograf.OrganizationConfig) error { - return s.client.db.Update(func(tx *bolt.Tx) error { - return s.put(ctx, tx, c) - }) -} - -func (s *OrganizationConfigStore) put(ctx context.Context, tx *bolt.Tx, c *chronograf.OrganizationConfig) error { - if c == nil { - return fmt.Errorf("config provided was nil") - } - if v, err := internal.MarshalOrganizationConfig(c); err != nil { - return err - } else if err := tx.Bucket(OrganizationConfigBucket).Put([]byte(c.OrganizationID), v); err != nil { - return err - } - return nil -} - -func newOrganizationConfig(orgID string) chronograf.OrganizationConfig { - return chronograf.OrganizationConfig{ - OrganizationID: orgID, - LogViewer: chronograf.LogViewerConfig{ - Columns: []chronograf.LogViewerColumn{ - { - Name: "time", - Position: 0, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "hidden", - }, - }, - }, - { - Name: "severity", - Position: 1, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - { - Type: "label", - Value: "icon", - }, - { - Type: "label", - Value: "text", - }, - { - Type: "color", - Name: "emerg", - Value: "ruby", - }, - { - Type: "color", - Name: "alert", - Value: "fire", - }, - { - Type: "color", - Name: "crit", - Value: "curacao", - }, - { - Type: "color", - Name: "err", - Value: "tiger", - }, - { - Type: "color", - Name: "warning", - Value: "pineapple", - }, - { - Type: "color", - Name: "notice", - Value: "rainforest", - }, - { - Type: "color", - Name: "info", - Value: "star", - }, - { - Type: "color", - Name: "debug", - Value: "wolf", - }, - }, - }, - { - Name: "timestamp", - Position: 2, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: 
"visibility", - Value: "visible", - }, - }, - }, - { - Name: "message", - Position: 3, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - }, - }, - { - Name: "facility", - Position: 4, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - }, - }, - { - Name: "procid", - Position: 5, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - { - Type: "displayName", - Value: "Proc ID", - }, - }, - }, - { - Name: "appname", - Position: 6, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "visible", - }, - { - Type: "displayName", - Value: "Application", - }, - }, - }, - { - Name: "host", - Position: 7, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "visible", - }, - }, - }, - }, - }, - } -} diff --git a/chronograf/bolt/org_config_test.go b/chronograf/bolt/org_config_test.go deleted file mode 100644 index 3c4522b16df..00000000000 --- a/chronograf/bolt/org_config_test.go +++ /dev/null @@ -1,1160 +0,0 @@ -package bolt_test - -import ( - "context" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/chronograf" -) - -func TestOrganizationConfig_FindOrCreate(t *testing.T) { - type args struct { - organizationID string - } - type wants struct { - organizationConfig *chronograf.OrganizationConfig - err error - } - tests := []struct { - name string - args args - addFirst bool - wants wants - }{ - { - name: "Get non-existent default config from default org", - args: args{ - organizationID: "default", - }, - addFirst: false, - wants: wants{ - organizationConfig: &chronograf.OrganizationConfig{ - OrganizationID: "default", - LogViewer: chronograf.LogViewerConfig{ - Columns: []chronograf.LogViewerColumn{ - { - Name: "time", - Position: 0, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "hidden", - }, - }, - }, - { - Name: "severity", - Position: 1, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - { - Type: "label", - Value: "icon", - }, - { - Type: "label", - Value: "text", - }, - { - Type: "color", - Name: "emerg", - Value: "ruby", - }, - { - Type: "color", - Name: "alert", - Value: "fire", - }, - { - Type: "color", - Name: "crit", - Value: "curacao", - }, - { - Type: "color", - Name: "err", - Value: "tiger", - }, - { - Type: "color", - Name: "warning", - Value: "pineapple", - }, - { - Type: "color", - Name: "notice", - Value: "rainforest", - }, - { - Type: "color", - Name: "info", - Value: "star", - }, - { - Type: "color", - Name: "debug", - Value: "wolf", - }, - }, - }, - { - Name: "timestamp", - Position: 2, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - }, - }, - { - Name: "message", - Position: 3, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - }, - }, - { - Name: "facility", - Position: 4, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - }, - }, - { - Name: "procid", - Position: 5, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - { - Type: "displayName", - Value: "Proc ID", - }, - }, - }, - { - Name: "appname", - Position: 6, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "visible", - }, - { - Type: "displayName", - Value: "Application", - }, - }, - }, - { - Name: "host", - 
Position: 7, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "visible", - }, - }, - }, - }, - }, - }, - }, - }, - { - name: "Get non-existent default config from non-default org", - args: args{ - organizationID: "1", - }, - addFirst: false, - wants: wants{ - organizationConfig: &chronograf.OrganizationConfig{ - OrganizationID: "1", - LogViewer: chronograf.LogViewerConfig{ - Columns: []chronograf.LogViewerColumn{ - { - Name: "time", - Position: 0, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "hidden", - }, - }, - }, - { - Name: "severity", - Position: 1, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - { - Type: "label", - Value: "icon", - }, - { - Type: "label", - Value: "text", - }, - { - Type: "color", - Name: "emerg", - Value: "ruby", - }, - { - Type: "color", - Name: "alert", - Value: "fire", - }, - { - Type: "color", - Name: "crit", - Value: "curacao", - }, - { - Type: "color", - Name: "err", - Value: "tiger", - }, - { - Type: "color", - Name: "warning", - Value: "pineapple", - }, - { - Type: "color", - Name: "notice", - Value: "rainforest", - }, - { - Type: "color", - Name: "info", - Value: "star", - }, - { - Type: "color", - Name: "debug", - Value: "wolf", - }, - }, - }, - { - Name: "timestamp", - Position: 2, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - }, - }, - { - Name: "message", - Position: 3, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - }, - }, - { - Name: "facility", - Position: 4, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - }, - }, - { - Name: "procid", - Position: 5, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - { - Type: "displayName", - Value: "Proc ID", - }, - }, - }, - { - Name: "appname", - Position: 6, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "visible", - }, - { - Type: "displayName", - Value: "Application", - }, - }, - }, - { - Name: "host", - Position: 7, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "visible", - }, - }, - }, - }, - }, - }, - }, - }, - { - name: "Get existing/modified config from default org", - args: args{ - organizationID: "default", - }, - addFirst: true, - wants: wants{ - organizationConfig: &chronograf.OrganizationConfig{ - OrganizationID: "default", - LogViewer: chronograf.LogViewerConfig{ - Columns: []chronograf.LogViewerColumn{ - { - Name: "time", - Position: 1, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "hidden", - }, - }, - }, - { - Name: "severity", - Position: 0, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "hidden", - }, - { - Type: "label", - Value: "icon", - }, - { - Type: "label", - Value: "text", - }, - { - Type: "color", - Name: "emerg", - Value: "ruby", - }, - { - Type: "color", - Name: "alert", - Value: "fire", - }, - { - Type: "color", - Name: "crit", - Value: "curacao", - }, - { - Type: "color", - Name: "err", - Value: "tiger", - }, - { - Type: "color", - Name: "warning", - Value: "pineapple", - }, - { - Type: "color", - Name: "notice", - Value: "rainforest", - }, - { - Type: "color", - Name: "info", - Value: "star", - }, - { - Type: "color", - Name: "debug", - Value: "wolf", - }, - }, - }, - { - Name: "timestamp", - Position: 2, - Encodings: []chronograf.ColumnEncoding{ - - { - 
Type: "visibility", - Value: "visible", - }, - }, - }, - { - Name: "message", - Position: 3, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - }, - }, - { - Name: "facility", - Position: 4, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - }, - }, - { - Name: "procid", - Position: 5, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - { - Type: "displayName", - Value: "Proc ID", - }, - }, - }, - { - Name: "appname", - Position: 6, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "visible", - }, - { - Type: "displayName", - Value: "Application", - }, - }, - }, - { - Name: "host", - Position: 7, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "visible", - }, - }, - }, - }, - }, - }, - }, - }, - { - name: "Get existing/modified config from non-default org", - args: args{ - organizationID: "1", - }, - addFirst: true, - wants: wants{ - organizationConfig: &chronograf.OrganizationConfig{ - OrganizationID: "1", - LogViewer: chronograf.LogViewerConfig{ - Columns: []chronograf.LogViewerColumn{ - { - Name: "time", - Position: 1, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "hidden", - }, - }, - }, - { - Name: "severity", - Position: 0, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "hidden", - }, - { - Type: "label", - Value: "icon", - }, - { - Type: "label", - Value: "text", - }, - { - Type: "color", - Name: "emerg", - Value: "ruby", - }, - { - Type: "color", - Name: "alert", - Value: "fire", - }, - { - Type: "color", - Name: "crit", - Value: "curacao", - }, - { - Type: "color", - Name: "err", - Value: "tiger", - }, - { - Type: "color", - Name: "warning", - Value: "pineapple", - }, - { - Type: "color", - Name: "notice", - Value: "rainforest", - }, - { - Type: "color", - Name: "info", - Value: "star", - }, - { - Type: "color", - Name: "debug", - Value: "wolf", - }, - }, - }, - { - Name: "timestamp", - Position: 2, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - }, - }, - { - Name: "message", - Position: 3, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - }, - }, - { - Name: "facility", - Position: 4, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - }, - }, - { - Name: "procid", - Position: 5, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - { - Type: "displayName", - Value: "Proc ID", - }, - }, - }, - { - Name: "appname", - Position: 6, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "visible", - }, - { - Type: "displayName", - Value: "Application", - }, - }, - }, - { - Name: "host", - Position: 7, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "visible", - }, - }, - }, - }, - }, - }, - }, - }, - } - for _, tt := range tests { - client, err := NewTestClient() - if err != nil { - t.Fatal(err) - } - defer client.Close() - - s := client.OrganizationConfigStore - - if tt.addFirst { - if err := s.Put(context.Background(), tt.wants.organizationConfig); err != nil { - t.Fatal(err) - } - } - - got, err := s.FindOrCreate(context.Background(), tt.args.organizationID) - - if (tt.wants.err != nil) != (err != nil) { - t.Errorf("%q. 
OrganizationConfigStore.FindOrCreate() error = %v, wantErr %v", tt.name, err, tt.wants.err) - continue - } - if diff := cmp.Diff(got, tt.wants.organizationConfig); diff != "" { - t.Errorf("%q. OrganizationConfigStore.FindOrCreate():\n-got/+want\ndiff %s", tt.name, diff) - } - - d, err := s.Get(context.Background(), tt.args.organizationID) - if err != nil { - t.Errorf("%q. OrganizationConfigStore.Get(): Failed to retrieve organization config", tt.name) - } - if diff := cmp.Diff(got, d); diff != "" { - t.Errorf("%q. OrganizationConfigStore.Get():\n-got/+want\ndiff %s", tt.name, diff) - } - } -} - -func TestOrganizationConfig_Put(t *testing.T) { - type args struct { - organizationConfig *chronograf.OrganizationConfig - organizationID string - } - type wants struct { - organizationConfig *chronograf.OrganizationConfig - err error - } - tests := []struct { - name string - args args - wants wants - }{ - { - name: "Set default org config", - args: args{ - organizationConfig: &chronograf.OrganizationConfig{ - OrganizationID: "default", - LogViewer: chronograf.LogViewerConfig{ - Columns: []chronograf.LogViewerColumn{ - { - Name: "time", - Position: 1, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "visible", - }, - }, - }, - { - Name: "severity", - Position: 0, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - { - Type: "label", - Value: "text", - }, - }, - }, - { - Name: "timestamp", - Position: 2, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - }, - }, - { - Name: "message", - Position: 3, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - }, - }, - { - Name: "facility", - Position: 4, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - }, - }, - { - Name: "procid", - Position: 5, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - { - Type: "displayName", - Value: "Milkshake", - }, - }, - }, - { - Name: "appname", - Position: 6, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "visible", - }, - { - Type: "displayName", - Value: "Application", - }, - }, - }, - { - Name: "host", - Position: 7, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "visible", - }, - }, - }, - }, - }, - }, - organizationID: "default", - }, - wants: wants{ - organizationConfig: &chronograf.OrganizationConfig{ - LogViewer: chronograf.LogViewerConfig{ - Columns: []chronograf.LogViewerColumn{ - { - Name: "time", - Position: 1, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "visible", - }, - }, - }, - { - Name: "severity", - Position: 0, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - { - Type: "label", - Value: "text", - }, - }, - }, - { - Name: "timestamp", - Position: 2, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - }, - }, - { - Name: "message", - Position: 3, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - }, - }, - { - Name: "facility", - Position: 4, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - }, - }, - { - Name: "procid", - Position: 5, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - { - Type: "displayName", - Value: "Milkshake", - 
}, - }, - }, - { - Name: "appname", - Position: 6, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "visible", - }, - { - Type: "displayName", - Value: "Application", - }, - }, - }, - { - Name: "host", - Position: 7, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "visible", - }, - }, - }, - }, - }, - OrganizationID: "default", - }, - }, - }, - { - name: "Set non-default org config", - args: args{ - organizationConfig: &chronograf.OrganizationConfig{ - OrganizationID: "1337", - LogViewer: chronograf.LogViewerConfig{ - Columns: []chronograf.LogViewerColumn{ - { - Name: "time", - Position: 1, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "visible", - }, - }, - }, - { - Name: "severity", - Position: 0, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - { - Type: "label", - Value: "text", - }, - }, - }, - { - Name: "timestamp", - Position: 2, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - }, - }, - { - Name: "message", - Position: 3, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - }, - }, - { - Name: "facility", - Position: 4, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - }, - }, - { - Name: "procid", - Position: 5, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - { - Type: "displayName", - Value: "Milkshake", - }, - }, - }, - { - Name: "appname", - Position: 6, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "visible", - }, - { - Type: "displayName", - Value: "Application", - }, - }, - }, - { - Name: "host", - Position: 7, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "visible", - }, - }, - }, - }, - }, - }, - organizationID: "1337", - }, - wants: wants{ - organizationConfig: &chronograf.OrganizationConfig{ - LogViewer: chronograf.LogViewerConfig{ - Columns: []chronograf.LogViewerColumn{ - { - Name: "time", - Position: 1, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "visible", - }, - }, - }, - { - Name: "severity", - Position: 0, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - { - Type: "label", - Value: "text", - }, - }, - }, - { - Name: "timestamp", - Position: 2, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - }, - }, - { - Name: "message", - Position: 3, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - }, - }, - { - Name: "facility", - Position: 4, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - }, - }, - { - Name: "procid", - Position: 5, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - { - Type: "displayName", - Value: "Milkshake", - }, - }, - }, - { - Name: "appname", - Position: 6, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "visible", - }, - { - Type: "displayName", - Value: "Application", - }, - }, - }, - { - Name: "host", - Position: 7, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "visible", - }, - }, - }, - }, - }, - OrganizationID: "1337", - }, - }, - }, - } - for _, tt := range tests { - client, err := NewTestClient() - if err != nil { - t.Fatal(err) - } - 
defer client.Close() - - s := client.OrganizationConfigStore - err = s.Put(context.Background(), tt.args.organizationConfig) - if (tt.wants.err != nil) != (err != nil) { - t.Errorf("%q. OrganizationConfigStore.Put() error = %v, wantErr %v", tt.name, err, tt.wants.err) - continue - } - - got, _ := s.FindOrCreate(context.Background(), tt.args.organizationID) - if (tt.wants.err != nil) != (err != nil) { - t.Errorf("%q. OrganizationConfigStore.Put() error = %v, wantErr %v", tt.name, err, tt.wants.err) - continue - } - - if diff := cmp.Diff(got, tt.wants.organizationConfig); diff != "" { - t.Errorf("%q. OrganizationConfigStore.Put():\n-got/+want\ndiff %s", tt.name, diff) - } - } -} diff --git a/chronograf/bolt/organizations.go b/chronograf/bolt/organizations.go deleted file mode 100644 index f3e0f687bd9..00000000000 --- a/chronograf/bolt/organizations.go +++ /dev/null @@ -1,304 +0,0 @@ -package bolt - -import ( - "context" - "fmt" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/bolt/internal" - "github.com/influxdata/influxdb/v2/chronograf/organizations" - bolt "go.etcd.io/bbolt" -) - -// Ensure OrganizationsStore implements chronograf.OrganizationsStore. -var _ chronograf.OrganizationsStore = &OrganizationsStore{} - -var ( - // OrganizationsBucket is the bucket where organizations are stored. - OrganizationsBucket = []byte("OrganizationsV1") - // DefaultOrganizationID is the ID of the default organization. - DefaultOrganizationID = []byte("default") -) - -const ( - // DefaultOrganizationName is the Name of the default organization - DefaultOrganizationName string = "Default" - // DefaultOrganizationRole is the DefaultRole for the Default organization - DefaultOrganizationRole string = "member" -) - -// OrganizationsStore uses bolt to store and retrieve Organizations -type OrganizationsStore struct { - client *Client -} - -// Migrate sets the default organization at runtime -func (s *OrganizationsStore) Migrate(ctx context.Context) error { - return s.CreateDefault(ctx) -} - -// CreateDefault does a findOrCreate on the default organization -func (s *OrganizationsStore) CreateDefault(ctx context.Context) error { - o := chronograf.Organization{ - ID: string(DefaultOrganizationID), - Name: DefaultOrganizationName, - DefaultRole: DefaultOrganizationRole, - } - - m := chronograf.Mapping{ - ID: string(DefaultOrganizationID), - Organization: string(DefaultOrganizationID), - Provider: chronograf.MappingWildcard, - Scheme: chronograf.MappingWildcard, - ProviderOrganization: chronograf.MappingWildcard, - } - return s.client.db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket(OrganizationsBucket) - v := b.Get(DefaultOrganizationID) - if v != nil { - return nil - } - if v, err := internal.MarshalOrganization(&o); err != nil { - return err - } else if err := b.Put(DefaultOrganizationID, v); err != nil { - return err - } - - b = tx.Bucket(MappingsBucket) - v = b.Get(DefaultOrganizationID) - if v != nil { - return nil - } - if v, err := internal.MarshalMapping(&m); err != nil { - return err - } else if err := b.Put(DefaultOrganizationID, v); err != nil { - return err - } - - return nil - }) -} - -func (s *OrganizationsStore) nameIsUnique(ctx context.Context, name string) bool { - _, err := s.Get(ctx, chronograf.OrganizationQuery{Name: &name}) - switch err { - case chronograf.ErrOrganizationNotFound: - return true - default: - return false - } -} - -// DefaultOrganizationID returns the ID of the default organization -func (s 
*OrganizationsStore) DefaultOrganization(ctx context.Context) (*chronograf.Organization, error) { - var org chronograf.Organization - if err := s.client.db.View(func(tx *bolt.Tx) error { - v := tx.Bucket(OrganizationsBucket).Get(DefaultOrganizationID) - return internal.UnmarshalOrganization(v, &org) - }); err != nil { - return nil, err - } - - return &org, nil -} - -// Add creates a new Organization in the OrganizationsStore -func (s *OrganizationsStore) Add(ctx context.Context, o *chronograf.Organization) (*chronograf.Organization, error) { - if !s.nameIsUnique(ctx, o.Name) { - return nil, chronograf.ErrOrganizationAlreadyExists - } - err := s.client.db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket(OrganizationsBucket) - seq, err := b.NextSequence() - if err != nil { - return err - } - o.ID = fmt.Sprintf("%d", seq) - - v, err := internal.MarshalOrganization(o) - if err != nil { - return err - } - - return b.Put([]byte(o.ID), v) - }) - - return o, err -} - -// All returns all known organizations -func (s *OrganizationsStore) All(ctx context.Context) ([]chronograf.Organization, error) { - var orgs []chronograf.Organization - err := s.each(ctx, func(o *chronograf.Organization) { - orgs = append(orgs, *o) - }) - - if err != nil { - return nil, err - } - - return orgs, nil -} - -// Delete the organization from OrganizationsStore -func (s *OrganizationsStore) Delete(ctx context.Context, o *chronograf.Organization) error { - if o.ID == string(DefaultOrganizationID) { - return chronograf.ErrCannotDeleteDefaultOrganization - } - _, err := s.get(ctx, o.ID) - if err != nil { - return err - } - if err := s.client.db.Update(func(tx *bolt.Tx) error { - return tx.Bucket(OrganizationsBucket).Delete([]byte(o.ID)) - }); err != nil { - return err - } - - // Dependent Delete of all resources - - // Each of the associated organization stores expects organization to be - // set on the context. 
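This context convention is easy to get wrong, so a small self-contained example of the value-scoping pattern may help; it uses a hypothetical unexported key type in place of `organizations.ContextKey`:

```go
package main

import (
	"context"
	"fmt"
)

// ctxKey is an unexported key type, the usual guard against collisions
// when stashing values in a context. The deleted code uses
// organizations.ContextKey for the same purpose.
type ctxKey struct{}

func orgFromContext(ctx context.Context) (string, bool) {
	id, ok := ctx.Value(ctxKey{}).(string)
	return id, ok
}

func main() {
	ctx := context.WithValue(context.Background(), ctxKey{}, "1337")
	if id, ok := orgFromContext(ctx); ok {
		fmt.Println("scoped to organization", id)
	}
}
```

An unexported key type guarantees that no other package can collide with the stored value, so callers must go through the accessor.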
- ctx = context.WithValue(ctx, organizations.ContextKey, o.ID) - - sourcesStore := organizations.NewSourcesStore(s.client.SourcesStore, o.ID) - sources, err := sourcesStore.All(ctx) - if err != nil { - return err - } - for _, source := range sources { - if err := sourcesStore.Delete(ctx, source); err != nil { - return err - } - } - - serversStore := organizations.NewServersStore(s.client.ServersStore, o.ID) - servers, err := serversStore.All(ctx) - if err != nil { - return err - } - for _, server := range servers { - if err := serversStore.Delete(ctx, server); err != nil { - return err - } - } - - dashboardsStore := organizations.NewDashboardsStore(s.client.DashboardsStore, o.ID) - dashboards, err := dashboardsStore.All(ctx) - if err != nil { - return err - } - for _, dashboard := range dashboards { - if err := dashboardsStore.Delete(ctx, dashboard); err != nil { - return err - } - } - - usersStore := organizations.NewUsersStore(s.client.UsersStore, o.ID) - users, err := usersStore.All(ctx) - if err != nil { - return err - } - for _, user := range users { - if err := usersStore.Delete(ctx, &user); err != nil { - return err - } - } - - mappings, err := s.client.MappingsStore.All(ctx) - if err != nil { - return err - } - for _, mapping := range mappings { - if mapping.Organization == o.ID { - if err := s.client.MappingsStore.Delete(ctx, &mapping); err != nil { - return err - } - } - } - - return nil -} - -func (s *OrganizationsStore) get(ctx context.Context, id string) (*chronograf.Organization, error) { - var o chronograf.Organization - err := s.client.db.View(func(tx *bolt.Tx) error { - v := tx.Bucket(OrganizationsBucket).Get([]byte(id)) - if v == nil { - return chronograf.ErrOrganizationNotFound - } - return internal.UnmarshalOrganization(v, &o) - }) - - if err != nil { - return nil, err - } - - return &o, nil -} - -func (s *OrganizationsStore) each(ctx context.Context, fn func(*chronograf.Organization)) error { - return s.client.db.View(func(tx *bolt.Tx) error { - return tx.Bucket(OrganizationsBucket).ForEach(func(k, v []byte) error { - var org chronograf.Organization - if err := internal.UnmarshalOrganization(v, &org); err != nil { - return err - } - fn(&org) - return nil - }) - }) -} - -// Get returns a Organization if the id exists. -// If an ID is provided in the query, the lookup time for an organization will be O(1). -// If Name is provided, the lookup time will be O(n). -// Get expects that only one of ID or Name will be specified, but will prefer ID over Name if both are specified. 
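The lookup contract in the comment above can be captured in a few lines. In this sketch a plain map stands in for the bolt bucket, and pointer arguments model the optional `ID`/`Name` fields of `chronograf.OrganizationQuery`:

```go
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("organization not found")

type org struct{ ID, Name string }

// get illustrates the contract described above: an ID is a direct key
// lookup (O(1)), while a name requires scanning every record (O(n)),
// and ID wins when both are supplied.
func get(orgs map[string]org, id, name *string) (*org, error) {
	if id != nil {
		if o, ok := orgs[*id]; ok {
			return &o, nil
		}
		return nil, errNotFound
	}
	if name != nil {
		for _, o := range orgs { // full scan, as in the deleted each()
			if o.Name == *name {
				return &o, nil
			}
		}
		return nil, errNotFound
	}
	return nil, errors.New("must specify either ID or Name")
}

func main() {
	orgs := map[string]org{"1": {ID: "1", Name: "The Good Place"}}
	name := "The Good Place"
	o, err := get(orgs, nil, &name)
	fmt.Println(o, err)
}
```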
-func (s *OrganizationsStore) Get(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - if q.ID != nil { - return s.get(ctx, *q.ID) - } - - if q.Name != nil { - var org *chronograf.Organization - err := s.each(ctx, func(o *chronograf.Organization) { - if org != nil { - return - } - - if o.Name == *q.Name { - org = o - } - }) - - if err != nil { - return nil, err - } - - if org == nil { - return nil, chronograf.ErrOrganizationNotFound - } - - return org, nil - } - return nil, fmt.Errorf("must specify either ID, or Name in OrganizationQuery") -} - -// Update the organization in OrganizationsStore -func (s *OrganizationsStore) Update(ctx context.Context, o *chronograf.Organization) error { - org, err := s.get(ctx, o.ID) - if err != nil { - return err - } - if o.Name != org.Name && !s.nameIsUnique(ctx, o.Name) { - return chronograf.ErrOrganizationAlreadyExists - } - return s.client.db.Update(func(tx *bolt.Tx) error { - if v, err := internal.MarshalOrganization(o); err != nil { - return err - } else if err := tx.Bucket(OrganizationsBucket).Put([]byte(o.ID), v); err != nil { - return err - } - return nil - }) -} diff --git a/chronograf/bolt/organizations_test.go b/chronograf/bolt/organizations_test.go deleted file mode 100644 index 827fe213056..00000000000 --- a/chronograf/bolt/organizations_test.go +++ /dev/null @@ -1,659 +0,0 @@ -package bolt_test - -import ( - "context" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/bolt" - "github.com/influxdata/influxdb/v2/chronograf/roles" -) - -var orgCmpOptions = cmp.Options{ - cmpopts.IgnoreFields(chronograf.Organization{}, "ID"), - cmpopts.EquateEmpty(), -} - -func TestOrganizationsStore_GetWithName(t *testing.T) { - type args struct { - ctx context.Context - org *chronograf.Organization - } - tests := []struct { - name string - args args - want *chronograf.Organization - wantErr bool - addFirst bool - }{ - { - name: "Organization not found", - args: args{ - ctx: context.Background(), - org: &chronograf.Organization{}, - }, - wantErr: true, - }, - { - name: "Get Organization", - args: args{ - ctx: context.Background(), - org: &chronograf.Organization{ - Name: "EE - Evil Empire", - }, - }, - want: &chronograf.Organization{ - Name: "EE - Evil Empire", - }, - addFirst: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - client, err := NewTestClient() - if err != nil { - t.Fatal(err) - } - defer client.Close() - - s := client.OrganizationsStore - if tt.addFirst { - tt.args.org, err = s.Add(tt.args.ctx, tt.args.org) - if err != nil { - t.Fatal(err) - } - } - - got, err := s.Get(tt.args.ctx, chronograf.OrganizationQuery{Name: &tt.args.org.Name}) - if (err != nil) != tt.wantErr { - t.Errorf("%q. OrganizationsStore.Get() error = %v, wantErr %v", tt.name, err, tt.wantErr) - } - if tt.wantErr { - return - } - if diff := cmp.Diff(got, tt.want, orgCmpOptions...); diff != "" { - t.Errorf("%q. 
OrganizationsStore.Get():\n-got/+want\ndiff %s", tt.name, diff) - } - - }) - } -} - -func TestOrganizationsStore_GetWithID(t *testing.T) { - type args struct { - ctx context.Context - org *chronograf.Organization - } - tests := []struct { - name string - args args - want *chronograf.Organization - wantErr bool - addFirst bool - }{ - { - name: "Organization not found", - args: args{ - ctx: context.Background(), - org: &chronograf.Organization{ - ID: "1234", - }, - }, - wantErr: true, - }, - { - name: "Get Organization", - args: args{ - ctx: context.Background(), - org: &chronograf.Organization{ - Name: "EE - Evil Empire", - }, - }, - want: &chronograf.Organization{ - Name: "EE - Evil Empire", - }, - addFirst: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - client, err := NewTestClient() - if err != nil { - t.Fatal(err) - } - defer client.Close() - - s := client.OrganizationsStore - if tt.addFirst { - tt.args.org, err = s.Add(tt.args.ctx, tt.args.org) - if err != nil { - t.Fatal(err) - } - } - - got, err := s.Get(tt.args.ctx, chronograf.OrganizationQuery{ID: &tt.args.org.ID}) - if (err != nil) != tt.wantErr { - t.Errorf("%q. OrganizationsStore.Get() error = %v, wantErr %v", tt.name, err, tt.wantErr) - return - } - if tt.wantErr { - return - } - if diff := cmp.Diff(got, tt.want, orgCmpOptions...); diff != "" { - t.Errorf("%q. OrganizationsStore.Get():\n-got/+want\ndiff %s", tt.name, diff) - } - - }) - } -} - -func TestOrganizationsStore_All(t *testing.T) { - type args struct { - ctx context.Context - orgs []chronograf.Organization - } - tests := []struct { - name string - args args - want []chronograf.Organization - addFirst bool - }{ - { - name: "Get Organizations", - args: args{ - ctx: context.Background(), - orgs: []chronograf.Organization{ - { - Name: "EE - Evil Empire", - DefaultRole: roles.MemberRoleName, - }, - { - Name: "The Good Place", - DefaultRole: roles.EditorRoleName, - }, - }, - }, - want: []chronograf.Organization{ - { - Name: "EE - Evil Empire", - DefaultRole: roles.MemberRoleName, - }, - { - Name: "The Good Place", - DefaultRole: roles.EditorRoleName, - }, - { - Name: bolt.DefaultOrganizationName, - DefaultRole: bolt.DefaultOrganizationRole, - }, - }, - addFirst: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - client, err := NewTestClient() - if err != nil { - t.Fatal(err) - } - defer client.Close() - - s := client.OrganizationsStore - if tt.addFirst { - for _, org := range tt.args.orgs { - _, err = s.Add(tt.args.ctx, &org) - if err != nil { - t.Fatal(err) - } - } - } - - got, err := s.All(tt.args.ctx) - if err != nil { - t.Fatal(err) - return - } - if diff := cmp.Diff(got, tt.want, orgCmpOptions...); diff != "" { - t.Errorf("%q. 
OrganizationsStore.All():\n-got/+want\ndiff %s", tt.name, diff) - } - }) - } -} - -func TestOrganizationsStore_Update(t *testing.T) { - type fields struct { - orgs []chronograf.Organization - } - type args struct { - ctx context.Context - initial *chronograf.Organization - updates *chronograf.Organization - } - tests := []struct { - name string - fields fields - args args - addFirst bool - want *chronograf.Organization - wantErr bool - }{ - { - name: "No such organization", - fields: fields{}, - args: args{ - ctx: context.Background(), - initial: &chronograf.Organization{ - ID: "1234", - Name: "The Okay Place", - }, - updates: &chronograf.Organization{}, - }, - wantErr: true, - }, - { - name: "Update organization name", - fields: fields{}, - args: args{ - ctx: context.Background(), - initial: &chronograf.Organization{ - Name: "The Good Place", - }, - updates: &chronograf.Organization{ - Name: "The Bad Place", - }, - }, - want: &chronograf.Organization{ - Name: "The Bad Place", - }, - addFirst: true, - }, - { - name: "Update organization default role", - fields: fields{}, - args: args{ - ctx: context.Background(), - initial: &chronograf.Organization{ - Name: "The Good Place", - }, - updates: &chronograf.Organization{ - DefaultRole: roles.ViewerRoleName, - }, - }, - want: &chronograf.Organization{ - Name: "The Good Place", - DefaultRole: roles.ViewerRoleName, - }, - addFirst: true, - }, - { - name: "Update organization name and default role", - fields: fields{}, - args: args{ - ctx: context.Background(), - initial: &chronograf.Organization{ - Name: "The Good Place", - DefaultRole: roles.AdminRoleName, - }, - updates: &chronograf.Organization{ - Name: "The Bad Place", - DefaultRole: roles.ViewerRoleName, - }, - }, - want: &chronograf.Organization{ - Name: "The Bad Place", - DefaultRole: roles.ViewerRoleName, - }, - addFirst: true, - }, - { - name: "Update organization name, role", - fields: fields{}, - args: args{ - ctx: context.Background(), - initial: &chronograf.Organization{ - Name: "The Good Place", - DefaultRole: roles.ViewerRoleName, - }, - updates: &chronograf.Organization{ - Name: "The Bad Place", - DefaultRole: roles.AdminRoleName, - }, - }, - want: &chronograf.Organization{ - Name: "The Bad Place", - DefaultRole: roles.AdminRoleName, - }, - addFirst: true, - }, - { - name: "Update organization name", - fields: fields{}, - args: args{ - ctx: context.Background(), - initial: &chronograf.Organization{ - Name: "The Good Place", - DefaultRole: roles.EditorRoleName, - }, - updates: &chronograf.Organization{ - Name: "The Bad Place", - }, - }, - want: &chronograf.Organization{ - Name: "The Bad Place", - DefaultRole: roles.EditorRoleName, - }, - addFirst: true, - }, - { - name: "Update organization name", - fields: fields{}, - args: args{ - ctx: context.Background(), - initial: &chronograf.Organization{ - Name: "The Good Place", - }, - updates: &chronograf.Organization{ - Name: "The Bad Place", - }, - }, - want: &chronograf.Organization{ - Name: "The Bad Place", - }, - addFirst: true, - }, - { - name: "Update organization name - name already taken", - fields: fields{ - orgs: []chronograf.Organization{ - { - Name: "The Bad Place", - }, - }, - }, - args: args{ - ctx: context.Background(), - initial: &chronograf.Organization{ - Name: "The Good Place", - }, - updates: &chronograf.Organization{ - Name: "The Bad Place", - }, - }, - wantErr: true, - addFirst: true, - }, - } - for _, tt := range tests { - client, err := NewTestClient() - if err != nil { - t.Fatal(err) - } - defer client.Close() - - 
s := client.OrganizationsStore - - for _, org := range tt.fields.orgs { - _, err = s.Add(tt.args.ctx, &org) - if err != nil { - t.Fatal(err) - } - } - - if tt.addFirst { - tt.args.initial, err = s.Add(tt.args.ctx, tt.args.initial) - if err != nil { - t.Fatal(err) - } - } - - if tt.args.updates.Name != "" { - tt.args.initial.Name = tt.args.updates.Name - } - if tt.args.updates.DefaultRole != "" { - tt.args.initial.DefaultRole = tt.args.updates.DefaultRole - } - - if err := s.Update(tt.args.ctx, tt.args.initial); (err != nil) != tt.wantErr { - t.Errorf("%q. OrganizationsStore.Update() error = %v, wantErr %v", tt.name, err, tt.wantErr) - } - - // for the empty test - if tt.want == nil { - continue - } - - got, err := s.Get(tt.args.ctx, chronograf.OrganizationQuery{Name: &tt.args.initial.Name}) - if err != nil { - t.Fatalf("failed to get organization: %v", err) - } - if diff := cmp.Diff(got, tt.want, orgCmpOptions...); diff != "" { - t.Errorf("%q. OrganizationsStore.Update():\n-got/+want\ndiff %s", tt.name, diff) - } - } -} - -func TestOrganizationStore_Delete(t *testing.T) { - type args struct { - ctx context.Context - org *chronograf.Organization - } - tests := []struct { - name string - args args - addFirst bool - wantErr bool - }{ - { - name: "No such organization", - args: args{ - ctx: context.Background(), - org: &chronograf.Organization{ - ID: "10", - }, - }, - wantErr: true, - }, - { - name: "Delete new organization", - args: args{ - ctx: context.Background(), - org: &chronograf.Organization{ - Name: "The Deleted Place", - }, - }, - addFirst: true, - }, - } - for _, tt := range tests { - client, err := NewTestClient() - if err != nil { - t.Fatal(err) - } - defer client.Close() - - s := client.OrganizationsStore - - if tt.addFirst { - tt.args.org, _ = s.Add(tt.args.ctx, tt.args.org) - } - if err := s.Delete(tt.args.ctx, tt.args.org); (err != nil) != tt.wantErr { - t.Errorf("%q. OrganizationsStore.Delete() error = %v, wantErr %v", tt.name, err, tt.wantErr) - } - } -} - -func TestOrganizationStore_DeleteDefaultOrg(t *testing.T) { - type args struct { - ctx context.Context - } - tests := []struct { - name string - args args - wantErr bool - }{ - { - name: "Delete the default organization", - args: args{ - ctx: context.Background(), - }, - wantErr: true, - }, - } - for _, tt := range tests { - client, err := NewTestClient() - if err != nil { - t.Fatal(err) - } - defer client.Close() - - s := client.OrganizationsStore - - defaultOrg, err := s.DefaultOrganization(tt.args.ctx) - if err != nil { - t.Fatal(err) - } - if err := s.Delete(tt.args.ctx, defaultOrg); (err != nil) != tt.wantErr { - t.Errorf("%q. 
OrganizationsStore.Delete() error = %v, wantErr %v", tt.name, err, tt.wantErr)
-		}
-	}
-}
-
-func TestOrganizationsStore_Add(t *testing.T) {
-	type fields struct {
-		orgs []chronograf.Organization
-	}
-	type args struct {
-		ctx context.Context
-		org *chronograf.Organization
-	}
-	tests := []struct {
-		name    string
-		fields  fields
-		args    args
-		want    *chronograf.Organization
-		wantErr bool
-	}{
-		{
-			name: "Add organization - organization already exists",
-			fields: fields{
-				orgs: []chronograf.Organization{
-					{
-						Name: "The Good Place",
-					},
-				},
-			},
-			args: args{
-				ctx: context.Background(),
-				org: &chronograf.Organization{
-					Name: "The Good Place",
-				},
-			},
-			wantErr: true,
-		},
-	}
-	for _, tt := range tests {
-		client, err := NewTestClient()
-		if err != nil {
-			t.Fatal(err)
-		}
-		defer client.Close()
-
-		s := client.OrganizationsStore
-
-		for _, org := range tt.fields.orgs {
-			_, err = s.Add(tt.args.ctx, &org)
-			if err != nil {
-				t.Fatal(err)
-			}
-		}
-
-		_, err = s.Add(tt.args.ctx, tt.args.org)
-
-		if (err != nil) != tt.wantErr {
-			t.Errorf("%q. OrganizationsStore.Add() error = %v, wantErr %v", tt.name, err, tt.wantErr)
-		}
-
-		// for the empty test
-		if tt.want == nil {
-			continue
-		}
-
-		got, err := s.Get(tt.args.ctx, chronograf.OrganizationQuery{Name: &tt.args.org.Name})
-		if err != nil {
-			t.Fatalf("failed to get organization: %v", err)
-		}
-		if diff := cmp.Diff(got, tt.want, orgCmpOptions...); diff != "" {
-			t.Errorf("%q. OrganizationsStore.Add():\n-got/+want\ndiff %s", tt.name, diff)
-		}
-	}
-}
-
-func TestOrganizationsStore_DefaultOrganization(t *testing.T) {
-	type fields struct {
-		orgs []chronograf.Organization
-	}
-	type args struct {
-		ctx context.Context
-	}
-	tests := []struct {
-		name    string
-		fields  fields
-		args    args
-		want    *chronograf.Organization
-		wantErr bool
-	}{
-		{
-			name: "Get Default Organization",
-			fields: fields{
-				orgs: []chronograf.Organization{
-					{
-						Name: "The Good Place",
-					},
-				},
-			},
-			args: args{
-				ctx: context.Background(),
-			},
-			want: &chronograf.Organization{
-				ID:          string(bolt.DefaultOrganizationID),
-				Name:        bolt.DefaultOrganizationName,
-				DefaultRole: bolt.DefaultOrganizationRole,
-			},
-			wantErr: false,
-		},
-	}
-	for _, tt := range tests {
-		client, err := NewTestClient()
-		if err != nil {
-			t.Fatal(err)
-		}
-		defer client.Close()
-		s := client.OrganizationsStore
-
-		for _, org := range tt.fields.orgs {
-			_, err = s.Add(tt.args.ctx, &org)
-			if err != nil {
-				t.Fatal(err)
-			}
-		}
-
-		got, err := s.DefaultOrganization(tt.args.ctx)
-
-		if (err != nil) != tt.wantErr {
-			t.Errorf("%q. OrganizationsStore.DefaultOrganization() error = %v, wantErr %v", tt.name, err, tt.wantErr)
-		}
-
-		if tt.want == nil {
-			continue
-		}
-
-		if diff := cmp.Diff(got, tt.want, orgCmpOptions...); diff != "" {
-			t.Errorf("%q. OrganizationsStore.DefaultOrganization():\n-got/+want\ndiff %s", tt.name, diff)
-		}
-	}
-}
diff --git a/chronograf/bolt/servers.go b/chronograf/bolt/servers.go
deleted file mode 100644
index dbe694f309f..00000000000
--- a/chronograf/bolt/servers.go
+++ /dev/null
@@ -1,183 +0,0 @@
-package bolt
-
-import (
-	"context"
-
-	"github.com/influxdata/influxdb/v2/chronograf"
-	"github.com/influxdata/influxdb/v2/chronograf/bolt/internal"
-	bolt "go.etcd.io/bbolt"
-)
-
-// Ensure ServersStore implements chronograf.ServersStore.
-var _ chronograf.ServersStore = &ServersStore{}
-
-// ServersBucket is the bolt bucket to store lists of servers
-var ServersBucket = []byte("Servers")
-
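All of the bolt stores removed in this patch share one bbolt idiom: reads run inside `View` transactions, writes inside `Update` transactions, and integer IDs come from the bucket's `NextSequence`, big-endian encoded into keys (see `itob` in `chronograf/bolt/util.go` below). A minimal, self-contained sketch of that idiom; the bucket name mirrors `ServersBucket`, while the file path and payload are purely illustrative:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	// Illustrative path; the real stores share a db handle via *Client.
	db, err := bolt.Open("/tmp/servers.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	bucket := []byte("Servers")

	// Writes happen in an Update (read-write) transaction.
	if err := db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists(bucket)
		if err != nil {
			return err
		}
		seq, err := b.NextSequence() // monotonically increasing ID, as in Add
		if err != nil {
			return err
		}
		key := make([]byte, 8)
		binary.BigEndian.PutUint64(key, seq) // same encoding as itob
		return b.Put(key, []byte("marshaled server bytes"))
	}); err != nil {
		log.Fatal(err)
	}

	// Reads happen in a View (read-only) transaction.
	if err := db.View(func(tx *bolt.Tx) error {
		return tx.Bucket(bucket).ForEach(func(k, v []byte) error {
			fmt.Printf("id=%d value=%q\n", binary.BigEndian.Uint64(k), v)
			return nil
		})
	}); err != nil {
		log.Fatal(err)
	}
}
```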
-// ServersStore is the bolt implementation of chronograf.ServersStore.
-// Used to store servers that are associated in some way with a source
-type ServersStore struct {
-	client *Client
-}
-
-func (s *ServersStore) Migrate(ctx context.Context) error {
-	servers, err := s.All(ctx)
-	if err != nil {
-		return err
-	}
-
-	defaultOrg, err := s.client.OrganizationsStore.DefaultOrganization(ctx)
-	if err != nil {
-		return err
-	}
-
-	for _, server := range servers {
-		if server.Organization == "" {
-			server.Organization = defaultOrg.ID
-			if err := s.Update(ctx, server); err != nil {
-				return err
-			}
-		}
-	}
-
-	return nil
-}
-
-// All returns all known servers
-func (s *ServersStore) All(ctx context.Context) ([]chronograf.Server, error) {
-	var srcs []chronograf.Server
-	if err := s.client.db.View(func(tx *bolt.Tx) error {
-		var err error
-		srcs, err = s.all(ctx, tx)
-		if err != nil {
-			return err
-		}
-		return nil
-	}); err != nil {
-		return nil, err
-	}
-
-	return srcs, nil
-}
-
-// Add creates a new Server in the ServersStore.
-func (s *ServersStore) Add(ctx context.Context, src chronograf.Server) (chronograf.Server, error) {
-	if err := s.client.db.Update(func(tx *bolt.Tx) error {
-		b := tx.Bucket(ServersBucket)
-		seq, err := b.NextSequence()
-		if err != nil {
-			return err
-		}
-		src.ID = int(seq)
-
-		// make the newly added server "active"
-		if err := s.resetActiveServer(ctx, tx); err != nil {
-			return err
-		}
-		src.Active = true
-
-		if v, err := internal.MarshalServer(src); err != nil {
-			return err
-		} else if err := b.Put(itob(src.ID), v); err != nil {
-			return err
-		}
-		return nil
-	}); err != nil {
-		return chronograf.Server{}, err
-	}
-
-	return src, nil
-}
-
-// Delete removes the Server from the ServersStore
-func (s *ServersStore) Delete(ctx context.Context, src chronograf.Server) error {
-	if err := s.client.db.Update(func(tx *bolt.Tx) error {
-		if err := tx.Bucket(ServersBucket).Delete(itob(src.ID)); err != nil {
-			return err
-		}
-		return nil
-	}); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// Get returns a Server if the id exists.
-func (s *ServersStore) Get(ctx context.Context, id int) (chronograf.Server, error) {
-	var src chronograf.Server
-	if err := s.client.db.View(func(tx *bolt.Tx) error {
-		if v := tx.Bucket(ServersBucket).Get(itob(id)); v == nil {
-			return chronograf.ErrServerNotFound
-		} else if err := internal.UnmarshalServer(v, &src); err != nil {
-			return err
-		}
-		return nil
-	}); err != nil {
-		return chronograf.Server{}, err
-	}
-
-	return src, nil
-}
-
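`Add` above always promotes the newest server to the active one via `resetActiveServer`. A sketch of a test that would pin that invariant down, written in the style of `servers_test.go` below (it assumes that file's imports and its `NewTestClient` helper; the field values are made up):

```go
func TestOnlyNewestServerActive(t *testing.T) {
	client, err := NewTestClient()
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	ctx := context.Background()
	s := client.ServersStore

	first, err := s.Add(ctx, chronograf.Server{Name: "a"})
	if err != nil {
		t.Fatal(err)
	}
	second, err := s.Add(ctx, chronograf.Server{Name: "b"})
	if err != nil {
		t.Fatal(err)
	}

	// Adding the second server should have deactivated the first.
	if !second.Active {
		t.Fatal("expected newest server to be active")
	}
	if got, err := s.Get(ctx, first.ID); err != nil {
		t.Fatal(err)
	} else if got.Active {
		t.Fatal("expected older server to be deactivated")
	}
}
```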
-// Update a Server
-func (s *ServersStore) Update(ctx context.Context, src chronograf.Server) error {
-	if err := s.client.db.Update(func(tx *bolt.Tx) error {
-		// Get an existing server with the same ID.
-		b := tx.Bucket(ServersBucket)
-		if v := b.Get(itob(src.ID)); v == nil {
-			return chronograf.ErrServerNotFound
-		}
-
-		// only one server can be active at a time
-		if src.Active {
-			if err := s.resetActiveServer(ctx, tx); err != nil {
-				return err
-			}
-		}
-
-		if v, err := internal.MarshalServer(src); err != nil {
-			return err
-		} else if err := b.Put(itob(src.ID), v); err != nil {
-			return err
-		}
-		return nil
-	}); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (s *ServersStore) all(ctx context.Context, tx *bolt.Tx) ([]chronograf.Server, error) {
-	var srcs []chronograf.Server
-	if err := tx.Bucket(ServersBucket).ForEach(func(k, v []byte) error {
-		var src chronograf.Server
-		if err := internal.UnmarshalServer(v, &src); err != nil {
-			return err
-		}
-		srcs = append(srcs, src)
-		return nil
-	}); err != nil {
-		return srcs, err
-	}
-	return srcs, nil
-}
-
-// resetActiveServer unsets the Active flag on all servers
-func (s *ServersStore) resetActiveServer(ctx context.Context, tx *bolt.Tx) error {
-	b := tx.Bucket(ServersBucket)
-	srcs, err := s.all(ctx, tx)
-	if err != nil {
-		return err
-	}
-
-	for _, other := range srcs {
-		if other.Active {
-			other.Active = false
-			if v, err := internal.MarshalServer(other); err != nil {
-				return err
-			} else if err := b.Put(itob(other.ID), v); err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
diff --git a/chronograf/bolt/servers_test.go b/chronograf/bolt/servers_test.go
deleted file mode 100644
index dbf6f72cb1f..00000000000
--- a/chronograf/bolt/servers_test.go
+++ /dev/null
@@ -1,114 +0,0 @@
-package bolt_test
-
-import (
-	"context"
-	"reflect"
-	"testing"
-
-	"github.com/influxdata/influxdb/v2/chronograf"
-)
-
-// Ensure a ServersStore can store, retrieve, update, and delete servers.
-func TestServerStore(t *testing.T) {
-	c, err := NewTestClient()
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer c.Close()
-
-	s := c.ServersStore
-
-	srcs := []chronograf.Server{
-		{
-			Name:               "Of Truth",
-			SrcID:              10,
-			Username:           "marty",
-			Password:           "I❤️ jennifer parker",
-			URL:                "toyota-hilux.lyon-estates.local",
-			Active:             false,
-			Organization:       "133",
-			InsecureSkipVerify: true,
-		},
-		{
-			Name:               "HipToBeSquare",
-			SrcID:              12,
-			Username:           "calvinklein",
-			Password:           "chuck b3rry",
-			URL:                "toyota-hilux.lyon-estates.local",
-			Active:             false,
-			Organization:       "133",
-			InsecureSkipVerify: false,
-		},
-	}
-
-	// Add new srcs.
-	ctx := context.Background()
-	for i, src := range srcs {
-		if srcs[i], err = s.Add(ctx, src); err != nil {
-			t.Fatal(err)
-		}
-		// Confirm first src in the store is the same as the original.
-		if actual, err := s.Get(ctx, srcs[i].ID); err != nil {
-			t.Fatal(err)
-		} else if !reflect.DeepEqual(actual, srcs[i]) {
-			t.Fatalf("server loaded is different than server saved; actual: %v, expected %v", actual, srcs[i])
-		}
-	}
-
-	// Update server.
-	srcs[0].Username = "calvinklein"
-	srcs[1].Name = "Enchantment Under the Sea Dance"
-	srcs[1].Organization = "1234"
-	if err := s.Update(ctx, srcs[0]); err != nil {
-		t.Fatal(err)
-	} else if err := s.Update(ctx, srcs[1]); err != nil {
-		t.Fatal(err)
-	}
-
-	// Confirm servers have updated.
-	if src, err := s.Get(ctx, srcs[0].ID); err != nil {
-		t.Fatal(err)
-	} else if src.Username != "calvinklein" {
-		t.Fatalf("server 0 update error: got %v, expected %v", src.Username, "calvinklein")
-	}
-	if src, err := s.Get(ctx, srcs[1].ID); err != nil {
-		t.Fatal(err)
-	} else if src.Name != "Enchantment Under the Sea Dance" {
-		t.Fatalf("server 1 update error: got %v, expected %v", src.Name, "Enchantment Under the Sea Dance")
-	} else if src.Organization != "1234" {
-		t.Fatalf("server 1 update error: got %v, expected %v", src.Organization, "1234")
-	}
-
-	// Attempt to make two active servers
-	srcs[0].Active = true
-	srcs[1].Active = true
-	if err := s.Update(ctx, srcs[0]); err != nil {
-		t.Fatal(err)
-	} else if err := s.Update(ctx, srcs[1]); err != nil {
-		t.Fatal(err)
-	}
-
-	if actual, err := s.Get(ctx, srcs[0].ID); err != nil {
-		t.Fatal(err)
-	} else if actual.Active {
-		t.Fatal("Able to set two active servers when only one should be permitted")
-	}
-
-	// Delete a server.
-	if err := s.Delete(ctx, srcs[0]); err != nil {
-		t.Fatal(err)
-	}
-
-	// Confirm server has been deleted.
-	if _, err := s.Get(ctx, srcs[0].ID); err != chronograf.ErrServerNotFound {
-		t.Fatalf("server delete error: got %v, expected %v", err, chronograf.ErrServerNotFound)
-	}
-
-	if bsrcs, err := s.All(ctx); err != nil {
-		t.Fatal(err)
-	} else if len(bsrcs) != 1 {
-		t.Fatalf("After delete All returned incorrect number of srcs; got %d, expected %d", len(bsrcs), 1)
-	} else if !reflect.DeepEqual(bsrcs[0], srcs[1]) {
-		t.Fatalf("After delete All returned incorrect server; got %v, expected %v", bsrcs[0], srcs[1])
-	}
-}
diff --git a/chronograf/bolt/sources.go b/chronograf/bolt/sources.go
deleted file mode 100644
index 9a608f3ce23..00000000000
--- a/chronograf/bolt/sources.go
+++ /dev/null
@@ -1,288 +0,0 @@
-package bolt
-
-import (
-	"context"
-	"math"
-
-	"github.com/influxdata/influxdb/v2/chronograf"
-	"github.com/influxdata/influxdb/v2/chronograf/bolt/internal"
-	"github.com/influxdata/influxdb/v2/chronograf/roles"
-	bolt "go.etcd.io/bbolt"
-)
-
-// Ensure SourcesStore implements chronograf.SourcesStore.
-var _ chronograf.SourcesStore = &SourcesStore{}
-
-// SourcesBucket is the bolt bucket used to store source information
-var SourcesBucket = []byte("Sources")
-
-// DefaultSource is a temporary measure for single-binary.
-var DefaultSource = &chronograf.Source{
-	ID:      math.MaxInt32, // Use large number to avoid possible collisions in older chronograf.
-	Name:    "autogen",
-	Type:    "influx",
-	URL:     "http://localhost:8086",
-	Default: false,
-}
-
-// SourcesStore is a bolt implementation to store time-series source information.
-type SourcesStore struct {
-	client *Client
-}
-
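`Migrate` below backfills two fields on every stored source: an empty `Organization` becomes the default organization's ID, and an empty `Role` becomes the viewer role. A sketch of that backfill shape against the `chronograf.SourcesStore` interface, assuming it exposes `All` and `Update` as the concrete store does; unlike `Migrate`, this variant skips records that need no change:

```go
func backfillSources(ctx context.Context, s chronograf.SourcesStore, defaultOrgID string) error {
	sources, err := s.All(ctx)
	if err != nil {
		return err
	}
	for _, source := range sources {
		changed := false
		if source.Organization == "" {
			source.Organization = defaultOrgID
			changed = true
		}
		if source.Role == "" {
			source.Role = roles.ViewerRoleName
			changed = true
		}
		// Only rewrite records that actually gained a default.
		if changed {
			if err := s.Update(ctx, source); err != nil {
				return err
			}
		}
	}
	return nil
}
```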
-// Migrate adds the default source to an existing boltdb.
-func (s *SourcesStore) Migrate(ctx context.Context) error {
-	sources, err := s.All(ctx)
-	if err != nil {
-		return err
-	}
-	if len(sources) == 0 {
-		if err := s.Put(ctx, DefaultSource); err != nil {
-			return err
-		}
-	}
-
-	defaultOrg, err := s.client.OrganizationsStore.DefaultOrganization(ctx)
-	if err != nil {
-		return err
-	}
-
-	for _, source := range sources {
-		if source.Organization == "" {
-			source.Organization = defaultOrg.ID
-		}
-		if source.Role == "" {
-			source.Role = roles.ViewerRoleName
-		}
-		if err := s.Update(ctx, source); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// All returns all known sources
-func (s *SourcesStore) All(ctx context.Context) ([]chronograf.Source, error) {
-	var srcs []chronograf.Source
-	if err := s.client.db.View(func(tx *bolt.Tx) error {
-		var err error
-		srcs, err = s.all(ctx, tx)
-		if err != nil {
-			return err
-		}
-		return nil
-	}); err != nil {
-		return nil, err
-	}
-
-	return srcs, nil
-}
-
-// Add creates a new Source in the SourcesStore.
-func (s *SourcesStore) Add(ctx context.Context, src chronograf.Source) (chronograf.Source, error) {
-	// force first source added to be default
-	if srcs, err := s.All(ctx); err != nil {
-		return chronograf.Source{}, err
-	} else if len(srcs) == 0 {
-		src.Default = true
-	}
-
-	if err := s.client.db.Update(func(tx *bolt.Tx) error {
-		return s.add(ctx, &src, tx)
-	}); err != nil {
-		return chronograf.Source{}, err
-	}
-
-	return src, nil
-}
-
-// Delete removes the Source from the SourcesStore
-func (s *SourcesStore) Delete(ctx context.Context, src chronograf.Source) error {
-	if err := s.client.db.Update(func(tx *bolt.Tx) error {
-		if err := s.setRandomDefault(ctx, src, tx); err != nil {
-			return err
-		}
-		return s.delete(ctx, src, tx)
-	}); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// Get returns a Source if the id exists.
-func (s *SourcesStore) Get(ctx context.Context, id int) (chronograf.Source, error) {
-	var src chronograf.Source
-	if err := s.client.db.View(func(tx *bolt.Tx) error {
-		var err error
-		src, err = s.get(ctx, id, tx)
-		if err != nil {
-			return err
-		}
-		return nil
-	}); err != nil {
-		return chronograf.Source{}, err
-	}
-
-	return src, nil
-}
-
-// Update a Source
-func (s *SourcesStore) Update(ctx context.Context, src chronograf.Source) error {
-	if err := s.client.db.Update(func(tx *bolt.Tx) error {
-		return s.update(ctx, src, tx)
-	}); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (s *SourcesStore) all(ctx context.Context, tx *bolt.Tx) ([]chronograf.Source, error) {
-	var srcs []chronograf.Source
-	if err := tx.Bucket(SourcesBucket).ForEach(func(k, v []byte) error {
-		var src chronograf.Source
-		if err := internal.UnmarshalSource(v, &src); err != nil {
-			return err
-		}
-		srcs = append(srcs, src)
-		return nil
-	}); err != nil {
-		return srcs, err
-	}
-	return srcs, nil
-}
-
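`Delete` above runs `setRandomDefault` before removing the record, so deleting the default source promotes some other source. A sketch of a test pinning that behavior, in the style of `sources_test.go` (it assumes that file's imports and `NewTestClient`; note the test client appears to be pre-seeded with `DefaultSource` via `Migrate`, so the assertion counts defaults across all surviving sources rather than naming the promoted one):

```go
func TestDeleteDefaultPromotesAnother(t *testing.T) {
	client, err := NewTestClient()
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	ctx := context.Background()
	s := client.SourcesStore

	a, err := s.Add(ctx, chronograf.Source{Name: "a", Default: true})
	if err != nil {
		t.Fatal(err)
	}
	if _, err := s.Add(ctx, chronograf.Source{Name: "b"}); err != nil {
		t.Fatal(err)
	}

	// Deleting the current default should hand the flag to another source.
	if err := s.Delete(ctx, a); err != nil {
		t.Fatal(err)
	}

	remaining, err := s.All(ctx)
	if err != nil {
		t.Fatal(err)
	}
	defaults := 0
	for _, src := range remaining {
		if src.Default {
			defaults++
		}
	}
	if defaults != 1 {
		t.Fatalf("expected exactly one default source after delete, got %d", defaults)
	}
}
```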
-// Put updates the source.
-func (s *SourcesStore) Put(ctx context.Context, src *chronograf.Source) error {
-	return s.client.db.Update(func(tx *bolt.Tx) error {
-		return s.put(ctx, src, tx)
-	})
-}
-
-func (s *SourcesStore) put(ctx context.Context, src *chronograf.Source, tx *bolt.Tx) error {
-	b := tx.Bucket(SourcesBucket)
-
-	if v, err := internal.MarshalSource(*src); err != nil {
-		return err
-	} else if err := b.Put(itob(src.ID), v); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (s *SourcesStore) add(ctx context.Context, src *chronograf.Source, tx *bolt.Tx) error {
-	b := tx.Bucket(SourcesBucket)
-	seq, err := b.NextSequence()
-	if err != nil {
-		return err
-	}
-	src.ID = int(seq)
-
-	if src.Default {
-		if err := s.resetDefaultSource(ctx, tx); err != nil {
-			return err
-		}
-	}
-
-	if v, err := internal.MarshalSource(*src); err != nil {
-		return err
-	} else if err := b.Put(itob(src.ID), v); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (s *SourcesStore) delete(ctx context.Context, src chronograf.Source, tx *bolt.Tx) error {
-	if err := tx.Bucket(SourcesBucket).Delete(itob(src.ID)); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (s *SourcesStore) get(ctx context.Context, id int, tx *bolt.Tx) (chronograf.Source, error) {
-	var src chronograf.Source
-	if v := tx.Bucket(SourcesBucket).Get(itob(id)); v == nil {
-		return src, chronograf.ErrSourceNotFound
-	} else if err := internal.UnmarshalSource(v, &src); err != nil {
-		return src, err
-	}
-	return src, nil
-}
-
-func (s *SourcesStore) update(ctx context.Context, src chronograf.Source, tx *bolt.Tx) error {
-	// Get an existing source with the same ID.
-	b := tx.Bucket(SourcesBucket)
-	if v := b.Get(itob(src.ID)); v == nil {
-		return chronograf.ErrSourceNotFound
-	}
-
-	if src.Default {
-		if err := s.resetDefaultSource(ctx, tx); err != nil {
-			return err
-		}
-	}
-
-	if v, err := internal.MarshalSource(src); err != nil {
-		return err
-	} else if err := b.Put(itob(src.ID), v); err != nil {
-		return err
-	}
-	return nil
-}
-
-// resetDefaultSource unsets the Default flag on all sources
-func (s *SourcesStore) resetDefaultSource(ctx context.Context, tx *bolt.Tx) error {
-	b := tx.Bucket(SourcesBucket)
-	srcs, err := s.all(ctx, tx)
-	if err != nil {
-		return err
-	}
-
-	for _, other := range srcs {
-		if other.Default {
-			other.Default = false
-			if v, err := internal.MarshalSource(other); err != nil {
-				return err
-			} else if err := b.Put(itob(other.ID), v); err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
-
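`resetDefaultSource` above and `setRandomDefault` below share a subtle shape: they collect records via `all` (a `ForEach` pass) first and only then write, since bbolt's documentation warns against mutating a bucket while iterating it, and both steps run inside the caller's single read-write transaction so no reader observes zero or two defaults. A generic, self-contained sketch of that collect-then-write shape; all names here are illustrative, not part of the deleted package (assumes the `go.etcd.io/bbolt` import):

```go
// setSoleDefault clears a marker value on every record, then sets it on
// exactly one key, all inside one read-write transaction.
func setSoleDefault(db *bolt.DB, bucket, key []byte) error {
	return db.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket(bucket)

		// Collect the currently-flagged keys first: bbolt's ForEach must
		// not be mixed with writes to the same bucket.
		var flagged [][]byte
		if err := b.ForEach(func(k, v []byte) error {
			if string(v) == "default" {
				flagged = append(flagged, append([]byte(nil), k...))
			}
			return nil
		}); err != nil {
			return err
		}

		// Now it is safe to write.
		for _, k := range flagged {
			if err := b.Put(k, []byte("")); err != nil {
				return err
			}
		}
		return b.Put(key, []byte("default"))
	})
}
```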
-// setRandomDefault will locate a source other than the provided
-// chronograf.Source and set it as the default source. If no other sources are
-// available, the provided source will be set to the default source if it is
-// not already. It assumes that the provided chronograf.Source has been persisted.
-func (s *SourcesStore) setRandomDefault(ctx context.Context, src chronograf.Source, tx *bolt.Tx) error {
-	// Check if requested source is the current default
-	if target, err := s.get(ctx, src.ID, tx); err != nil {
-		return err
-	} else if target.Default {
-		// Locate another source to be the new default
-		srcs, err := s.all(ctx, tx)
-		if err != nil {
-			return err
-		}
-		var other *chronograf.Source
-		for idx := range srcs {
-			other = &srcs[idx]
-			// avoid selecting the source we're about to delete as the new default
-			if other.ID != target.ID {
-				break
-			}
-		}
-
-		// set the other to be the default
-		other.Default = true
-		if err := s.update(ctx, *other, tx); err != nil {
-			return err
-		}
-	}
-	return nil
-}
diff --git a/chronograf/bolt/sources_test.go b/chronograf/bolt/sources_test.go
deleted file mode 100644
index 9d8b0a652b5..00000000000
--- a/chronograf/bolt/sources_test.go
+++ /dev/null
@@ -1,200 +0,0 @@
-package bolt_test
-
-import (
-	"context"
-	"reflect"
-	"testing"
-
-	"github.com/influxdata/influxdb/v2/chronograf"
-	"github.com/influxdata/influxdb/v2/chronograf/bolt"
-)
-
-// Ensure a SourcesStore can store, retrieve, update, and delete sources.
-func TestSourceStore(t *testing.T) {
-	c, err := NewTestClient()
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer c.Close()
-
-	s := c.SourcesStore
-
-	srcs := []chronograf.Source{
-		{
-			Name:         "Of Truth",
-			Type:         "influx",
-			Username:     "marty",
-			Password:     "I❤️ jennifer parker",
-			URL:          "toyota-hilux.lyon-estates.local",
-			Default:      true,
-			Organization: "1337",
-			DefaultRP:    "pineapple",
-		},
-		{
-			Name:         "HipToBeSquare",
-			Type:         "influx",
-			Username:     "calvinklein",
-			Password:     "chuck b3rry",
-			URL:          "toyota-hilux.lyon-estates.local",
-			Default:      true,
-			Organization: "1337",
-		},
-		{
-			Name:               "HipToBeSquare",
-			Type:               "influx",
-			Username:           "calvinklein",
-			Password:           "chuck b3rry",
-			URL:                "https://toyota-hilux.lyon-estates.local",
-			InsecureSkipVerify: true,
-			Default:            false,
-			Organization:       "1337",
-		},
-	}
-
-	ctx := context.Background()
-	// Add new srcs.
-	for i, src := range srcs {
-		if srcs[i], err = s.Add(ctx, src); err != nil {
-			t.Fatal(err)
-		}
-		// Confirm first src in the store is the same as the original.
-		if actual, err := s.Get(ctx, srcs[i].ID); err != nil {
-			t.Fatal(err)
-		} else if !reflect.DeepEqual(actual, srcs[i]) {
-			t.Fatalf("source loaded is different than source saved; actual: %v, expected %v", actual, srcs[i])
-		}
-	}
-
-	// Update source.
-	srcs[0].Username = "calvinklein"
-	srcs[1].Name = "Enchantment Under the Sea Dance"
-	srcs[2].DefaultRP = "cubeapple"
-	mustUpdateSource(t, s, srcs[0])
-	mustUpdateSource(t, s, srcs[1])
-	mustUpdateSource(t, s, srcs[2])
-
-	// Confirm sources have updated.
-	if src, err := s.Get(ctx, srcs[0].ID); err != nil {
-		t.Fatal(err)
-	} else if src.Username != "calvinklein" {
-		t.Fatalf("source 0 update error: got %v, expected %v", src.Username, "calvinklein")
-	}
-	if src, err := s.Get(ctx, srcs[1].ID); err != nil {
-		t.Fatal(err)
-	} else if src.Name != "Enchantment Under the Sea Dance" {
-		t.Fatalf("source 1 update error: got %v, expected %v", src.Name, "Enchantment Under the Sea Dance")
-	}
-	if src, err := s.Get(ctx, srcs[2].ID); err != nil {
-		t.Fatal(err)
-	} else if src.DefaultRP != "cubeapple" {
-		t.Fatalf("source 2 update error: got %v, expected %v", src.DefaultRP, "cubeapple")
-	}
-
-	// Attempt to make two default sources
-	srcs[0].Default = true
-	srcs[1].Default = true
-	mustUpdateSource(t, s, srcs[0])
-	mustUpdateSource(t, s, srcs[1])
-
-	if actual, err := s.Get(ctx, srcs[0].ID); err != nil {
-		t.Fatal(err)
-	} else if actual.Default {
-		t.Fatal("Able to set two default sources when only one should be permitted")
-	}
-
-	// Attempt to add a new default source
-	srcs = append(srcs, chronograf.Source{
-		Name:         "Biff Tannen",
-		Type:         "influx",
-		Username:     "HELLO",
-		Password:     "MCFLY",
-		URL:          "anybody.in.there.local",
-		Default:      true,
-		Organization: "1892",
-	})
-
-	srcs[3] = mustAddSource(t, s, srcs[3])
-	if srcs, err := s.All(ctx); err != nil {
-		t.Fatal(err)
-	} else {
-		defaults := 0
-		for _, src := range srcs {
-			if src.Default {
-				defaults++
-			}
-		}
-
-		if defaults != 1 {
-			t.Fatal("Able to add more than one default source")
-		}
-	}
-
-	// Delete a source.
-	if err := s.Delete(ctx, srcs[0]); err != nil {
-		t.Fatal(err)
-	}
-
-	// Confirm source has been deleted.
-	if _, err := s.Get(ctx, srcs[0].ID); err != chronograf.ErrSourceNotFound {
-		t.Fatalf("source delete error: got %v, expected %v", err, chronograf.ErrSourceNotFound)
-	}
-
-	// Delete the other source we created
-	if err := s.Delete(ctx, srcs[3]); err != nil {
-		t.Fatal(err)
-	}
-
-	if bsrcs, err := s.All(ctx); err != nil {
-		t.Fatal(err)
-	} else if len(bsrcs) != 3 {
-		t.Fatalf("After delete All returned incorrect number of srcs; got %d, expected %d", len(bsrcs), 3)
-	} else if !reflect.DeepEqual(bsrcs[0], srcs[1]) {
-		t.Fatalf("After delete All returned incorrect source; got %v, expected %v", bsrcs[0], srcs[1])
-	}
-
-	// Delete the final sources
-	if err := s.Delete(ctx, srcs[1]); err != nil {
-		t.Fatal(err)
-	}
-	if err := s.Delete(ctx, srcs[2]); err != nil {
-		t.Fatal(err)
-	}
-	if err := s.Delete(ctx, *bolt.DefaultSource); err != nil {
-		t.Fatal(err)
-	}
-
-	// Try to add one source as a non-default and ensure that it becomes a
-	// default
-	src := mustAddSource(t, s, chronograf.Source{
-		Name:         "Biff Tannen",
-		Type:         "influx",
-		Username:     "HELLO",
-		Password:     "MCFLY",
-		URL:          "anybody.in.there.local",
-		Default:      false,
-		Organization: "1234",
-	})
-
-	if actual, err := s.Get(ctx, src.ID); err != nil {
-		t.Fatal(err)
-	} else if !actual.Default {
-		t.Fatal("Expected first source added to be default but wasn't")
-	}
-}
-
-func mustUpdateSource(t *testing.T, s *bolt.SourcesStore, src chronograf.Source) {
-	ctx := context.Background()
-	if err := s.Update(ctx, src); err != nil {
-		t.Fatal(err)
-	}
-}
-
-func mustAddSource(t *testing.T, s *bolt.SourcesStore, src chronograf.Source) chronograf.Source {
-	ctx := context.Background()
-	added, err := s.Add(ctx, src)
-	if err != nil {
-		t.Fatal(err)
-	}
-	return added
-}
diff --git a/chronograf/bolt/users.go b/chronograf/bolt/users.go
deleted file mode 100644
index 79fd4997114..00000000000
---
a/chronograf/bolt/users.go +++ /dev/null @@ -1,196 +0,0 @@ -package bolt - -import ( - "context" - "fmt" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/bolt/internal" - bolt "go.etcd.io/bbolt" -) - -// Ensure UsersStore implements chronograf.UsersStore. -var _ chronograf.UsersStore = &UsersStore{} - -// UsersBucket is used to store users local to chronograf -var UsersBucket = []byte("UsersV2") - -// UsersStore uses bolt to store and retrieve users -type UsersStore struct { - client *Client -} - -// get searches the UsersStore for user with id and returns the bolt representation -func (s *UsersStore) get(ctx context.Context, id uint64) (*chronograf.User, error) { - var u chronograf.User - err := s.client.db.View(func(tx *bolt.Tx) error { - v := tx.Bucket(UsersBucket).Get(u64tob(id)) - if v == nil { - return chronograf.ErrUserNotFound - } - return internal.UnmarshalUser(v, &u) - }) - - if err != nil { - return nil, err - } - - return &u, nil -} - -func (s *UsersStore) each(ctx context.Context, fn func(*chronograf.User)) error { - return s.client.db.View(func(tx *bolt.Tx) error { - return tx.Bucket(UsersBucket).ForEach(func(k, v []byte) error { - var user chronograf.User - if err := internal.UnmarshalUser(v, &user); err != nil { - return err - } - fn(&user) - return nil - }) - }) -} - -// Num returns the number of users in the UsersStore -func (s *UsersStore) Num(ctx context.Context) (int, error) { - count := 0 - - err := s.client.db.View(func(tx *bolt.Tx) error { - return tx.Bucket(UsersBucket).ForEach(func(k, v []byte) error { - count++ - return nil - }) - }) - - if err != nil { - return 0, err - } - - return count, nil -} - -// Get searches the UsersStore for user with name -func (s *UsersStore) Get(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.ID != nil { - return s.get(ctx, *q.ID) - } - - if q.Name != nil && q.Provider != nil && q.Scheme != nil { - var user *chronograf.User - err := s.each(ctx, func(u *chronograf.User) { - if user != nil { - return - } - if u.Name == *q.Name && u.Provider == *q.Provider && u.Scheme == *q.Scheme { - user = u - } - }) - - if err != nil { - return nil, err - } - - if user == nil { - return nil, chronograf.ErrUserNotFound - } - - return user, nil - } - - return nil, fmt.Errorf("must specify either ID, or Name, Provider, and Scheme in UserQuery") -} - -func (s *UsersStore) userExists(ctx context.Context, u *chronograf.User) (bool, error) { - _, err := s.Get(ctx, chronograf.UserQuery{ - Name: &u.Name, - Provider: &u.Provider, - Scheme: &u.Scheme, - }) - if err == chronograf.ErrUserNotFound { - return false, nil - } - - if err != nil { - return false, err - } - - return true, nil -} - -// Add a new User to the UsersStore. 
-func (s *UsersStore) Add(ctx context.Context, u *chronograf.User) (*chronograf.User, error) { - if u == nil { - return nil, fmt.Errorf("user provided is nil") - } - userExists, err := s.userExists(ctx, u) - if err != nil { - return nil, err - } - if userExists { - return nil, chronograf.ErrUserAlreadyExists - } - if err := s.client.db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket(UsersBucket) - seq, err := b.NextSequence() - if err != nil { - return err - } - u.ID = seq - if v, err := internal.MarshalUser(u); err != nil { - return err - } else if err := b.Put(u64tob(seq), v); err != nil { - return err - } - return nil - }); err != nil { - return nil, err - } - - return u, nil -} - -// Delete a user from the UsersStore -func (s *UsersStore) Delete(ctx context.Context, u *chronograf.User) error { - _, err := s.get(ctx, u.ID) - if err != nil { - return err - } - return s.client.db.Update(func(tx *bolt.Tx) error { - return tx.Bucket(UsersBucket).Delete(u64tob(u.ID)) - }) -} - -// Update a user -func (s *UsersStore) Update(ctx context.Context, u *chronograf.User) error { - _, err := s.get(ctx, u.ID) - if err != nil { - return err - } - return s.client.db.Update(func(tx *bolt.Tx) error { - if v, err := internal.MarshalUser(u); err != nil { - return err - } else if err := tx.Bucket(UsersBucket).Put(u64tob(u.ID), v); err != nil { - return err - } - return nil - }) -} - -// All returns all users -func (s *UsersStore) All(ctx context.Context) ([]chronograf.User, error) { - var users []chronograf.User - if err := s.client.db.View(func(tx *bolt.Tx) error { - return tx.Bucket(UsersBucket).ForEach(func(k, v []byte) error { - var user chronograf.User - if err := internal.UnmarshalUser(v, &user); err != nil { - return err - } - users = append(users, user) - return nil - }) - }); err != nil { - return nil, err - } - - return users, nil -} diff --git a/chronograf/bolt/users_test.go b/chronograf/bolt/users_test.go deleted file mode 100644 index e2b38b47d72..00000000000 --- a/chronograf/bolt/users_test.go +++ /dev/null @@ -1,564 +0,0 @@ -package bolt_test - -import ( - "context" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/influxdata/influxdb/v2/chronograf" -) - -// IgnoreFields is used because ID is created by BoltDB and cannot be predicted reliably -// EquateEmpty is used because we want nil slices, arrays, and maps to be equal to the empty map -var cmpOptions = cmp.Options{ - cmpopts.IgnoreFields(chronograf.User{}, "ID"), - cmpopts.EquateEmpty(), -} - -func TestUsersStore_GetWithID(t *testing.T) { - type args struct { - ctx context.Context - usr *chronograf.User - } - tests := []struct { - name string - args args - want *chronograf.User - wantErr bool - addFirst bool - }{ - { - name: "User not found", - args: args{ - ctx: context.Background(), - usr: &chronograf.User{ - ID: 1337, - }, - }, - wantErr: true, - }, - { - name: "Get user", - args: args{ - ctx: context.Background(), - usr: &chronograf.User{ - Name: "billietta", - Provider: "google", - Scheme: "oauth2", - }, - }, - want: &chronograf.User{ - Name: "billietta", - Provider: "google", - Scheme: "oauth2", - }, - addFirst: true, - }, - } - for _, tt := range tests { - client, err := NewTestClient() - if err != nil { - t.Fatal(err) - } - defer client.Close() - - s := client.UsersStore - if tt.addFirst { - tt.args.usr, err = s.Add(tt.args.ctx, tt.args.usr) - if err != nil { - t.Fatal(err) - } - } - got, err := s.Get(tt.args.ctx, chronograf.UserQuery{ID: 
&tt.args.usr.ID}) - if (err != nil) != tt.wantErr { - t.Errorf("%q. UsersStore.Get() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - if diff := cmp.Diff(got, tt.want, cmpOptions...); diff != "" { - t.Errorf("%q. UsersStore.Get():\n-got/+want\ndiff %s", tt.name, diff) - } - } -} - -func TestUsersStore_GetWithNameProviderScheme(t *testing.T) { - type args struct { - ctx context.Context - usr *chronograf.User - } - tests := []struct { - name string - args args - want *chronograf.User - wantErr bool - addFirst bool - }{ - { - name: "User not found", - args: args{ - ctx: context.Background(), - usr: &chronograf.User{ - Name: "billietta", - Provider: "google", - Scheme: "oauth2", - }, - }, - wantErr: true, - }, - { - name: "Get user", - args: args{ - ctx: context.Background(), - usr: &chronograf.User{ - Name: "billietta", - Provider: "google", - Scheme: "oauth2", - }, - }, - want: &chronograf.User{ - Name: "billietta", - Provider: "google", - Scheme: "oauth2", - }, - addFirst: true, - }, - } - for _, tt := range tests { - client, err := NewTestClient() - if err != nil { - t.Fatal(err) - } - defer client.Close() - - s := client.UsersStore - if tt.addFirst { - tt.args.usr, err = s.Add(tt.args.ctx, tt.args.usr) - if err != nil { - t.Fatal(err) - } - } - - got, err := s.Get(tt.args.ctx, chronograf.UserQuery{ - Name: &tt.args.usr.Name, - Provider: &tt.args.usr.Provider, - Scheme: &tt.args.usr.Scheme, - }) - if (err != nil) != tt.wantErr { - t.Errorf("%q. UsersStore.Get() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - if diff := cmp.Diff(got, tt.want, cmpOptions...); diff != "" { - t.Errorf("%q. UsersStore.Get():\n-got/+want\ndiff %s", tt.name, diff) - } - } -} - -func TestUsersStore_GetInvalid(t *testing.T) { - client, err := NewTestClient() - if err != nil { - t.Fatal(err) - } - defer client.Close() - - s := client.UsersStore - - _, err = s.Get(context.Background(), chronograf.UserQuery{}) - if err == nil { - t.Errorf("Invalid Get. UsersStore.Get() error = %v", err) - } -} - -func TestUsersStore_Add(t *testing.T) { - type args struct { - ctx context.Context - u *chronograf.User - addFirst bool - } - tests := []struct { - name string - args args - want *chronograf.User - wantErr bool - }{ - { - name: "Add new user", - args: args{ - ctx: context.Background(), - u: &chronograf.User{ - Name: "docbrown", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: "editor", - }, - }, - }, - }, - want: &chronograf.User{ - Name: "docbrown", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: "editor", - }, - }, - }, - }, - { - name: "User already exists", - args: args{ - ctx: context.Background(), - addFirst: true, - u: &chronograf.User{ - Name: "docbrown", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: "editor", - }, - }, - }, - }, - wantErr: true, - }, - } - for _, tt := range tests { - client, err := NewTestClient() - if err != nil { - t.Fatal(err) - } - defer client.Close() - - s := client.UsersStore - if tt.args.addFirst { - _, _ = s.Add(tt.args.ctx, tt.args.u) - } - got, err := s.Add(tt.args.ctx, tt.args.u) - if (err != nil) != tt.wantErr { - t.Errorf("%q. 
UsersStore.Add() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - - if tt.wantErr { - continue - } - - got, err = s.Get(tt.args.ctx, chronograf.UserQuery{ID: &got.ID}) - if err != nil { - t.Fatalf("failed to get user: %v", err) - } - if diff := cmp.Diff(got, tt.want, cmpOptions...); diff != "" { - t.Errorf("%q. UsersStore.Add():\n-got/+want\ndiff %s", tt.name, diff) - } - } -} - -func TestUsersStore_Delete(t *testing.T) { - type args struct { - ctx context.Context - user *chronograf.User - } - tests := []struct { - name string - args args - addFirst bool - wantErr bool - }{ - { - name: "No such user", - args: args{ - ctx: context.Background(), - user: &chronograf.User{ - ID: 10, - }, - }, - wantErr: true, - }, - { - name: "Delete new user", - args: args{ - ctx: context.Background(), - user: &chronograf.User{ - Name: "noone", - }, - }, - addFirst: true, - }, - } - for _, tt := range tests { - client, err := NewTestClient() - if err != nil { - t.Fatal(err) - } - defer client.Close() - - s := client.UsersStore - - if tt.addFirst { - tt.args.user, _ = s.Add(tt.args.ctx, tt.args.user) - } - if err := s.Delete(tt.args.ctx, tt.args.user); (err != nil) != tt.wantErr { - t.Errorf("%q. UsersStore.Delete() error = %v, wantErr %v", tt.name, err, tt.wantErr) - } - } -} - -func TestUsersStore_Update(t *testing.T) { - type args struct { - ctx context.Context - usr *chronograf.User - roles []chronograf.Role - provider string - scheme string - name string - } - tests := []struct { - name string - args args - addFirst bool - want *chronograf.User - wantErr bool - }{ - { - name: "No such user", - args: args{ - ctx: context.Background(), - usr: &chronograf.User{ - ID: 10, - }, - }, - wantErr: true, - }, - { - name: "Update user role", - args: args{ - ctx: context.Background(), - usr: &chronograf.User{ - Name: "bobetta", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: "viewer", - }, - }, - }, - roles: []chronograf.Role{ - { - Name: "editor", - }, - }, - }, - want: &chronograf.User{ - Name: "bobetta", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: "editor", - }, - }, - }, - addFirst: true, - }, - { - name: "Update user provider and scheme", - args: args{ - ctx: context.Background(), - usr: &chronograf.User{ - Name: "bobetta", - Provider: "github", - Scheme: "oauth2", - }, - provider: "google", - scheme: "oauth2", - name: "billietta", - }, - want: &chronograf.User{ - Name: "billietta", - Provider: "google", - Scheme: "oauth2", - }, - addFirst: true, - }, - } - for _, tt := range tests { - client, err := NewTestClient() - if err != nil { - t.Fatal(err) - } - defer client.Close() - - s := client.UsersStore - - if tt.addFirst { - tt.args.usr, err = s.Add(tt.args.ctx, tt.args.usr) - if err != nil { - t.Fatal(err) - } - } - - if tt.args.roles != nil { - tt.args.usr.Roles = tt.args.roles - } - - if tt.args.provider != "" { - tt.args.usr.Provider = tt.args.provider - } - - if tt.args.scheme != "" { - tt.args.usr.Scheme = tt.args.scheme - } - - if tt.args.name != "" { - tt.args.usr.Name = tt.args.name - } - - if err := s.Update(tt.args.ctx, tt.args.usr); (err != nil) != tt.wantErr { - t.Errorf("%q. 
UsersStore.Update() error = %v, wantErr %v", tt.name, err, tt.wantErr) - } - - // for the empty test - if tt.want == nil { - continue - } - - got, err := s.Get(tt.args.ctx, chronograf.UserQuery{ID: &tt.args.usr.ID}) - if err != nil { - t.Fatalf("failed to get user: %v", err) - } - if diff := cmp.Diff(got, tt.want, cmpOptions...); diff != "" { - t.Errorf("%q. UsersStore.Update():\n-got/+want\ndiff %s", tt.name, diff) - } - } -} - -func TestUsersStore_All(t *testing.T) { - tests := []struct { - name string - ctx context.Context - want []chronograf.User - addFirst bool - wantErr bool - }{ - { - name: "No users", - }, - { - name: "Update new user", - want: []chronograf.User{ - { - Name: "howdy", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: "viewer", - }, - }, - }, - { - Name: "doody", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: "editor", - }, - }, - }, - }, - addFirst: true, - }, - } - for _, tt := range tests { - client, err := NewTestClient() - if err != nil { - t.Fatal(err) - } - defer client.Close() - - s := client.UsersStore - - if tt.addFirst { - for _, u := range tt.want { - s.Add(tt.ctx, &u) - } - } - gots, err := s.All(tt.ctx) - if (err != nil) != tt.wantErr { - t.Errorf("%q. UsersStore.All() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - for i, got := range gots { - if diff := cmp.Diff(got, tt.want[i], cmpOptions...); diff != "" { - t.Errorf("%q. UsersStore.All():\n-got/+want\ndiff %s", tt.name, diff) - } - } - } -} - -func TestUsersStore_Num(t *testing.T) { - tests := []struct { - name string - ctx context.Context - users []chronograf.User - want int - wantErr bool - }{ - { - name: "No users", - want: 0, - }, - { - name: "Update new user", - want: 2, - users: []chronograf.User{ - { - Name: "howdy", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: "viewer", - }, - }, - }, - { - Name: "doody", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: "editor", - }, - }, - }, - }, - }, - } - for _, tt := range tests { - client, err := NewTestClient() - if err != nil { - t.Fatal(err) - } - defer client.Close() - - s := client.UsersStore - - for _, u := range tt.users { - s.Add(tt.ctx, &u) - } - got, err := s.Num(tt.ctx) - if (err != nil) != tt.wantErr { - t.Errorf("%q. UsersStore.Num() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - if got != tt.want { - t.Errorf("%q. UsersStore.Num() = %d. want %d", tt.name, got, tt.want) - } - } -} diff --git a/chronograf/bolt/util.go b/chronograf/bolt/util.go deleted file mode 100644 index 660aad01aa4..00000000000 --- a/chronograf/bolt/util.go +++ /dev/null @@ -1,19 +0,0 @@ -package bolt - -import ( - "encoding/binary" -) - -// itob returns an 8-byte big endian representation of v. -func itob(v int) []byte { - b := make([]byte, 8) - binary.BigEndian.PutUint64(b, uint64(v)) - return b -} - -// u64tob returns an 8-byte big endian representation of v. -func u64tob(v uint64) []byte { - b := make([]byte, 8) - binary.BigEndian.PutUint64(b, v) - return b -} diff --git a/chronograf/canned/Makefile b/chronograf/canned/Makefile deleted file mode 100644 index 5d58079d759..00000000000 --- a/chronograf/canned/Makefile +++ /dev/null @@ -1,26 +0,0 @@ -# List any generated files here -TARGETS = bin_gen.go -# List any source files used to generate the targets here -SOURCES = bin.go $(shell find . 
-name '*.json') -# List any directories that have their own Makefile here -SUBDIRS = - -# Default target -all: $(SUBDIRS) $(TARGETS) - -# Recurse into subdirs for same make goal -$(SUBDIRS): - $(MAKE) -C $@ $(MAKECMDGOALS) - -# Clean all targets recursively -clean: $(SUBDIRS) - rm -f $(TARGETS) - -# Define go generate if not already defined -GO_GENERATE := go generate - -# Run go generate for the targets -$(TARGETS): $(SOURCES) - $(GO_GENERATE) -x - -.PHONY: all clean $(SUBDIRS) diff --git a/chronograf/canned/README.md b/chronograf/canned/README.md deleted file mode 100644 index 6a80244c479..00000000000 --- a/chronograf/canned/README.md +++ /dev/null @@ -1,7 +0,0 @@ -## Canned Applications -The JSON application layouts in this directory ship with the application as nice, default layouts and queries for telegraf data. - -### Create new Application - -To create a new application in this directory run `./new_apps.sh`. This shell script will create a new application template with a generated UUID. -Update this layout application file's queries, measurements, and application name. diff --git a/chronograf/canned/TODO.go b/chronograf/canned/TODO.go deleted file mode 100644 index be35a0332b8..00000000000 --- a/chronograf/canned/TODO.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build !assets - -package canned - -import "errors" - -// The functions defined in this file are placeholders when the binary is compiled -// without assets. - -// Asset returns an error stating no assets were included in the binary. -func Asset(string) ([]byte, error) { - return nil, errors.New("no assets included in binary") -} - -// AssetNames returns nil because there are no assets included in the binary. -func AssetNames() []string { - return nil -} diff --git a/chronograf/canned/apache.json b/chronograf/canned/apache.json deleted file mode 100644 index 971460b189a..00000000000 --- a/chronograf/canned/apache.json +++ /dev/null @@ -1,62 +0,0 @@ -{ - "id": "6dfb4d49-20dc-4157-9018-2b1b1cb75c2d", - "measurement": "apache", - "app": "apache", - "autoflow": false, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "0246e457-916b-43e3-be99-211c4cbc03e8", - "name": "Apache Bytes/Second", - "queries": [ - { - "query": "SELECT non_negative_derivative(max(\"BytesPerSec\")) AS \"bytes_per_sec\" FROM \":db:\".\":rp:\".\"apache\"", - "label": "bytes/s", - "groupbys": [ - "\"server\"" - ], - "wheres": [] - } - ] - }, - { - "x": 4, - "y": 0, - "w": 4, - "h": 4, - "i": "37f2e4bb-9fa5-4891-a424-9df5ce7458bb", - "name": "Apache - Requests/Second", - "queries": [ - { - "query": "SELECT non_negative_derivative(max(\"ReqPerSec\")) AS \"req_per_sec\" FROM \":db:\".\":rp:\".\"apache\"", - "label": "requests/s", - "groupbys": [ - "\"server\"" - ], - "wheres": [] - } - ] - }, - { - "x": 8, - "y": 0, - "w": 4, - "h": 4, - "i": "ea9174b3-2b56-4e80-a37d-064507c6775a", - "name": "Apache - Total Accesses", - "queries": [ - { - "query": "SELECT non_negative_derivative(max(\"TotalAccesses\")) AS \"tot_access\" FROM \":db:\".\":rp:\".\"apache\"", - "label": "accesses/s", - "groupbys": [ - "\"server\"" - ], - "wheres": [] - } - ] - } - ] -} diff --git a/chronograf/canned/bin.go b/chronograf/canned/bin.go deleted file mode 100644 index 8ba3db45f48..00000000000 --- a/chronograf/canned/bin.go +++ /dev/null @@ -1,83 +0,0 @@ -package canned - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -//go:generate env GO111MODULE=on go run github.com/kevinburke/go-bindata/go-bindata -o 
bin_gen.go -tags assets -ignore README|apps|.sh|go -pkg canned .
-
-// BinLayoutsStore represents a layout store using data generated by go-bindata
-type BinLayoutsStore struct {
-	Logger chronograf.Logger
-}
-
-// All returns the set of all layouts
-func (s *BinLayoutsStore) All(ctx context.Context) ([]chronograf.Layout, error) {
-	names := AssetNames()
-	layouts := make([]chronograf.Layout, len(names))
-	for i, name := range names {
-		octets, err := Asset(name)
-		if err != nil {
-			s.Logger.
-				WithField("component", "apps").
-				WithField("name", name).
-				Error("Invalid Layout: ", err)
-			return nil, chronograf.ErrLayoutInvalid
-		}
-
-		var layout chronograf.Layout
-		if err = json.Unmarshal(octets, &layout); err != nil {
-			s.Logger.
-				WithField("component", "apps").
-				WithField("name", name).
-				Error("Unable to read layout:", err)
-			return nil, chronograf.ErrLayoutInvalid
-		}
-		layouts[i] = layout
-	}
-
-	return layouts, nil
-}
-
-// Add is not supported by BinLayoutsStore
-func (s *BinLayoutsStore) Add(ctx context.Context, layout chronograf.Layout) (chronograf.Layout, error) {
-	return chronograf.Layout{}, fmt.Errorf("add to BinLayoutsStore not supported")
-}
-
-// Delete is not supported by BinLayoutsStore
-func (s *BinLayoutsStore) Delete(ctx context.Context, layout chronograf.Layout) error {
-	return fmt.Errorf("delete to BinLayoutsStore not supported")
-}
-
-// Get retrieves Layout if `ID` exists.
-func (s *BinLayoutsStore) Get(ctx context.Context, ID string) (chronograf.Layout, error) {
-	layouts, err := s.All(ctx)
-	if err != nil {
-		s.Logger.
-			WithField("component", "apps").
-			WithField("name", ID).
-			Error("Invalid Layout: ", err)
-		return chronograf.Layout{}, chronograf.ErrLayoutInvalid
-	}
-
-	for _, layout := range layouts {
-		if layout.ID == ID {
-			return layout, nil
-		}
-	}
-
-	s.Logger.
-		WithField("component", "apps").
-		WithField("name", ID).
- Error("Layout not found") - return chronograf.Layout{}, chronograf.ErrLayoutNotFound -} - -// Update not supported -func (s *BinLayoutsStore) Update(ctx context.Context, layout chronograf.Layout) error { - return fmt.Errorf("update to BinLayoutsStore not supported") -} diff --git a/chronograf/canned/consul.json b/chronograf/canned/consul.json deleted file mode 100644 index a23d44700b6..00000000000 --- a/chronograf/canned/consul.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "id": "f3bec493-0bc1-49d5-a40a-a09bd5cfb60c", - "measurement": "consul_health_checks", - "app": "consul", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "9e14639d-b8d9-4245-8c45-862ed4383d31", - "name": "Consul – Number of Critical Health Checks", - "queries": [ - { - "query": "SELECT count(\"check_id\") as \"Number Critical\" FROM \":db:\".\":rp:\".\"consul_health_checks\"", - "label": "count", - "groupbys": [ - "\"service_name\"" - ], - "wheres": [ - "\"status\" = 'critical'" - ] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "595be39d-85db-4410-b349-35c25465a4b8", - "name": "Consul – Number of Warning Health Checks", - "queries": [ - { - "query": "SELECT count(\"check_id\") as \"Number Warning\" FROM \":db:\".\":rp:\".\"consul_health_checks\"", - "label": "count", - "groupbys": [ - "\"service_name\"" - ], - "wheres": [ - "\"status\" = 'warning'" - ] - } - ] - } - ] -} diff --git a/chronograf/canned/consul_agent.json b/chronograf/canned/consul_agent.json deleted file mode 100644 index ade0ff36059..00000000000 --- a/chronograf/canned/consul_agent.json +++ /dev/null @@ -1,59 +0,0 @@ -{ - "id": "f3bec493-0bc1-49d5-a40a-a09bd5cfb700", - "measurement": "consul_consul_fsm_register", - "app": "consul_telemetry", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "9e14639d-b8d9-4245-8c45-862ed4383701", - "name": "Consul Agent – Number of Go Routines", - "queries": [ - { - "query": "SELECT max(\"value\") AS \"Go Routines\" FROM \":db:\".\":rp:\".\"consul_ip-172-31-6-247_runtime_num_goroutines\"", - "groupbys": [ - ], - "wheres": [ - ] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "9e14639d-b8d9-4245-8c45-862ed4383702", - "name": "Consul Agent – Runtime Alloc Bytes", - "queries": [ - { - "query": "SELECT max(\"value\") AS \"Runtime Alloc Bytes\" FROM \":db:\".\":rp:\".\"consul_ip-172-31-6-247_runtime_alloc_bytes\"", - "groupbys": [ - ], - "wheres": [ - ] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "9e14639d-b8d9-4245-8c45-862ed4383703", - "name": "Consul Agent – Heap Objects", - "queries": [ - { - "query": "SELECT max(\"value\") AS \"Heap Objects\" FROM \":db:\".\":rp:\".\"consul_ip-172-31-6-247_runtime_heap_objects\"", - "groupbys": [ - ], - "wheres": [ - ] - } - ] - } - ] -} diff --git a/chronograf/canned/consul_cluster.json b/chronograf/canned/consul_cluster.json deleted file mode 100644 index 1fce31b7f60..00000000000 --- a/chronograf/canned/consul_cluster.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "id": "350b780c-7d32-4b29-ac49-0d4e2c092943", - "measurement": "consul_memberlist_msg_alive", - "app": "consul_telemetry", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "bd62186a-f475-478b-bf02-8c4ab07eccd1", - "name": "Consul – Number of Agents", - "queries": [ - { - "query": "SELECT min(\"value\") AS \"num_agents\" FROM \":db:\".\":rp:\".\"consul_memberlist_msg_alive\"", - "label": "count", - "groupbys": [], - "wheres": [] - } - ] - } - ] -} \ No newline at end of file diff 
--git a/chronograf/canned/consul_election.json b/chronograf/canned/consul_election.json deleted file mode 100644 index 02ae36fba2a..00000000000 --- a/chronograf/canned/consul_election.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "id": "b15aaf24-701a-4d9b-920c-9a407e91da71", - "measurement": "consul_raft_state_candidate", - "app": "consul_telemetry", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "5b2bddce-badb-4594-91fb-0486f62266e5", - "name": "Consul – Leadership Election", - "queries": [ - { - "query": "SELECT max(\"value\") AS \"max_value\" FROM \":db:\".\":rp:\".\"consul_raft_state_candidate\"", - "label": "count", - "groupbys": [], - "wheres": [] - } - ] - } - ] -} \ No newline at end of file diff --git a/chronograf/canned/consul_http.json b/chronograf/canned/consul_http.json deleted file mode 100644 index 624e175131b..00000000000 --- a/chronograf/canned/consul_http.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "id": "26809869-8df3-49ad-b2f0-b1e1c72f67b0", - "measurement": "consul_consul_http_GET_v1_health_state__", - "app": "consul_telemetry", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "dfb4c50f-547e-484a-944b-d6374ba2b4c0", - "name": "Consul – HTTP Request Time (ms)", - "queries": [ - { - "query": "SELECT max(\"upper\") AS \"GET_health_state\" FROM \":db:\".\":rp:\".\"consul_consul_http_GET_v1_health_state__\"", - "label": "ms", - "groupbys": [], - "wheres": [] - } - ] - } - ] -} \ No newline at end of file diff --git a/chronograf/canned/consul_leadership.json b/chronograf/canned/consul_leadership.json deleted file mode 100644 index cdd6f9adfad..00000000000 --- a/chronograf/canned/consul_leadership.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "id": "34611ae0-7c3e-4697-8db0-371b16bef345", - "measurement": "consul_raft_state_leader", - "app": "consul_telemetry", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "ef8eeeb5-b408-46d6-8cfc-20c00c9d7239", - "name": "Consul – Leadership Change", - "queries": [ - { - "query": "SELECT max(\"value\") as \"change\" FROM \":db:\".\":rp:\".\"consul_raft_state_leader\"", - "label": "count", - "groupbys": [], - "wheres": [] - } - ] - } - ] -} \ No newline at end of file diff --git a/chronograf/canned/consul_serf_events.json b/chronograf/canned/consul_serf_events.json deleted file mode 100644 index 87853b961e5..00000000000 --- a/chronograf/canned/consul_serf_events.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "id": "ef4b596c-77de-41c5-bb5b-d5c9a69fa633", - "measurement": "consul_serf_events", - "app": "consul_telemetry", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "59df3d73-5fac-48cb-84f1-dbe9a1bb886c", - "name": "Consul – Number of serf events", - "queries": [ - { - "query": "SELECT max(\"value\") AS \"serf_events\" FROM \":db:\".\":rp:\".\"consul_serf_events\"", - "label": "count", - "groupbys": [], - "wheres": [] - } - ] - } - ] -} \ No newline at end of file diff --git a/chronograf/canned/cpu.json b/chronograf/canned/cpu.json deleted file mode 100644 index 46b1d28a28a..00000000000 --- a/chronograf/canned/cpu.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "id": "0fa47984-825b-46f1-9ca5-0366e3281cc5", - "measurement": "cpu", - "app": "system", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "cc9ba2b6-e398-4396-80dc-819bb7ac7ce1", - "name": "CPU Usage", - "queries": [ - { - "query": "SELECT 100 - mean(\"usage_idle\") AS \"usage\" FROM \":db:\".\":rp:\".\"cpu\"", - "label": "% CPU 
time", - "groupbys": [], - "wheres": [] - } - ] - } - ] -} diff --git a/chronograf/canned/disk.json b/chronograf/canned/disk.json deleted file mode 100644 index 1c2fa1c4c96..00000000000 --- a/chronograf/canned/disk.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "id": "0b75be4e-3454-4d5d-9a98-ca77c81397f6", - "measurement": "disk", - "app": "system", - "autoflow": true, - "cells": [ - { - "x": 4, - "y": 0, - "w": 4, - "h": 4, - "i": "5825a4dd-df97-4e99-a99d-67b68833c183", - "name": "System - Disk used %", - "queries": [ - { - "query": "SELECT mean(\"used_percent\") AS \"used_percent\" FROM \":db:\".\":rp:\".\"disk\"", - "label": "% used", - "groupbys": [ - "\"path\"" - ], - "wheres": [] - } - ] - } - ] -} diff --git a/chronograf/canned/diskio.json b/chronograf/canned/diskio.json deleted file mode 100644 index 9ad163ba86b..00000000000 --- a/chronograf/canned/diskio.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "id": "9e3a9fcd-a363-4470-991e-a4d6987a94c8", - "measurement": "diskio", - "app": "system", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "7f647740-d9f0-4012-8e7a-5d898c8f271e", - "name": "System – Disk MB/s", - "queries": [ - { - "query": "SELECT non_negative_derivative(max(\"read_bytes\"), 1s) / 1000000 AS \"read_megabytes_per_second\" FROM \":db:\".\":rp:\".\"diskio\"", - "groupbys": [ - "\"name\"" - ], - "wheres": [], - "label": "MB/s" - }, - { - "query": "SELECT non_negative_derivative(max(\"write_bytes\"), 1s) / 1000000 AS \"write_megabytes_per_second\" FROM \":db:\".\":rp:\".\"diskio\"", - "groupbys": [ - "\"name\"" - ], - "wheres": [] - } - ] - } - ] -} \ No newline at end of file diff --git a/chronograf/canned/docker.json b/chronograf/canned/docker.json deleted file mode 100644 index f3bbc1b8963..00000000000 --- a/chronograf/canned/docker.json +++ /dev/null @@ -1,111 +0,0 @@ -{ - "id": "0e980b97-c162-487b-a815-3f955df6243f", - "app": "docker", - "measurement": "docker", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "4c79cefb-5152-410c-9b88-74f9bff7ef22", - "name": "Docker - Container CPU %", - "queries": [ - { - "query": "SELECT mean(\"usage_percent\") AS \"usage_percent\" FROM \":db:\".\":rp:\".\"docker_container_cpu\"", - "label": "% CPU time", - "groupbys": [ - "\"container_name\"" - ] - } - ], - "type": "line-stacked" - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "4c79cefb-5152-410c-9b88-74f9bff7ef00", - "name": "Docker - Container Memory (MB)", - "queries": [ - { - "query": "SELECT mean(\"usage\") / 1048576 AS \"usage\" FROM \":db:\".\":rp:\".\"docker_container_mem\"", - "label": "MB", - "groupbys": [ - "\"container_name\"" - ] - } - ], - "type": "line-stepplot" - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "4c79cefb-5152-410c-9b88-74f9bff7ef01", - "name": "Docker - Containers", - "queries": [ - { - "query": "SELECT max(\"n_containers\") AS \"max_n_containers\" FROM \":db:\".\":rp:\".\"docker\"", - "label": "count", - "groupbys": [ - "\"host\"" - ] - } - ], - "colors": [], - "type": "single-stat" - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "4c79cefb-5152-410c-9b88-74f9bff7ef02", - "name": "Docker - Images", - "queries": [ - { - "query": "SELECT max(\"n_images\") AS \"max_n_images\" FROM \":db:\".\":rp:\".\"docker\"", - "groupbys": [ - "\"host\"" - ] - } - ], - "colors": [], - "type": "single-stat" - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "4c79cefb-5152-410c-9b88-74f9bff7ef03", - "name": "Docker - Container State", - "queries": [ - { - "query": "SELECT 
max(\"n_containers_running\") AS \"max_n_containers_running\" FROM \":db:\".\":rp:\".\"docker\"", - "label": "count", - "groupbys": [ - "\"host\"" - ] - }, - { - "query": "SELECT max(\"n_containers_stopped\") AS \"max_n_containers_stopped\" FROM \":db:\".\":rp:\".\"docker\"", - "groupbys": [ - "\"host\"" - ] - }, - { - "query": "SELECT max(\"n_containers_paused\") AS \"max_n_containers_paused\" FROM \":db:\".\":rp:\".\"docker\"", - "groupbys": [ - "\"host\"" - ] - } - ], - "type": "" - } - ] -} diff --git a/chronograf/canned/docker_blkio.json b/chronograf/canned/docker_blkio.json deleted file mode 100644 index 71cd5890d34..00000000000 --- a/chronograf/canned/docker_blkio.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "id": "0e980b97-c162-487b-a815-3f955df62440", - "measurement": "docker_container_blkio", - "app": "docker", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "4c79cefb-5152-410c-9b88-74f9bff7ef50", - "name": "Docker - Container Block IO", - "queries": [ - { - "query": "SELECT max(\"io_serviced_recursive_read\") AS \"max_io_read\" FROM \":db:\".\":rp:\".\"docker_container_blkio\"", - "groupbys": [ - "\"container_name\"" - ], - "wheres": [] - }, - { - "query": "SELECT max(\"io_serviced_recursive_sync\") AS \"max_io_sync\" FROM \":db:\".\":rp:\".\"docker_container_blkio\"", - "groupbys": [ - "\"container_name\"" - ], - "wheres": [] - }, - { - "query": "SELECT max(\"io_serviced_recursive_write\") AS \"max_io_write\" FROM \":db:\".\":rp:\".\"docker_container_blkio\"", - "groupbys": [ - "\"container_name\"" - ], - "wheres": [] - }, - { - "query": "SELECT max(\"io_serviced_recursive_total\") AS \"max_io_total\" FROM \":db:\".\":rp:\".\"docker_container_blkio\"", - "groupbys": [ - "\"container_name\"" - ], - "wheres": [] - } - ] - } - ] -} diff --git a/chronograf/canned/docker_net.json b/chronograf/canned/docker_net.json deleted file mode 100644 index aa2b2f774d1..00000000000 --- a/chronograf/canned/docker_net.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "id": "0e980b97-c162-487b-a815-3f955df62430", - "measurement": "docker_container_net", - "app": "docker", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "4c79cefb-5152-410c-9b88-74f9bff7ef23", - "name": "Docker - Container Network", - "queries": [ - { - "query": "SELECT derivative(mean(\"tx_bytes\"), 10s) AS \"net_tx_bytes\" FROM \":db:\".\":rp:\".\"docker_container_net\"", - "groupbys": [ - "\"container_name\"" - ], - "wheres": [] - }, - { - "query": "SELECT derivative(mean(\"rx_bytes\"), 10s) AS \"net_rx_bytes\" FROM \":db:\".\":rp:\".\"docker_container_net\"", - "groupbys": [ - "\"container_name\"" - ], - "wheres": [] - } - ] - } - ] -} diff --git a/chronograf/canned/elasticsearch.json b/chronograf/canned/elasticsearch.json deleted file mode 100644 index 12385d7c23b..00000000000 --- a/chronograf/canned/elasticsearch.json +++ /dev/null @@ -1,158 +0,0 @@ -{ - "id": "1f3ac9d0-bfb3-4e13-91a6-8949f7643ee9", - "measurement": "elasticsearch_indices", - "app": "elasticsearch", - "autoflow": false, - "cells": [ - { - "x": 0, - "y": 0, - "w": 12, - "h": 4, - "i": "3254c2ee-4b0f-440e-9cba-b996b96bf12a", - "name": "ElasticSearch - Query Throughput", - "queries": [ - { - "query": "select non_negative_derivative(mean(search_query_total)) as searches_per_min, non_negative_derivative(mean(search_scroll_total)) as scrolls_per_min, non_negative_derivative(mean(search_fetch_total)) as fetches_per_min, non_negative_derivative(mean(search_suggest_total)) as suggests_per_min from 
elasticsearch_indices", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 4, - "w": 12, - "h": 4, - "i": "7db341c0-455b-4595-8d34-61dfbdaf6cc6", - "name": "ElasticSearch - Open Connections", - "queries": [ - { - "query": "select mean(current_open) from elasticsearch_http", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 8, - "w": 6, - "h": 4, - "i": "ca304109-35db-4066-91e4-00875a618abb", - "name": "ElasticSearch - Query Latency", - "queries": [ - { - "query": "select non_negative_derivative(mean(search_query_time_in_millis)) as mean, non_negative_derivative(median(search_query_time_in_millis)) as median, non_negative_derivative(percentile(search_query_time_in_millis, 95)) as ninety_fifth from elasticsearch_indices", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 6, - "y": 8, - "w": 6, - "h": 4, - "i": "e0418118-a562-49d1-bf50-83943f72b245", - "name": "ElasticSearch - Fetch Latency", - "queries": [ - { - "query": "select non_negative_derivative(mean(search_fetch_time_in_millis)) as mean, non_negative_derivative(median(search_fetch_time_in_millis)) as median, non_negative_derivative(percentile(search_fetch_time_in_millis, 95)) as ninety_fifth from elasticsearch_indices", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 12, - "w": 6, - "h": 4, - "i": "3912091e-2ee5-4f47-bc74-40520239372d", - "name": "ElasticSearch - Suggest Latency", - "queries": [ - { - "query": "select non_negative_derivative(mean(search_suggest_time_in_millis)) as mean, non_negative_derivative(median(search_suggest_time_in_millis)) as median, non_negative_derivative(percentile(search_suggest_time_in_millis, 95)) as ninety_fifth from elasticsearch_indices", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 6, - "y": 12, - "w": 6, - "h": 4, - "i": "01e536cd-baf8-4bf3-9cee-9c1d149b58ef", - "name": "ElasticSearch - Scroll Latency", - "queries": [ - { - "query": "select non_negative_derivative(mean(search_scroll_time_in_millis)) as mean, non_negative_derivative(median(search_scroll_time_in_millis)) as median, non_negative_derivative(percentile(search_scroll_time_in_millis, 95)) as ninety_fifth from elasticsearch_indices", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 16, - "w": 12, - "h": 4, - "i": "306d6cdc-93ef-49d9-8151-a1bae355dfc6", - "name": "ElasticSearch - Indexing Latency", - "queries": [ - { - "query": "select non_negative_derivative(mean(indexing_index_time_in_millis)) as mean from elasticsearch_indices", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 20, - "w": 4, - "h": 4, - "i": "5ef57f9f-4cba-4f9e-9264-15aa2954c724", - "name": "ElasticSearch - JVM GC Collection Counts", - "queries": [ - { - "query": "select mean(gc_collectors_old_collection_count) as old_count, mean(gc_collectors_young_collection_count) as young_count from elasticsearch_jvm", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 4, - "y": 20, - "w": 4, - "h": 4, - "i": "fa7c807e-3e87-4d26-869b-e0ffd3ef344a", - "name": "ElasticSearch - JVM GC Latency", - "queries": [ - { - "query": "select non_negative_derivative(mean(gc_collectors_old_collection_time_in_millis)) as mean_old_time, non_negative_derivative(mean(gc_collectors_young_collection_time_in_millis)) as mean_young_time from elasticsearch_jvm", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 8, - "y": 20, - "w": 4, - "h": 4, - "i": "6f4e01c4-31d6-4302-8e62-9f31f6c3f46f", - "name": "ElasticSearch - JVM Heap Usage", - "queries": [ - { - "query": "select 
mean(mem_heap_used_percent) from elasticsearch_jvm", - "groupbys": [], - "wheres": [] - } - ] - } - ] -} diff --git a/chronograf/canned/haproxy.json b/chronograf/canned/haproxy.json deleted file mode 100644 index 7dd4391d148..00000000000 --- a/chronograf/canned/haproxy.json +++ /dev/null @@ -1,238 +0,0 @@ -{ - "id": "45c064fd-ebf7-45a1-bf8d-f53746d38a03", - "measurement": "haproxy", - "app": "haproxy", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "b846eda3-e068-4a34-91e9-c108c962a572", - "name": "HAProxy – Number of Servers", - "queries": [ - { - "query": "select mean(\"active_servers\") AS active_servers, mean(\"backup_servers\") AS backup_servers FROM \":db:\".\":rp:\".\"haproxy\"", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "a5070a99-c65a-4dfd-b486-2d3a2582d9eb", - "name": "HAProxy – Sum HTTP 2xx", - "queries": [ - { - "query": "SELECT non_negative_derivative(last(\"http_response.2xx\"), 1s) AS \"2xx\" FROM \":db:\".\":rp:\".\"haproxy\"", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "ab80deab-f9be-4506-b547-6f8286cb7660", - "name": "HAProxy – Sum HTTP 4xx", - "queries": [ - { - "query": "SELECT non_negative_derivative(last(\"http_response.4xx\"), 1s) AS \"4xx\" FROM \":db:\".\":rp:\".\"haproxy\"", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "9754391d-3464-49cc-b3ef-de9332d3bc20", - "name": "HAProxy – Sum HTTP 5xx", - "queries": [ - { - "query": "SELECT non_negative_derivative(last(\"http_response.5xx\"), 1s) AS \"5xx\" FROM \":db:\".\":rp:\".\"haproxy\"", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "c836d118-6b03-436c-af60-0f95a5df0c89", - "name": "HAProxy – Frontend HTTP Requests/Second ", - "queries": [ - { - "query": "SELECT mean(\"req_rate\") AS \"requests_per_second\" FROM \":db:\".\":rp:\".\"haproxy\"", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "cc411bc8-8f14-43bb-865b-4b921310aef3", - "name": "HAProxy – Frontend Sessions/Second ", - "queries": [ - { - "query": "SELECT non_negative_derivative(max(\"rate\")) AS \"sessions_per_second\" FROM \":db:\".\":rp:\".\"haproxy\"", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "3cc170b6-cd89-4142-b6a7-ea61b78bbdff", - "name": "HAProxy – Frontend Session Usage %", - "queries": [ - { - "query": "SELECT non_negative_derivative(max(\"scur\")) / non_negative_derivative(max(\"slim\")) * 100 AS \"session_usage_percent\" FROM \":db:\".\":rp:\".\"haproxy\"", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "724db2a3-f23d-46d6-aa5b-f9e44cac1ee2", - "name": "HAProxy – Frontend Security Denials/Second", - "queries": [ - { - "query": "SELECT non_negative_derivative(max(\"dreq\")) AS \"denials_per_second\" FROM \":db:\".\":rp:\".\"haproxy\"", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "082a4e23-9256-441c-8414-db253a2c6d94", - "name": "HAProxy – Frontend Request Errors/Second", - "queries": [ - { - "query": "SELECT non_negative_derivative(max(\"ereq\")) AS \"errors_per_second\" FROM \":db:\".\":rp:\".\"haproxy\"", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "c7de430d-5684-494d-b735-0c87e7ea14e3", - "name": "HAProxy – Frontend Bytes/Second", - "queries": [ 
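All of the canned layout files removed in this change share one JSON shape: a top-level id, measurement, app, and autoflow flag, plus a list of cells, each carrying a grid rectangle (x, y, w, h), a cell UUID (i), a user-facing name, an optional chart type, and one or more InfluxQL queries with optional label, groupbys, and wheres. A minimal Go sketch of that schema (struct and field names here are illustrative, not the actual Chronograf types):

package canned

// Layout mirrors the shape of the canned dashboard JSON files
// deleted in this diff.
type Layout struct {
	ID          string `json:"id"`
	Measurement string `json:"measurement"`
	App         string `json:"app"`
	Autoflow    bool   `json:"autoflow"`
	Cells       []Cell `json:"cells"`
}

// Cell is one dashboard panel: a grid rectangle plus its queries.
type Cell struct {
	X       int     `json:"x"`
	Y       int     `json:"y"`
	W       int     `json:"w"`
	H       int     `json:"h"`
	I       string  `json:"i"`              // cell UUID
	Name    string  `json:"name"`           // user-facing title
	Type    string  `json:"type,omitempty"` // e.g. "line-stacked", "single-stat"
	Queries []Query `json:"queries"`
}

// Query holds one InfluxQL statement plus the optional label and
// GROUP BY / WHERE fragments seen in the files above.
type Query struct {
	Query    string   `json:"query"`
	Label    string   `json:"label,omitempty"`
	GroupBys []string `json:"groupbys"`
	Wheres   []string `json:"wheres"`
}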
- { - "query": "SELECT non_negative_derivative(max(\"bin\")) AS \"bytes_in_per_second\" FROM \":db:\".\":rp:\".\"haproxy\"", - "groupbys": [], - "wheres": [] - }, - { - "query": "SELECT non_negative_derivative(max(\"bout\")) AS \"bytes_out_per_second\" FROM \":db:\".\":rp:\".\"haproxy\"", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "cde02d86-4243-48d4-b812-46f8119b2ac5", - "name": "HAProxy – Backend Average Response Time (ms)", - "queries": [ - { - "query": "SELECT max(\"rtime\") AS \"response_time\" FROM \":db:\".\":rp:\".\"haproxy\"", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "2e8ef243-c993-4a53-b010-32de4beb1f81", - "name": "HAProxy – Backend Connection Errors/Second", - "queries": [ - { - "query": "SELECT non_negative_derivative(max(\"econ\")) AS \"errors_per_second\" FROM \":db:\".\":rp:\".\"haproxy\"", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "f4223249-d2fa-4778-bb27-449bf8863ea3", - "name": "HAProxy – Backend Queued Requests/Second", - "queries": [ - { - "query": "SELECT non_negative_derivative(max(\"qcur\")) AS \"queued_per_second\" FROM \":db:\".\":rp:\".\"haproxy\"", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "b3bcca49-7118-4f7e-921d-a8d47505795a", - "name": "HAProxy – Backend Average Request Queue Time (ms)", - "queries": [ - { - "query": "SELECT max(\"qtime\") AS \"queue_time\" FROM \":db:\".\":rp:\".\"haproxy\"", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "dd925132-3419-4677-9f21-a3d34cf25c99", - "name": "HAProxy – Backend Error Responses/Second", - "queries": [ - { - "query": "SELECT max(\"eresp\") AS \"error_response_rate\" FROM \":db:\".\":rp:\".\"haproxy\"", - "groupbys": [], - "wheres": [] - } - ] - } - ] -} diff --git a/chronograf/canned/influxdb_database.json b/chronograf/canned/influxdb_database.json deleted file mode 100644 index 87e8bbc7c57..00000000000 --- a/chronograf/canned/influxdb_database.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "id": "543aa120-14ba-46a2-8ef9-6e6c7be3d600", - "measurement": "influxdb_database", - "app": "influxdb", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "ebc3e2df-640f-4344-b493-d5aae873b6d3", - "name": "InfluxDB - Cardinality", - "queries": [ - { - "query": "SELECT max(\"numMeasurements\") AS \"measurements\" FROM \":db:\".\":rp:\".\"influxdb_database\"", - "groupbys": [], - "wheres": [] - }, - { - "query": "SELECT max(\"numSeries\") AS \"series\" FROM \":db:\".\":rp:\".\"influxdb_database\"", - "groupbys": [], - "wheres": [] - } - ] - } - ] -} diff --git a/chronograf/canned/influxdb_httpd.json b/chronograf/canned/influxdb_httpd.json deleted file mode 100644 index 252c6936e02..00000000000 --- a/chronograf/canned/influxdb_httpd.json +++ /dev/null @@ -1,62 +0,0 @@ -{ - "id": "e0d70dc9-538a-4b29-8d27-4a76d5fc8a09", - "measurement": "influxdb_httpd", - "app": "influxdb", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "b4cbb2d6-a261-482a-942d-04e510f2b532", - "name": "InfluxDB - Write HTTP Requests", - "queries": [ - { - "query": "SELECT non_negative_derivative(max(\"writeReq\")) AS \"http_requests\" FROM \":db:\".\":rp:\".\"influxdb_httpd\"", - "label": "count/s", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "cb473467-1854-4c7c-930e-769f24beb761", - 
"name": "InfluxDB - Query Requests", - "queries": [ - { - "query": "SELECT non_negative_derivative(max(\"queryReq\")) AS \"query_requests\" FROM \":db:\".\":rp:\".\"influxdb_httpd\"", - "label": "count/s", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "type": "line-stepplot", - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "e0d70dc9-538a-4b29-8d27-4a76d5fc8a09", - "name": "InfluxDB - Client Failures", - "queries": [ - { - "query": "SELECT non_negative_derivative(max(\"clientError\")) AS \"client_errors\" FROM \":db:\".\":rp:\".\"influxdb_httpd\"", - "label": "count/s", - "groupbys": [], - "wheres": [] - }, - { - "query": "SELECT non_negative_derivative(max(\"authFail\"), 1s) AS \"auth_fail\" FROM \":db:\".\":rp:\".\"influxdb_httpd\"", - "groupbys": [], - "wheres": [] - } - ] - } - ] -} diff --git a/chronograf/canned/influxdb_queryExecutor.json b/chronograf/canned/influxdb_queryExecutor.json deleted file mode 100644 index b280356a0bb..00000000000 --- a/chronograf/canned/influxdb_queryExecutor.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "id": "543aa120-14ba-46a2-8ef9-6e6c7be3d60e", - "measurement": "influxdb_queryExecutor", - "app": "influxdb", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "974f6948-d79a-4925-8162-193e6ddf1c7a", - "name": "InfluxDB - Query Performance", - "queries": [ - { - "query": "SELECT non_negative_derivative(max(\"queryDurationNs\"), 1s) / 1000000 AS \"duration_ms\" FROM \":db:\".\":rp:\".\"influxdb_queryExecutor\"", - "label": "ms", - "groupbys": [], - "wheres": [] - }, - { - "query": "SELECT non_negative_derivative(max(\"queriesExecuted\"), 1s) / 1000000 AS \"queries_executed_ms\" FROM \":db:\".\":rp:\".\"influxdb_queryExecutor\"", - "label": "ms", - "groupbys": [], - "wheres": [] - } - ] - } - ] -} \ No newline at end of file diff --git a/chronograf/canned/influxdb_write.json b/chronograf/canned/influxdb_write.json deleted file mode 100644 index f6dd6228b1a..00000000000 --- a/chronograf/canned/influxdb_write.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "id": "74fe93bf-14d6-40d4-af8f-335554f4acf3", - "measurement": "influxdb_write", - "app": "influxdb", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "12384232-7bc7-4129-8958-ef551a320524", - "name": "InfluxDB - Write Points", - "queries": [ - { - "query": "SELECT non_negative_derivative(max(\"pointReq\")) AS \"points_written\" FROM \":db:\".\":rp:\".\"influxdb_write\"", - "label": "points/s", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "6281a48f-c29a-4941-bdd9-07f6d0fd98cf", - "name": "InfluxDB - Write Errors", - "queries": [ - { - "query": "SELECT non_negative_derivative(max(\"writeError\")) AS \"shard_write_error\" FROM \":db:\".\":rp:\".\"influxdb_write\"", - "label": "errors/s", - "groupbys": [], - "wheres": [] - }, - { - "query": "SELECT non_negative_derivative(max(\"serveError\")) AS \"http_error\" FROM \":db:\".\":rp:\".\"influxdb_httpd\"", - "groupbys": [], - "wheres": [] - } - ] - } - ] -} diff --git a/chronograf/canned/kubernetes_node.json b/chronograf/canned/kubernetes_node.json deleted file mode 100644 index e427c7f3d0f..00000000000 --- a/chronograf/canned/kubernetes_node.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "id": "4a1efaec-57cf-4aeb-8dea-8a015f8ec3c5", - "measurement": "kubernetes_node", - "app": "kubernetes", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "5f406919-14b8-4c01-b0ce-f8ed75310805", - "name": "K8s - Node Millicores", - 
"queries": [ - { - "query": "SELECT mean(\"cpu_usage_nanocores\") / 1000000 AS \"cpu_usage_millicores\" FROM \":db:\".\":rp:\".\"kubernetes_node\"", - "groupbys": [ - "\"node_name\"" - ], - "wheres": [] - } - ] - }, - { - "x": 4, - "y": 0, - "w": 4, - "h": 4, - "i": "042d47cc-fcfd-4b26-a690-d81c0321d408", - "name": "K8s - Node Memory Bytes", - "queries": [ - { - "query": "SELECT mean(\"memory_usage_bytes\") AS \"memory_usage_bytes\" FROM \":db:\".\":rp:\".\"kubernetes_node\"", - "groupbys": [ - "\"node_name\"" - ], - "wheres": [] - } - ] - } - ] -} diff --git a/chronograf/canned/kubernetes_pod_container.json b/chronograf/canned/kubernetes_pod_container.json deleted file mode 100644 index 650f0119ac9..00000000000 --- a/chronograf/canned/kubernetes_pod_container.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "id": "50a14fed-6d0c-4c8a-a142-ad9276bee245", - "measurement": "kubernetes_pod_container", - "app": "kubernetes", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "e2427235-c81b-42a1-afdf-80d340fc01f8", - "name": "K8s - Pod Millicores", - "queries": [ - { - "query": "SELECT mean(\"cpu_usage_nanocores\") / 1000000 AS \"cpu_usage_millicores\" FROM \":db:\".\":rp:\".\"kubernetes_pod_container\"", - "groupbys": [ - "\"pod_name\"" - ], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "6edb8c61-f723-47ce-a7eb-904fc6fe066e", - "name": "K8s - Pod Memory Bytes", - "queries": [ - { - "query": "SELECT mean(\"memory_usage_bytes\") AS \"memory_usage_bytes\" FROM \":db:\".\":rp:\".\"kubernetes_pod_container\"", - "groupbys": [ - "\"pod_name\"" - ], - "wheres": [] - } - ] - } - ] -} diff --git a/chronograf/canned/kubernetes_pod_network.json b/chronograf/canned/kubernetes_pod_network.json deleted file mode 100644 index 2eb7c099f8f..00000000000 --- a/chronograf/canned/kubernetes_pod_network.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "id": "45845136-bcb7-41ad-a02e-c63e9d3452de", - "measurement": "kubernetes_pod_network", - "app": "kubernetes", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "0e06ddcd-05dd-493f-9dba-a382300a7190", - "name": "K8s - Pod TX Bytes/Second", - "queries": [ - { - "query": "SELECT non_negative_derivative(max(\"tx_bytes\")) AS \"tx_bytes_per_second\" FROM \":db:\".\":rp:\".\"kubernetes_pod_network\"", - "groupbys": [ - "\"pod_name\"", - "\"host\"" - ], - "wheres": [] - } - ] - }, - { - "x": 4, - "y": 0, - "w": 4, - "h": 4, - "i": "cc062b4c-70ca-4bd7-b372-398e734feb49", - "name": "K8s - Pod RX Bytes/Second ", - "queries": [ - { - "query": "SELECT non_negative_derivative(max(\"rx_bytes\")) AS \"rx_bytes_per_second\" FROM \":db:\".\":rp:\".\"kubernetes_pod_network\"", - "groupbys": [ - "\"pod_name\"", - "\"host\"" - ], - "wheres": [] - } - ] - } - ] -} diff --git a/chronograf/canned/kubernetes_system_container.json b/chronograf/canned/kubernetes_system_container.json deleted file mode 100644 index 848935cec0d..00000000000 --- a/chronograf/canned/kubernetes_system_container.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "id": "05dde59a-a52f-4ede-81fa-0c6011f29287", - "measurement": "kubernetes_system_container", - "app": "kubernetes", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "64cf0f60-e157-4c03-9d7e-c280a7e2695f", - "name": "K8s - Kubelet Millicores", - "queries": [ - { - "query": "SELECT mean(\"cpu_usage_nanocores\") / 1000000 AS \"cpu_usage_millicores\" FROM \":db:\".\":rp:\".\"kubernetes_system_container\"", - "groupbys": [], - "wheres": [ - 
"\"container_name\" = 'kubelet'" - ] - } - ] - }, - { - "x": 4, - "y": 0, - "w": 4, - "h": 4, - "i": "4a7454d1-4d60-4077-9e7b-8c915a00fe66", - "name": "K8s - Kubelet Memory Bytes", - "queries": [ - { - "query": "SELECT mean(\"memory_usage_bytes\") AS \"memory_usage_bytes\" FROM \":db:\".\":rp:\".\"kubernetes_system_container\"", - "groupbys": [], - "wheres": [ - "\"container_name\" = 'kubelet'" - ] - } - ] - } - ] -} diff --git a/chronograf/canned/load.json b/chronograf/canned/load.json deleted file mode 100644 index 33e672a66f0..00000000000 --- a/chronograf/canned/load.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "id": "ec6c48f4-48ca-4ba7-a842-5b700e19f274", - "measurement": "system", - "app": "system", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "6ec7e632-2c19-475c-8747-56feaacf46ce", - "name": "System Load", - "queries": [ - { - "query": "SELECT mean(\"load1\") AS \"load\" FROM \":db:\".\":rp:\".\"system\"", - "groupbys": [], - "wheres": [] - } - ] - } - ] -} diff --git a/chronograf/canned/mem.json b/chronograf/canned/mem.json deleted file mode 100644 index 045ce2f505d..00000000000 --- a/chronograf/canned/mem.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "id": "4a805493-f7ef-4da0-8de8-e78afd899722", - "measurement": "mem", - "app": "system", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "e6e5063c-43d5-409b-a0ab-68da51ed3f28", - "name": "System - Memory Gigabytes Used", - "queries": [ - { - "query": "SELECT mean(\"used\") / 1073741824 AS \"used\", mean(\"available\") / 1073741824 AS \"available\" FROM \":db:\".\":rp:\".\"mem\"", - "label": "GB", - "groupbys": [], - "wheres": [] - } - ], - "type": "line-stacked" - } - ] -} diff --git a/chronograf/canned/memcached.json b/chronograf/canned/memcached.json deleted file mode 100644 index e15c75791b9..00000000000 --- a/chronograf/canned/memcached.json +++ /dev/null @@ -1,216 +0,0 @@ -{ - "id": "f280c8c7-0530-425c-b281-788d8ded7676", - "measurement": "memcached", - "app": "memcached", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af490", - "name": "Memcached - Current Connections", - "queries": [ - { - "query": "SELECT max(\"curr_connections\") AS \"current_connections\" FROM \":db:\".\":rp:\".\"memcached\"", - "label": "count", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af400", - "name": "Memcached - Get Hits/Second", - "queries": [ - { - "query": "SELECT non_negative_derivative(max(\"get_hits\")) AS \"get_hits\" FROM \":db:\".\":rp:\".\"memcached\"", - "label": "hits/s", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af405", - "name": "Memcached - Get Misses/Second", - "queries": [ - { - "query": "SELECT non_negative_derivative(max(\"get_misses\")) AS \"get_misses\" FROM \":db:\".\":rp:\".\"memcached\"", - "label": "misses/s", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af413", - "name": "Memcached - Delete Hits/Second", - "queries": [ - { - "query": "SELECT non_negative_derivative(max(\"delete_hits\")) AS \"delete_hits\" FROM \":db:\".\":rp:\".\"memcached\"", - "label": "deletes/s", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af412", - "name": "Memcached - Delete 
Misses/Second", - "queries": [ - { - "query": "SELECT non_negative_derivative(max(\"delete_misses\")) AS \"delete_misses\" FROM \":db:\".\":rp:\".\"memcached\"", - "label": "delete misses/s", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af411", - "name": "Memcached - Incr Hits/Second", - "queries": [ - { - "query": "SELECT non_negative_derivative(max(\"incr_hits\")) AS \"incr_hits\" FROM \":db:\".\":rp:\".\"memcached\"", - "label": "incr hits/s", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af510", - "name": "Memcached - Incr Misses/Second", - "queries": [ - { - "query": "SELECT non_negative_derivative(max(\"incr_misses\")) AS \"incr_misses\" FROM \":db:\".\":rp:\".\"memcached\"", - "label": "incr misses/s", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af402", - "name": "Memcached - Current Items", - "queries": [ - { - "query": "SELECT max(\"curr_items\") AS \"current_items\" FROM \":db:\".\":rp:\".\"memcached\"", - "label": "count", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af403", - "name": "Memcached - Total Items", - "queries": [ - { - "query": "SELECT max(\"total_items\") AS \"total_items\" FROM \":db:\".\":rp:\".\"memcached\"", - "label": "count", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af404", - "name": "Memcached - Bytes Stored", - "queries": [ - { - "query": "SELECT max(\"bytes\") AS \"bytes\" FROM \":db:\".\":rp:\".\"memcached\"", - "label": "bytes", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af406", - "name": "Memcached - Bytes Read/Sec", - "queries": [ - { - "query": "SELECT non_negative_derivative(max(\"bytes_read\")) AS \"bytes_read\" FROM \":db:\".\":rp:\".\"memcached\"", - "label": "bytes/s", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af407", - "name": "Memcached - Bytes Written/Sec", - "queries": [ - { - "query": "SELECT non_negative_derivative(max(\"bytes_written\")) AS \"bytes_written\" FROM \":db:\".\":rp:\".\"memcached\"", - "label": "bytes/s", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af401", - "name": "Memcached - Evictions/10 Seconds", - "queries": [ - { - "query": "SELECT non_negative_derivative(max(\"evictions\"), 10s) AS \"evictions\" FROM \":db:\".\":rp:\".\"memcached\"", - "label": "evictions / 10s", - "groupbys": [], - "wheres": [] - } - ] - } - ] -} diff --git a/chronograf/canned/mesos.json b/chronograf/canned/mesos.json deleted file mode 100644 index 85c4683fcdb..00000000000 --- a/chronograf/canned/mesos.json +++ /dev/null @@ -1,132 +0,0 @@ -{ - "id": "0fa47984-825b-46f1-9ca5-0366e3220000", - "measurement": "mesos", - "app": "mesos", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "0fa47984-825b-46f1-9ca5-0366e3220007", - "name": "Mesos Active Slaves", - "queries": [ - { - "query": "SELECT max(\"master/slaves_active\") AS \"Active Slaves\" FROM \":db:\".\":rp:\".\"mesos\"", - "label": "count", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 
0, - "y": 0, - "w": 4, - "h": 4, - "i": "0fa47984-825b-46f1-9ca5-0366e3220001", - "name": "Mesos Tasks Active", - "queries": [ - { - "query": "SELECT max(\"master/tasks_running\") AS \"num tasks\" FROM \":db:\".\":rp:\".\"mesos\"", - "label": "count", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "0fa47984-825b-46f1-9ca5-0366e3220004", - "name": "Mesos Tasks", - "queries": [ - { - "query": "SELECT non_negative_derivative(max(\"master/tasks_finished\"), 60s) AS \"tasks finished\" FROM \":db:\".\":rp:\".\"mesos\"", - "label": "count", - "groupbys": [], - "wheres": [] - }, - { - "query": "SELECT non_negative_derivative(max(\"master/tasks_failed\"), 60s) AS \"tasks failed\" FROM \":db:\".\":rp:\".\"mesos\"", - "groupbys": [], - "wheres": [] - }, - { - "query": "SELECT non_negative_derivative(max(\"master/tasks_killed\"), 60s) AS \"tasks killed\" FROM \":db:\".\":rp:\".\"mesos\"", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "0fa47984-825b-46f1-9ca5-0366e3220005", - "name": "Mesos Outstanding offers", - "queries": [ - { - "query": "SELECT max(\"master/outstanding_offers\") AS \"Outstanding Offers\" FROM \":db:\".\":rp:\".\"mesos\"", - "label": "count", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "0fa47984-825b-46f1-9ca5-0366e3220002", - "name": "Mesos Available/Used CPUs", - "queries": [ - { - "query": "SELECT max(\"master/cpus_total\") AS \"cpu total\", max(\"master/cpus_used\") AS \"cpu used\" FROM \":db:\".\":rp:\".\"mesos\"", - "label": "count", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "0fa47984-825b-46f1-9ca5-0366e3220003", - "name": "Mesos Available/Used Memory", - "queries": [ - { - "query": "SELECT max(\"master/mem_total\") AS \"memory total\", max(\"master/mem_used\") AS \"memory used\" FROM \":db:\".\":rp:\".\"mesos\"", - "label": "MB", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "0fa47984-825b-46f1-9ca5-0366e3220008", - "name": "Mesos Master Uptime", - "colors": [], - "type": "single-stat", - "queries": [ - { - "query": "SELECT max(\"master/uptime_secs\") AS \"uptime\" FROM \":db:\".\":rp:\".\"mesos\"", - "label": "Seconds", - "groupbys": [], - "wheres": [] - } - ] - } - ] -} diff --git a/chronograf/canned/mongodb.json b/chronograf/canned/mongodb.json deleted file mode 100644 index 3d2e7858a95..00000000000 --- a/chronograf/canned/mongodb.json +++ /dev/null @@ -1,120 +0,0 @@ -{ - "id": "921298ad-0cdd-44f4-839b-10c319e7fcc7", - "measurement": "mongodb", - "app": "mongodb", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "b2631fd5-7d32-4a31-9edf-98362fd3626e", - "name": "MongoDB – Read/Second", - "queries": [ - { - "query": "SELECT mean(queries_per_sec) AS queries_per_second, mean(getmores_per_sec) AS getmores_per_second FROM \":db:\".\":rp:\".\"mongodb\"", - "label": "reads/s", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "9362e390-951b-4dba-adec-40c261e37604", - "name": "MongoDB – Writes/Second", - "queries": [ - { - "query": "SELECT mean(inserts_per_sec) AS inserts_per_second, mean(updates_per_sec) AS updates_per_second, mean(deletes_per_sec) AS deletes_per_second FROM \":db:\".\":rp:\".\"mongodb\"", - "label": "writes/s", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": 
"7ca54d4c-9f0d-47fd-a7fe-2d01e832bbf4", - "name": "MongoDB – Active Connections", - "queries": [ - { - "query": "SELECT mean(open_connections) AS open_connections FROM \":db:\".\":rp:\".\"mongodb\"", - "label": "count", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "ea5ae388-9ca3-42f9-835f-cc9b265705be", - "name": "MongoDB – Reads/Writes Waiting in Queue", - "queries": [ - { - "query": "SELECT max(queued_reads) AS queued_reads, max(queued_writes) as queued_writes FROM \":db:\".\":rp:\".\"mongodb\"", - "label": "count", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "631dcbba-c997-4fd7-b640-754a1b36026c", - "name": "MongoDB – Network Bytes/Second", - "queries": [ - { - "query": "SELECT mean(net_in_bytes) AS net_in_bytes, mean(net_out_bytes) as net_out_bytes FROM \":db:\".\":rp:\".\"mongodb\"", - "label": "bytes/s", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "5b03bef0-e5e9-4b53-b5f8-1d1b740cf5a2", - "name": "MongoDB – Page Faults", - "queries": [ - { - "query": "SELECT mean(page_faults_per_sec) AS page_faults_per_second FROM \":db:\".\":rp:\".\"mongodb\"", - "label": "faults/s", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "4bc98883-2347-46bb-9459-1c6fe7fb47a8", - "name": "MongoDB – Memory Usage (MB)", - "queries": [ - { - "query": "SELECT mean(vsize_megabytes) AS virtual_memory_megabytes, mean(resident_megabytes) as resident_memory_megabytes FROM \":db:\".\":rp:\".\"mongodb\"", - "label": "MB", - "groupbys": [], - "wheres": [] - } - ] - } - ] -} diff --git a/chronograf/canned/mysql.json b/chronograf/canned/mysql.json deleted file mode 100644 index 0bc92dd911f..00000000000 --- a/chronograf/canned/mysql.json +++ /dev/null @@ -1,84 +0,0 @@ -{ - "id": "c1aa88c7-a047-4b52-85c4-0eec21b357ef", - "measurement": "mysql", - "app": "mysql", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "531192d3-f183-4481-afea-79103d56875a", - "name": "MySQL – Reads/Second", - "queries": [ - { - "query": - "SELECT non_negative_derivative(last(\"commands_select\"), 1s) AS selects_per_second FROM \":db:\".\":rp:\".\"mysql\"", - "groupbys": ["\"server\""], - "wheres": [] - }, - { - "query": - "SELECT non_negative_derivative(last(\"com_select\"), 1s) AS selects_per_second FROM \":db:\".\":rp:\".\"mysql\"", - "groupbys": ["\"server\""], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "2dc5e60e-6ddb-43cb-80c5-dfc9294dad97", - "name": "MySQL – Writes/Second", - "queries": [ - { - "query": - "SELECT non_negative_derivative(last(\"commands_insert\"), 1s) AS inserts_per_second, non_negative_derivative(last(\"commands_update\"), 1s) AS updates_per_second, non_negative_derivative(last(\"commands_delete\"), 1s) AS deletes_per_second FROM \":db:\".\":rp:\".\"mysql\"", - "groupbys": ["\"server\""], - "wheres": [] - }, - { - "query": - "SELECT non_negative_derivative(last(\"com_insert\"), 1s) AS inserts_per_second, non_negative_derivative(last(\"com_update\"), 1s) AS updates_per_second, non_negative_derivative(last(\"com_delete\"), 1s) AS deletes_per_second FROM \":db:\".\":rp:\".\"mysql\"", - "groupbys": ["\"server\""], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "2179fd37-6380-47dc-a1f3-959b69d8f9ec", - "name": "MySQL – Connections/Second", - "queries": [ - { - "query": - "SELECT 
non_negative_derivative(last(\"threads_connected\"), 1s) AS cxn_per_second, non_negative_derivative(last(\"threads_running\"), 1s) AS threads_running_per_second FROM \":db:\".\":rp:\".\"mysql\"", - "groupbys": ["\"server\""], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "b13816b7-041d-4387-b593-86898aa379ab", - "name": "MySQL – Connections Errors/Second", - "queries": [ - { - "query": - "SELECT non_negative_derivative(last(\"connection_errors_max_connections\"), 1s) AS cxn_errors_per_second, non_negative_derivative(last(\"connection_errors_internal\"), 1s) AS internal_cxn_errors_per_second, non_negative_derivative(last(\"aborted_connects\"), 1s) AS cxn_aborted_per_second FROM \":db:\".\":rp:\".\"mysql\"", - "groupbys": ["\"server\""], - "wheres": [] - } - ] - } - ] -} diff --git a/chronograf/canned/net.json b/chronograf/canned/net.json deleted file mode 100644 index c108e86d706..00000000000 --- a/chronograf/canned/net.json +++ /dev/null @@ -1,49 +0,0 @@ -{ - "id": "4585a7db-73af-4ca1-9378-47ee67c71f99", - "measurement": "net", - "app": "system", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "e2f65d45-1898-4a16-860c-14b655575925", - "name": "System – Network Mb/s", - "queries": [ - { - "query": "SELECT non_negative_derivative(max(\"bytes_recv\"), 1s) / 125000 as \"rx_megabits_per_second\" FROM \":db:\".\":rp:\".\"net\"", - "groupbys": [], - "wheres": [], - "label": "Mb/s" - }, - { - "query": "SELECT non_negative_derivative(max(\"bytes_sent\"), 1s) / 125000 as \"tx_megabits_per_second\" FROM \":db:\".\":rp:\".\"net\"", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "5e957624-b28b-4904-8068-5e7a9a058609", - "name": "System – Network Error Rate", - "queries": [ - { - "query": "SELECT non_negative_derivative(max(\"err_in\"), 1s) / 125000 as \"tx_errors_per_second\" FROM \":db:\".\":rp:\".\"net\"", - "groupbys": [], - "wheres": [] - }, - { - "query": "SELECT non_negative_derivative(max(\"err_out\"), 1s) / 125000 as \"rx_errors_per_second\" FROM \":db:\".\":rp:\".\"net\"", - "groupbys": [], - "wheres": [] - } - ] - } - ] -} \ No newline at end of file diff --git a/chronograf/canned/netstat.json b/chronograf/canned/netstat.json deleted file mode 100644 index f85a69eabbe..00000000000 --- a/chronograf/canned/netstat.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "id": "ff41d044-f61a-4522-8de7-9e39e3a1b5de", - "measurement": "netstat", - "app": "system", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "cf5d0608-b513-4244-a55f-accf520da3a1", - "name": "System - Open Sockets", - "queries": [ - { - "query": "SELECT mean(\"tcp_established\") AS \"tcp_established\" FROM \":db:\".\":rp:\".\"netstat\"", - "groupbys": [], - "wheres": [] - }, - { - "query": "SELECT mean(\"udp_socket\") AS \"udp_socket\" FROM \":db:\".\":rp:\".\"netstat\"", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 4, - "y": 0, - "w": 4, - "h": 4, - "i": "63503235-a588-49a7-ae0a-fb015c888e5b", - "name": "System - Sockets Created/Second ", - "queries": [ - { - "query": "SELECT non_negative_derivative(max(\"tcp_established\")) AS \"tcp_established\" FROM \":db:\".\":rp:\".\"netstat\"", - "groupbys": [], - "wheres": [] - }, - { - "query": "SELECT non_negative_derivative(max(\"udp_socket\")) AS \"udp_socket\" FROM \":db:\".\":rp:\".\"netstat\"", - "groupbys": [], - "wheres": [] - } - ] - } - ] -} diff --git a/chronograf/canned/new_apps.sh b/chronograf/canned/new_apps.sh deleted file 
mode 100755 index 7dec94db93d..00000000000 --- a/chronograf/canned/new_apps.sh +++ /dev/null @@ -1,58 +0,0 @@ -#!/bin/sh - -measurement= - -# Usage info -show_help() { - -cat << EOF -Usage: ${0##*/} MEASUREMENT -Generate new layout for MEASUREMENT. File created will be named -MEASUREMENT.json with UUID being generated from the uuidgen command. - - -h display this help and exit -EOF -} - -while :; do - case $1 in - -h|-\?|--help) # Call a "show_help" function to display a synopsis, then exit. - show_help - exit - ;; - *) # Default case: If no more options then break out of the loop. - measurement=$1 - break - esac - shift -done - -if [ -z "$measurement" ]; then - show_help - exit -fi - -CELLID=$(uuidgen | tr A-Z a-z) -UUID=$(uuidgen | tr A-Z a-z) -APP_FILE="$measurement".json -echo Creating measurement file $APP_FILE -cat > $APP_FILE << EOF -{ - "id": "$UUID", - "measurement": "$measurement", - "app": "$measurement", - "cells": [{ - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "$CELLID", - "name": "User facing cell Name", - "queries": [{ - "query": "select mean(\"used_percent\") from disk", - "groupbys": [], - "wheres": [] - }] - }] -} -EOF diff --git a/chronograf/canned/nginx.json b/chronograf/canned/nginx.json deleted file mode 100644 index 723431dfe7b..00000000000 --- a/chronograf/canned/nginx.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "id": "b805d661-e5a3-45e4-af18-de0e9360e6e7", - "measurement": "nginx", - "app": "nginx", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "a209be7f-33c6-4612-88b2-848ae402c66a", - "name": "NGINX – Client Connections", - "queries": [ - { - "query": "SELECT non_negative_derivative(max(\"accepts\"), 1s) AS \"accepts\", non_negative_derivative(max(\"handled\"), 1s) AS \"handled\", non_negative_derivative(max(\"active\"), 1s) AS \"active\" FROM \":db:\".\":rp:\".\"nginx\"", - "groupbys": [ - "\"server\"" - ], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "0fc591ad-8541-4de3-a36e-4ae69ff954c4", - "name": "NGINX – Client Errors", - "queries": [ - { - "query": "SELECT non_negative_derivative(max(\"accepts\")) - non_negative_derivative(max(\"handled\")) FROM \":db:\".\":rp:\".\"nginx\"", - "groupbys": [ - "\"server\"" - ], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "a1f37574-b86e-4278-8acc-ba78d3ac2e4e", - "name": "NGINX – Client Requests", - "queries": [ - { - "query": "SELECT non_negative_derivative(max(\"requests\"), 1s) AS \"requests\" FROM \":db:\".\":rp:\".\"nginx\"", - "groupbys": [ - "\"server\"" - ], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "5b91c5b0-d270-4d03-aeae-007f2351c80c", - "name": "NGINX – Active Client State", - "queries": [ - { - "query": "SELECT non_negative_derivative(max(\"waiting\"), 1s) AS \"waiting\", non_negative_derivative(max(\"reading\"), 1s) AS \"reading\", non_negative_derivative(max(\"writing\"), 1s) AS \"writing\" FROM \":db:\".\":rp:\".\"nginx\"", - "groupbys": [ - "\"server\"" - ], - "wheres": [] - } - ] - } - ] -} diff --git a/chronograf/canned/nsq_channel.json b/chronograf/canned/nsq_channel.json deleted file mode 100644 index ad23930918d..00000000000 --- a/chronograf/canned/nsq_channel.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "id": "7b035812-182a-4a94-ba2e-902dfb81e0a2", - "measurement": "nsq_channel", - "app": "nsq", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "89dad9c8-3391-400e-a44a-b5d4a2c53bf1", - "name": "NSQ - Channel Client Count", - 
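new_apps.sh, removed above, scaffolds a new canned layout by shelling out to uuidgen for the layout and cell IDs. For comparison, the same scaffold as a standalone Go program using only the standard library; the hand-rolled random v4-style UUID stands in for uuidgen and is an assumption, not a port:

package main

import (
	"crypto/rand"
	"fmt"
	"os"
)

// uuid4 returns a random RFC 4122 version-4 UUID in lowercase hex,
// standing in for the `uuidgen | tr A-Z a-z` pipeline in new_apps.sh.
func uuid4() string {
	var b [16]byte
	if _, err := rand.Read(b[:]); err != nil {
		panic(err)
	}
	b[6] = (b[6] & 0x0f) | 0x40 // version 4
	b[8] = (b[8] & 0x3f) | 0x80 // RFC 4122 variant
	return fmt.Sprintf("%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:16])
}

func main() {
	if len(os.Args) != 2 {
		fmt.Fprintln(os.Stderr, "usage: new_app MEASUREMENT")
		os.Exit(1)
	}
	m := os.Args[1]
	layout := fmt.Sprintf(`{
  "id": %q,
  "measurement": %q,
  "app": %q,
  "cells": [{
    "x": 0, "y": 0, "w": 4, "h": 4,
    "i": %q,
    "name": "User facing cell Name",
    "queries": [{
      "query": "select mean(\"used_percent\") from disk",
      "groupbys": [],
      "wheres": []
    }]
  }]
}`, uuid4(), m, m, uuid4())
	if err := os.WriteFile(m+".json", []byte(layout), 0o644); err != nil {
		panic(err)
	}
	fmt.Println("Creating measurement file", m+".json")
}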
"queries": [ - { - "query": "SELECT mean(\"client_count\") AS \"client_count\" FROM \":db:\".\":rp:\".\"nsq_channel\"", - "groupbys": [ - "\"topic\"", - "\"channel\"" - ], - "wheres": [] - } - ] - }, - { - "x": 4, - "y": 0, - "w": 4, - "h": 4, - "i": "e3eb48c0-8283-4445-b174-f4f8e4182e45", - "name": "NSQ - Channel Messages Count", - "queries": [ - { - "query": "SELECT mean(\"message_count\") AS \"message_count\" FROM \":db:\".\":rp:\".\"nsq_channel\"", - "groupbys": [ - "\"topic\"", - "\"channel\"" - ], - "wheres": [] - } - ] - } - ] -} diff --git a/chronograf/canned/nsq_server.json b/chronograf/canned/nsq_server.json deleted file mode 100644 index 7e8eab2e4c6..00000000000 --- a/chronograf/canned/nsq_server.json +++ /dev/null @@ -1,38 +0,0 @@ -{ - "id": "6c351881-05ec-48f1-b11b-9c36d2c7cc80", - "measurement": "nsq_server", - "app": "nsq", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "c376a3d8-cd2a-4212-bf1d-da776b75feeb", - "name": "NSQ - Topic Count", - "queries": [ - { - "query": "SELECT mean(\"topic_count\") AS \"topic_count\" FROM \":db:\".\":rp:\".\"nsq_server\"", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 4, - "y": 0, - "w": 4, - "h": 4, - "i": "aa1aa20a-48aa-4a42-aaa0-426aa6a58aa8", - "name": "NSQ - Server Count", - "queries": [ - { - "query": "SELECT mean(\"server_count\") AS \"server_count\" FROM \":db:\".\":rp:\".\"nsq_server\"", - "groupbys": [], - "wheres": [] - } - ] - } - ] -} diff --git a/chronograf/canned/nsq_topic.json b/chronograf/canned/nsq_topic.json deleted file mode 100644 index b1ce7454fac..00000000000 --- a/chronograf/canned/nsq_topic.json +++ /dev/null @@ -1,78 +0,0 @@ -{ - "id": "f7be6717-61df-4e58-ac4a-e4f49f95d847", - "measurement": "nsq_topic", - "app": "nsq", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "f07967cb-2c2a-41cb-8420-f041f46b0635", - "name": "NSQ - Topic Messages", - "queries": [ - { - "query": "SELECT mean(\"depth\") AS \"depth\" FROM \":db:\".\":rp:\".\"nsq_topic\"", - "groupbys": [ - "\"topic\"" - ], - "wheres": [] - } - ] - }, - { - "x": 4, - "y": 0, - "w": 4, - "h": 4, - "i": "912f337b-3af2-42af-9352-b31a5bc3b431", - "name": "NSQ - Topic Messages on Disk", - "queries": [ - { - "query": "SELECT mean(\"backend_depth\") AS \"backend_depth\" FROM \":db:\".\":rp:\".\"nsq_topic\"", - "groupbys": [ - "\"topic\"" - ], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 4, - "w": 4, - "h": 4, - "i": "06909f21-f035-4668-8193-8e06a018accb", - "name": "NSQ - Topic Ingress", - "queries": [ - { - "query": "SELECT non_negative_derivative(max(\"message_count\")) AS \"messages_per_second\" FROM \":db:\".\":rp:\".\"nsq_topic\"", - "groupbys": [ - "\"topic\"", - "\"host\"" - ], - "wheres": [] - } - ] - }, - { - "x": 4, - "y": 4, - "w": 4, - "h": 4, - "i": "a5aa73a5-42aa-464a-aaaa-0a7a50632a0a", - "name": "NSQ topic egress", - "queries": [ - { - "query": "SELECT non_negative_derivative(max(\"message_count\")) - non_negative_derivative(max(\"depth\")) AS \"messages_per_second\" FROM \":db:\".\":rp:\".\"nsq_topic\"", - "groupbys": [ - "\"topic\"", - "\"host\"" - ], - "wheres": [] - } - ] - } - ] -} diff --git a/chronograf/canned/phpfpm.json b/chronograf/canned/phpfpm.json deleted file mode 100644 index b2a105eebc5..00000000000 --- a/chronograf/canned/phpfpm.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "id": "e6b69c66-6183-4728-9f1d-1b0f1fc01b7d", - "measurement": "phpfpm", - "app": "phpfpm", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": 
"da42044d-8d10-4e3c-a0a2-41512266fd00", - "name": "phpfpm – Accepted Connections", - "queries": [ - { - "query": "SELECT non_negative_derivative(mean(\"accepted_conn\"),1s) FROM \":db:\".\":rp:\".\"phpfpm\"", - "label": "count", - "groupbys": [ - "\"pool\"" - ] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "7aae5ec6-dbaf-4926-b922-d585e6a869be", - "name": "phpfpm – Processes", - "queries": [ - { - "query": "SELECT mean(\"active_processes\") as \"active\",mean(\"idle_processes\") as \"idle\" FROM \":db:\".\":rp:\".\"phpfpm\"", - "label": "count", - "groupbys": [ - "\"pool\"" - ] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "e4de9091-7250-4634-bf38-81a441ef0f27", - "name": "phpfpm – Slow Requests", - "queries": [ - { - "query": "SELECT non_negative_derivative(mean(\"slow_requests\"),1s) FROM \":db:\".\":rp:\".\"phpfpm\"", - "label": "count", - "groupbys": [ - "\"pool\"" - ] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "7ed72ef0-a429-4edd-9c8e-a11625a279c2", - "name": "phpfpm – Max Children Reached", - "queries": [ - { - "query": "SELECT mean(\"max_children_reached\") FROM \":db:\".\":rp:\".\"phpfpm\"", - "label": "count", - "groupbys": [ - "\"pool\"" - ] - } - ] - } - ] -} diff --git a/chronograf/canned/ping.json b/chronograf/canned/ping.json deleted file mode 100644 index e732a05d874..00000000000 --- a/chronograf/canned/ping.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "id": "6fba9b06-b9d3-4e67-a41e-177d585dfe28", - "measurement": "ping", - "app": "ping", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "f58a157b-9f2f-4175-94c7-c250d9491c11", - "name": "Ping – Packet Loss Percent", - "queries": [ - { - "query": "select max(\"percent_packet_loss\") as \"packet_loss\" from ping", - "groupbys": [ - "\"url\"" - ], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "865f646f-6ed9-4878-81f6-2b9e0d40697d", - "name": "Ping – Response Times (ms)", - "queries": [ - { - "query": "select mean(\"average_response_ms\") as \"average\", mean(\"minimum_response_ms\") as \"min\", mean(\"maximum_response_ms\") as \"max\" from ping", - "groupbys": [ - "\"url\"" - ], - "wheres": [] - } - ] - } - ] -} diff --git a/chronograf/canned/postgresql.json b/chronograf/canned/postgresql.json deleted file mode 100644 index 5bf1b3586d9..00000000000 --- a/chronograf/canned/postgresql.json +++ /dev/null @@ -1,72 +0,0 @@ -{ - "id": "0975a073-9eb8-471c-aaf2-692b65f9fe5c", - "measurement": "postgresql", - "app": "postgresql", - "autoflow": false, - "cells": [ - { - "x": 0, - "y": 0, - "w": 12, - "h": 4, - "i": "b417bc9f-b16d-4691-91a7-85adfdd3e8ec", - "name": "PostgreSQL - Rows", - "queries": [ - { - "query": "SELECT non_negative_derivative(mean(\"tup_fetched\")) AS \"fetched\", non_negative_derivative(mean(\"tup_returned\")) AS \"returned\", non_negative_derivative(mean(\"tup_inserted\")) AS \"inserted\", non_negative_derivative(mean(\"tup_updated\")) AS \"updated\" FROM \":db:\".\":rp:\".\"postgresql\"", - "groupbys": [ - "db" - ], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 8, - "w": 12, - "h": 4, - "i": "230d5baa-9376-438c-9a55-6f97f8c68e69", - "name": "PostgreSQL - QPS", - "queries": [ - { - "query": "SELECT non_negative_derivative(mean(\"xact_commit\")) AS \"xact_commit\" FROM \":db:\".\":rp:\".\"postgresql\"", - "groupbys": [ - "db" - ], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 4, - "w": 6, - "h": 4, - "i": "4762130d-7005-467f-80ad-8c7f6dfe822e", - "name": "PostgreSQL - Buffers", - "queries": [ 
- { - "query": "SELECT mean(\"buffers_alloc\") AS \"buffers_allocated\", mean(\"buffers_backend\") AS \"buffers_backend\", mean(\"buffers_backend_fsync\") AS \"buffers_backend_fsync\", mean(\"buffers_checkpoint\") AS \"buffers_checkpoint\", mean(\"buffers_clean\") AS \"buffers_clean\" FROM \":db:\".\":rp:\".\"postgresql\"", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 6, - "y": 4, - "w": 6, - "h": 4, - "i": "95e73bda-7527-4aca-89dd-109cb6bb4294", - "name": "PostgreSQL - Conflicts/Deadlocks", - "queries": [ - { - "query": "SELECT mean(\"conflicts\") AS \"conflicts\", mean(\"deadlocks\") AS \"deadlocks\" FROM \":db:\".\":rp:\".\"postgresql\"", - "groupbys": [], - "wheres": [] - } - ] - } - ] -} diff --git a/chronograf/canned/processes.json b/chronograf/canned/processes.json deleted file mode 100644 index 2b8af87a574..00000000000 --- a/chronograf/canned/processes.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "id": "ffad2dff-d263-412e-806a-1e836af87942", - "measurement": "processes", - "app": "system", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "84048146-f93d-4d6c-b7dd-c8e2a68abb27", - "name": "System - Total Processes", - "queries": [ - { - "query": "SELECT mean(\"total\") AS \"total\" FROM \":db:\".\":rp:\".\"processes\"", - "groupbys": [], - "wheres": [] - } - ] - } - ] -} diff --git a/chronograf/canned/procstat.json b/chronograf/canned/procstat.json deleted file mode 100644 index aa162a0befe..00000000000 --- a/chronograf/canned/procstat.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "id": "44644fae-21e7-4897-81e6-b11d2643cd61", - "measurement": "procstat", - "app": "system", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "e75a6baa-9938-4ade-b83f-55a239039964", - "name": "Processes – Resident Memory (MB)", - "queries": [ - { - "query": "SELECT max(\"memory_rss\") / 1000000 AS \"max_mb_memory_rss\" FROM \":db:\".\":rp:\".\"procstat\"", - "groupbys": ["\"exe\""], - "wheres": [], - "label": "MB" - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "2bfae447-47c6-4f85-9fec-494301d29a04", - "name": "Processes – CPU Usage %", - "queries": [ - { - "query": "SELECT max(\"cpu_usage\") AS \"cpu_usage\" FROM \":db:\".\":rp:\".\"procstat\"", - "groupbys": ["\"exe\""], - "wheres": [], - "label": "%" - } - ] - } - ] -} \ No newline at end of file diff --git a/chronograf/canned/rabbitmq.json b/chronograf/canned/rabbitmq.json deleted file mode 100644 index fac94df3b20..00000000000 --- a/chronograf/canned/rabbitmq.json +++ /dev/null @@ -1,72 +0,0 @@ -{ - "id": "0c57a644-aa74-4ec3-b099-b44499df1159", - "measurement": "rabbitmq_node", - "app": "rabbitmq", - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "c46351c6-b33a-4dc2-a053-3517e7c8098e", - "name": "RabbitMQ - Overview", - "queries": [ - { - "query": "select mean(\"consumers\") AS \"consumers\" from rabbitmq_overview", - "groupbys": [], - "wheres": [] - }, - { - "query": "select mean(\"exchanges\") AS \"exchanges\" from rabbitmq_overview", - "groupbys": [], - "wheres": [] - }, - { - "query": "select mean(\"queues\") AS \"queues\" from rabbitmq_overview", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "c46351c6-b33a-4dc2-a053-3517e6c8098e", - "name": "RabbitMQ - Published/Delivered per second", - "queries": [ - { - "query": "select derivative(mean(\"messages_published\"), 1s) AS \"published_per_sec\" from rabbitmq_overview", - "groupbys": [], - "wheres": [] - }, - { - "query": "select 
derivative(mean(\"messages_delivered\"), 1s) AS \"delivered_per_sec\" from rabbitmq_overview", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "c46351c6-b33a-4dc2-a053-3547e7c8098e", - "name": "RabbitMQ - Acked/Unacked per second", - "queries": [ - { - "query": "select derivative(mean(\"messages_acked\"), 1s) AS \"acked_per_sec\" from rabbitmq_overview", - "groupbys": [], - "wheres": [] - }, - { - "query": "select derivative(mean(\"messages_unacked\"), 1s) AS \"unacked_per_sec\" from rabbitmq_overview", - "groupbys": [], - "wheres": [] - } - ] - } - ] -} diff --git a/chronograf/canned/redis.json b/chronograf/canned/redis.json deleted file mode 100644 index 1a80b426324..00000000000 --- a/chronograf/canned/redis.json +++ /dev/null @@ -1,64 +0,0 @@ -{ - "id": "793e6cca-7d7f-48e4-8db2-7b81761cc6ff", - "measurement": "redis", - "app": "redis", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "9c168ac8-2985-4883-bdf2-938ea9f065b9", - "name": "Redis - Connected Clients", - "queries": [ - { - "query": "SELECT mean(\"clients\") AS \"clients\" FROM \":db:\".\":rp:\".\"redis\"", - "groupbys": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "9c168ac8-2985-4883-bdf2-938ea9f065a0", - "name": "Redis - Blocked Clients", - "queries": [ - { - "query": "SELECT mean(\"blocked_clients\") AS \"blocked_clients\" FROM \":db:\".\":rp:\".\"redis\"", - "groupbys": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "9c168ac8-2985-4883-bdf2-938ea9f065b1", - "name": "Redis - CPU", - "queries": [ - { - "query": "SELECT non_negative_derivative(max(\"used_cpu_user\")) AS \"used_cpu_per_second\" FROM \":db:\".\":rp:\".\"redis\"", - "groupbys": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "9c168ac8-2985-4883-bdf2-938ea9f065b2", - "name": "Redis - Memory", - "queries": [ - { - "query": "SELECT non_negative_derivative(max(\"used_memory\")) AS \"used_memory_per_second\" FROM \":db:\".\":rp:\".\"redis\"", - "groupbys": [] - } - ] - } - ] -} diff --git a/chronograf/canned/riak.json b/chronograf/canned/riak.json deleted file mode 100644 index b52b4efdd2c..00000000000 --- a/chronograf/canned/riak.json +++ /dev/null @@ -1,127 +0,0 @@ -{ - "id": "f56fd522-3e9c-492d-88fe-34e05d6d2462", - "measurement": "riak", - "app": "riak", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "e12ebb94-2592-4b83-86fc-1f8a9aa84262", - "name": "Riak – Total Memory Bytes", - "queries": [ - { - "query": "SELECT max(\"memory_total\") as memory_total_bytes FROM \":db:\".\":rp:\".\"riak\"", - "groupbys": [ - "\"nodename\"" - ], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "8355d65d-34a7-4b6e-ae54-eaf25cd14e4b", - "name": "Riak – Object Byte Size", - "queries": [ - { - "query": "SELECT max(\"node_get_fsm_objsize_median\") AS \"median\", max(\"node_get_fsm_objsize_100\") AS \"100th-percentile\", max(\"node_get_fsm_objsize_99\") AS \"99th-percentile\", max(\"node_get_fsm_objsize_mean\") AS \"mean\", max(\"node_get_fsm_objsize_95\") AS \"95th-percentile\" FROM \":db:\".\":rp:\".\"riak\"", - "groupbys": [ - "\"nodename\"" - ], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "91e26cbe-1595-4d17-a54b-c26e08ecf572", - "name": "Riak – Number of Siblings/Minute", - "queries": [ - { - "query": "SELECT max(\"node_get_fsm_siblings_median\") AS \"median\", max(\"node_get_fsm_siblings_mean\") AS \"mean\", 
max(\"node_get_fsm_siblings_99\") AS \"99th-percentile\", max(\"node_get_fsm_siblings_95\") AS \"95h-percentile\", max(\"node_get_fsm_siblings_100\") AS \"100th-percentile\" FROM \":db:\".\":rp:\".\"riak\"", - "groupbys": [ - "\"nodename\"" - ], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "eefbdfec-8578-46a8-a0d5-1247d1d4cf97", - "name": "Riak – Latency (ms)", - "queries": [ - { - "query": "SELECT max(\"node_put_fsm_time_median\") / 1000 AS \"median_put_milliseconds\", max(\"node_get_fsm_time_median\") / 1000 AS \"median_get_milliseconds\" FROM \":db:\".\":rp:\".\"riak\"", - "groupbys": [ - "\"nodename\"" - ], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "48f268ae-3218-4b07-a2e9-575f89e2d6c9", - "name": "Riak – Reads and Writes/Minute", - "queries": [ - { - "query": "SELECT max(\"node_puts\") AS \"puts_per_minute\", max(\"node_gets\") AS \"gets_per_minute\" FROM \":db:\".\":rp:\".\"riak\"", - "groupbys": [ - "\"nodename\"" - ], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "f7c601c2-1007-49ec-bbcd-3f3e678ba781", - "name": "Riak – Active Connections", - "queries": [ - { - "query": "SELECT max(\"pbc_active\") AS \"active_protobuf_connections\" FROM \":db:\".\":rp:\".\"riak\"", - "groupbys": [ - "\"nodename\"" - ], - "wheres": [] - } - ] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "f29575f9-6b78-465c-b055-b518d6eda09d", - "name": "Riak – Read Repairs/Minute", - "queries": [ - { - "query": "SELECT max(\"read_repairs\") AS \"read_repairs_per_minute\" FROM \":db:\".\":rp:\".\"riak\"", - "groupbys": [ - "\"nodename\"" - ], - "wheres": [] - } - ] - } - ] -} diff --git a/chronograf/canned/uuid.sh b/chronograf/canned/uuid.sh deleted file mode 100755 index 0bb54464e0a..00000000000 --- a/chronograf/canned/uuid.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -uuidgen | tr A-Z a-z diff --git a/chronograf/canned/varnish.json b/chronograf/canned/varnish.json deleted file mode 100644 index a81c81fd561..00000000000 --- a/chronograf/canned/varnish.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "id": "83c57d16-a778-43ed-8941-0f9fec3408fa", - "measurement": "varnish", - "app": "varnish", - "cells": [ - { - "x": 0, - "y": 0, - "w": 12, - "h": 4, - "i": "10b406cc-50a8-4c14-bf0e-5fe8bce1661c", - "name": "Varnish - Cache Hits/Misses", - "queries": [ - { - "query": "select non_negative_derivative(mean(cache_hit)) as hits, non_negative_derivative(mean(cache_miss)) as misses from varnish", - "groupbys": [], - "wheres": [] - } - ] - } - ] -} diff --git a/chronograf/canned/win_cpu.json b/chronograf/canned/win_cpu.json deleted file mode 100644 index c6b0505f961..00000000000 --- a/chronograf/canned/win_cpu.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "id": "188b7f23-648f-4c54-99f6-6a0e2e90a2fc", - "measurement": "win_cpu", - "app": "win_system", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "6921e19a-951e-42ef-b304-2b8b661fcc81", - "name": "System - CPU Usage", - "queries": [ - { - "query": "SELECT mean(\"Percent_Processor_Time\") AS \"percent_processor_time\" FROM \":db:\".\":rp:\".\"win_cpu\"", - "groupbys": [], - "wheres": [] - } - ] - } - ] -} diff --git a/chronograf/canned/win_mem.json b/chronograf/canned/win_mem.json deleted file mode 100644 index c14dc32cb30..00000000000 --- a/chronograf/canned/win_mem.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "id": "cef6c954-f066-4348-9425-4132429fe817", - "measurement": "win_mem", - "app": "win_system", - "autoflow": true, - "cells": [ - { - 
"x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "1c275ca5-84a7-4146-9cf0-8ed654abb627", - "name": "System - Available Bytes", - "queries": [ - { - "query": "SELECT mean(\"Available_Bytes\") AS \"available_bytes\" FROM \":db:\".\":rp:\".\"win_mem\"", - "groupbys": [], - "wheres": [] - } - ] - } - ] -} diff --git a/chronograf/canned/win_net.json b/chronograf/canned/win_net.json deleted file mode 100644 index 7d378b6f278..00000000000 --- a/chronograf/canned/win_net.json +++ /dev/null @@ -1,38 +0,0 @@ -{ - "id": "d795c66f-0d8a-4fc0-b7bf-2cef1d2f4519", - "measurement": "win_net", - "app": "win_system", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "3bf8c678-5904-46e7-9c9f-d0d16f0c3fc4", - "name": "System - TX Bytes/Second", - "queries": [ - { - "query": "SELECT mean(\"Bytes_Sent_persec\") AS \"bytes_sent\" FROM \":db:\".\":rp:\".\"win_net\"", - "groupbys": [], - "wheres": [] - } - ] - }, - { - "x": 4, - "y": 0, - "w": 4, - "h": 4, - "i": "46963ea2-b09b-4dcf-b08b-7cbcd8766f77", - "name": "RX Bytes/Second", - "queries": [ - { - "query": "SELECT mean(\"Bytes_Received_persec\") AS \"bytes_received\" FROM \":db:\".\":rp:\".\"win_net\"", - "groupbys": [], - "wheres": [] - } - ] - } - ] -} diff --git a/chronograf/canned/win_system.json b/chronograf/canned/win_system.json deleted file mode 100644 index 77c9851ed25..00000000000 --- a/chronograf/canned/win_system.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "id": "96bd0303-19b6-4f87-a0f9-2755c6178ba7", - "measurement": "win_system", - "app": "win_system", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "d959c815-16a8-4a2b-a6ea-e37af38d4e2f", - "name": "System - Load", - "queries": [ - { - "query": "SELECT mean(\"Processor_Queue_Length\") AS \"load\" FROM \":db:\".\":rp:\".\"win_system\"", - "groupbys": [], - "wheres": [] - } - ] - } - ] -} diff --git a/chronograf/canned/win_websvc.json b/chronograf/canned/win_websvc.json deleted file mode 100644 index b8dfa743002..00000000000 --- a/chronograf/canned/win_websvc.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "id": "c7644755-505d-46f0-b278-5c29268293b2", - "measurement": "win_websvc", - "app": "iis", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "3539e3c3-ac15-49d3-9de8-64cd514588ca", - "name": "IIS - Service", - "queries": [ - { - "query": "SELECT mean(\"Get_Requests_persec\") AS \"gets\" FROM \":db:\".\":rp:\".\"win_websvc\"", - "groupbys": [], - "wheres": [] - }, - { - "query": "SELECT mean(\"Post_Requests_persec\") AS \"posts\" FROM \":db:\".\":rp:\".\"win_websvc\"", - "groupbys": [], - "wheres": [] - }, - { - "query": "SELECT mean(\"Current_Connections\") AS \"connections\" FROM \":db:\".\":rp:\".\"win_websvc\"", - "groupbys": [], - "wheres": [] - } - ] - } - ] -} diff --git a/chronograf/chronograf.go b/chronograf/chronograf.go deleted file mode 100644 index 96f15bcb207..00000000000 --- a/chronograf/chronograf.go +++ /dev/null @@ -1,837 +0,0 @@ -package chronograf - -import ( - "context" - "io" - "net/http" - "time" -) - -// General errors. 
-const ( - ErrUpstreamTimeout = Error("request to backend timed out") - ErrSourceNotFound = Error("source not found") - ErrServerNotFound = Error("server not found") - ErrLayoutNotFound = Error("layout not found") - ErrDashboardNotFound = Error("dashboard not found") - ErrUserNotFound = Error("user not found") - ErrLayoutInvalid = Error("layout is invalid") - ErrDashboardInvalid = Error("dashboard is invalid") - ErrSourceInvalid = Error("source is invalid") - ErrServerInvalid = Error("server is invalid") - ErrAlertNotFound = Error("alert not found") - ErrAuthentication = Error("user not authenticated") - ErrUninitialized = Error("client uninitialized. Call Open() method") - ErrInvalidAxis = Error("Unexpected axis in cell. Valid axes are 'x', 'y', and 'y2'") - ErrInvalidColorType = Error("Invalid color type. Valid color types are 'min', 'max', 'threshold', 'text', and 'background'") - ErrInvalidColor = Error("Invalid color. Accepted color format is #RRGGBB") - ErrInvalidLegend = Error("Invalid legend. Both type and orientation must be set") - ErrInvalidLegendType = Error("Invalid legend type. Valid legend type is 'static'") - ErrInvalidLegendOrient = Error("Invalid orientation type. Valid orientation types are 'top', 'bottom', 'right', 'left'") - ErrUserAlreadyExists = Error("user already exists") - ErrOrganizationNotFound = Error("organization not found") - ErrMappingNotFound = Error("mapping not found") - ErrOrganizationAlreadyExists = Error("organization already exists") - ErrCannotDeleteDefaultOrganization = Error("cannot delete default organization") - ErrConfigNotFound = Error("cannot find configuration") - ErrAnnotationNotFound = Error("annotation not found") - ErrInvalidCellOptionsText = Error("invalid text wrapping option. Valid wrappings are 'truncate', 'wrap', and 'single line'") - ErrInvalidCellOptionsSort = Error("cell options sortby cannot be empty'") - ErrInvalidCellOptionsColumns = Error("cell options columns cannot be empty'") - ErrOrganizationConfigNotFound = Error("could not find organization config") -) - -// Error is a domain error encountered while processing chronograf requests -type Error string - -func (e Error) Error() string { - return string(e) -} - -// Logger represents an abstracted structured logging implementation. It -// provides methods to trigger log messages at various alert levels and a -// WithField method to set keys for a structured log message. -type Logger interface { - Debug(...interface{}) - Info(...interface{}) - Error(...interface{}) - - WithField(string, interface{}) Logger - - // Logger can be transformed into an io.Writer. - // That writer is the end of an io.Pipe and it is your responsibility to close it. - Writer() *io.PipeWriter -} - -// NoopLogger is a chronograf logger that does nothing. -type NoopLogger struct{} - -func (l *NoopLogger) Debug(...interface{}) { -} - -func (l *NoopLogger) Info(...interface{}) { -} - -func (l *NoopLogger) Error(...interface{}) { -} - -func (l *NoopLogger) WithField(string, interface{}) Logger { - return l -} - -func (l *NoopLogger) Writer() *io.PipeWriter { - return nil -} - -// Router is an abstracted Router based on the API provided by the -// julienschmidt/httprouter package. -type Router interface { - http.Handler - GET(string, http.HandlerFunc) - PATCH(string, http.HandlerFunc) - POST(string, http.HandlerFunc) - DELETE(string, http.HandlerFunc) - PUT(string, http.HandlerFunc) - - Handler(string, string, http.Handler) -} - -// Assets returns a handler to serve the website. 
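Reviewer note on the file being removed: chronograf declared its sentinel errors as constants of a string-backed Error type, something `errors.New` cannot offer, since its values are not constants. A minimal, self-contained sketch of that pattern; only ErrUserNotFound is mirrored from the deleted list, the rest is illustrative:

```go
package main

import (
	"errors"
	"fmt"
)

// Error is a string-backed error type, as in the deleted chronograf.Error.
// Because the underlying type is a string, sentinels can live in a const
// block and are immutable by construction.
type Error string

func (e Error) Error() string { return string(e) }

const ErrUserNotFound = Error("user not found")

func findUser(name string) error {
	// Wrap the sentinel so callers can still detect it with errors.Is.
	return fmt.Errorf("findUser(%q): %w", name, ErrUserNotFound)
}

func main() {
	if err := findUser("marty"); errors.Is(err, ErrUserNotFound) {
		fmt.Println("not found:", err)
	}
}
```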
-type Assets interface { - Handler() http.Handler -} - -// Supported time-series databases -const ( - // InfluxDB is the open-source time-series database - InfluxDB = "influx" - // InfluxEnteprise is the clustered HA time-series database - InfluxEnterprise = "influx-enterprise" - // InfluxRelay is the basic HA layer over InfluxDB - InfluxRelay = "influx-relay" -) - -// TSDBStatus represents the current status of a time series database -type TSDBStatus interface { - // Connect will connect to the time series using the information in `Source`. - Connect(ctx context.Context, src *Source) error - // Ping returns version and TSDB type of time series database if reachable. - Ping(context.Context) error - // Version returns the version of the TSDB database - Version(context.Context) (string, error) - // Type returns the type of the TSDB database - Type(context.Context) (string, error) -} - -// Point is a field set in a series -type Point struct { - Database string - RetentionPolicy string - Measurement string - Time int64 - Tags map[string]string - Fields map[string]interface{} -} - -// TimeSeries represents a queryable time series database. -type TimeSeries interface { - // Connect will connect to the time series using the information in `Source`. - Connect(context.Context, *Source) error - // Query retrieves time series data from the database. - Query(context.Context, Query) (Response, error) - // Write records points into a series - Write(context.Context, []Point) error - // UsersStore represents the user accounts within the TimeSeries database - Users(context.Context) UsersStore - // Permissions returns all valid names permissions in this database - Permissions(context.Context) Permissions - // Roles represents the roles associated with this TimesSeriesDatabase - Roles(context.Context) (RolesStore, error) -} - -// Role is a restricted set of permissions assigned to a set of users. -type Role struct { - Name string `json:"name"` - Permissions Permissions `json:"permissions,omitempty"` - Users []User `json:"users,omitempty"` - Organization string `json:"organization,omitempty"` -} - -// RolesStore is the Storage and retrieval of authentication information -type RolesStore interface { - // All lists all roles from the RolesStore - All(context.Context) ([]Role, error) - // Create a new Role in the RolesStore - Add(context.Context, *Role) (*Role, error) - // Delete the Role from the RolesStore - Delete(context.Context, *Role) error - // Get retrieves a role if name exists. 
- Get(ctx context.Context, name string) (*Role, error) - // Update the roles' users or permissions - Update(context.Context, *Role) error -} - -// Range represents an upper and lower bound for data -type Range struct { - Upper int64 `json:"upper"` // Upper is the upper bound - Lower int64 `json:"lower"` // Lower is the lower bound -} - -// TemplateValue is a value use to replace a template in an InfluxQL query -type TemplateValue struct { - Value string `json:"value"` // Value is the specific value used to replace a template in an InfluxQL query - Type string `json:"type"` // Type can be tagKey, tagValue, fieldKey, csv, map, measurement, database, constant, influxql - Selected bool `json:"selected"` // Selected states that this variable has been picked to use for replacement - Key string `json:"key,omitempty"` // Key is the key for the Value if the Template Type is 'map' -} - -// TemplateVar is a named variable within an InfluxQL query to be replaced with Values -type TemplateVar struct { - Var string `json:"tempVar"` // Var is the string to replace within InfluxQL - Values []TemplateValue `json:"values"` // Values are the replacement values within InfluxQL -} - -// TemplateID is the unique ID used to identify a template -type TemplateID string - -// Template represents a series of choices to replace TemplateVars within InfluxQL -type Template struct { - TemplateVar - ID TemplateID `json:"id"` // ID is the unique ID associated with this template - Type string `json:"type"` // Type can be fieldKeys, tagKeys, tagValues, csv, constant, measurements, databases, map, influxql, text - Label string `json:"label"` // Label is a user-facing description of the Template - Query *TemplateQuery `json:"query,omitempty"` // Query is used to generate the choices for a template -} - -// Query retrieves a Response from a TimeSeries. -type Query struct { - Command string `json:"query"` // Command is the query itself - DB string `json:"db,omitempty"` // DB is optional and if empty will not be used. - RP string `json:"rp,omitempty"` // RP is a retention policy and optional; if empty will not be used. - Epoch string `json:"epoch,omitempty"` // Epoch is the time format for the return results - Wheres []string `json:"wheres,omitempty"` // Wheres restricts the query to certain attributes - GroupBys []string `json:"groupbys,omitempty"` // GroupBys collate the query by these tags - Label string `json:"label,omitempty"` // Label is the Y-Axis label for the data - Range *Range `json:"range,omitempty"` // Range is the default Y-Axis range for the data -} - -// DashboardQuery includes state for the query builder. This is a transition -// struct while we move to the full InfluxQL AST -type DashboardQuery struct { - Command string `json:"query"` // Command is the query itself - Label string `json:"label,omitempty"` // Label is the Y-Axis label for the data - Range *Range `json:"range,omitempty"` // Range is the default Y-Axis range for the data - QueryConfig QueryConfig `json:"queryConfig,omitempty"` // QueryConfig represents the query state that is understood by the data explorer - Source string `json:"source"` // Source is the optional URI to the data source for this queryConfig - Shifts []TimeShift `json:"-"` // Shifts represents shifts to apply to an influxql query's time range. Clients expect the shift to be in the generated QueryConfig - // This was added after this code was brought over to influxdb. 
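The Template/TemplateVar types above describe named placeholders in an InfluxQL string that get swapped for their selected values. The deleted substitution logic lives elsewhere in chronograf, so treat this as an illustrative sketch of the idea only, with the struct fields mirrored from above (JSON tags omitted):

```go
package main

import (
	"fmt"
	"strings"
)

// TemplateValue and TemplateVar mirror the deleted structs.
type TemplateValue struct {
	Value    string
	Type     string
	Selected bool
}

type TemplateVar struct {
	Var    string
	Values []TemplateValue
}

// render is an illustrative substitution, not the deleted implementation:
// each tempVar occurrence is replaced by its first selected value.
func render(q string, vars []TemplateVar) string {
	for _, v := range vars {
		for _, val := range v.Values {
			if val.Selected {
				q = strings.ReplaceAll(q, v.Var, val.Value)
				break
			}
		}
	}
	return q
}

func main() {
	vars := []TemplateVar{{
		Var: ":dashboardTime:",
		Values: []TemplateValue{
			{Value: "now() - 1h", Type: "constant", Selected: true},
		},
	}}
	fmt.Println(render("SELECT mean(usage) FROM cpu WHERE time > :dashboardTime:", vars))
}
```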
- Type string `json:"type,omitempty"` -} - -// TemplateQuery is used to retrieve choices for template replacement -type TemplateQuery struct { - Command string `json:"influxql"` // Command is the query itself - DB string `json:"db,omitempty"` // DB is optional and if empty will not be used. - RP string `json:"rp,omitempty"` // RP is a retention policy and optional; if empty will not be used. - Measurement string `json:"measurement"` // Measurement is the optionally selected measurement for the query - TagKey string `json:"tagKey"` // TagKey is the optionally selected tag key for the query - FieldKey string `json:"fieldKey"` // FieldKey is the optionally selected field key for the query -} - -// Response is the result of a query against a TimeSeries -type Response interface { - MarshalJSON() ([]byte, error) -} - -// Source is connection information to a time-series data store. -type Source struct { - ID int `json:"id,string"` // ID is the unique ID of the source - Name string `json:"name"` // Name is the user-defined name for the source - Type string `json:"type,omitempty"` // Type specifies which kinds of source (enterprise vs oss) - Username string `json:"username,omitempty"` // Username is the username to connect to the source - Password string `json:"password,omitempty"` // Password is in CLEARTEXT - SharedSecret string `json:"sharedSecret,omitempty"` // ShareSecret is the optional signing secret for Influx JWT authorization - URL string `json:"url"` // URL are the connections to the source - MetaURL string `json:"metaUrl,omitempty"` // MetaURL is the url for the meta node - InsecureSkipVerify bool `json:"insecureSkipVerify,omitempty"` // InsecureSkipVerify as true means any certificate presented by the source is accepted. - Default bool `json:"default"` // Default specifies the default source for the application - Telegraf string `json:"telegraf"` // Telegraf is the db telegraf is written to. By default it is "telegraf" - Organization string `json:"organization"` // Organization is the organization ID that resource belongs to - Role string `json:"role,omitempty"` // Not Currently Used. Role is the name of the minimum role that a user must possess to access the resource. - DefaultRP string `json:"defaultRP"` // DefaultRP is the default retention policy used in database queries to this source -} - -// SourcesStore stores connection information for a `TimeSeries` -type SourcesStore interface { - // All returns all sources in the store - All(context.Context) ([]Source, error) - // Add creates a new source in the SourcesStore and returns Source with ID - Add(context.Context, Source) (Source, error) - // Delete the Source from the store - Delete(context.Context, Source) error - // Get retrieves Source if `ID` exists - Get(ctx context.Context, ID int) (Source, error) - // Update the Source in the store. - Update(context.Context, Source) error -} - -// DBRP represents a database and retention policy for a time series source -type DBRP struct { - DB string `json:"db"` - RP string `json:"rp"` -} - -// AlertRule represents rules for building a tickscript alerting task -type AlertRule struct { - ID string `json:"id,omitempty"` // ID is the unique ID of the alert - TICKScript TICKScript `json:"tickscript"` // TICKScript is the raw tickscript associated with this Alert - Query *QueryConfig `json:"query"` // Query is the filter of data for the alert. 
- Every string `json:"every"` // Every how often to check for the alerting criteria - AlertNodes AlertNodes `json:"alertNodes"` // AlertNodes defines the destinations for the alert - Message string `json:"message"` // Message included with alert - Details string `json:"details"` // Details is generally used for the Email alert. If empty will not be added. - Trigger string `json:"trigger"` // Trigger is a type that defines when to trigger the alert - TriggerValues TriggerValues `json:"values"` // Defines the values that cause the alert to trigger - Name string `json:"name"` // Name is the user-defined name for the alert - Type string `json:"type"` // Represents the task type where stream is data streamed to kapacitor and batch is queried by kapacitor - DBRPs []DBRP `json:"dbrps"` // List of database retention policy pairs the task is allowed to access - Status string `json:"status"` // Represents if this rule is enabled or disabled in kapacitor - Executing bool `json:"executing"` // Whether the task is currently executing - Error string `json:"error"` // Any error encountered when kapacitor executes the task - Created time.Time `json:"created"` // Date the task was first created - Modified time.Time `json:"modified"` // Date the task was last modified - LastEnabled time.Time `json:"last-enabled,omitempty"` // Date the task was last set to status enabled -} - -// TICKScript task to be used by kapacitor -type TICKScript string - -// Ticker generates tickscript tasks for kapacitor -type Ticker interface { - // Generate will create the tickscript to be used as a kapacitor task - Generate(AlertRule) (TICKScript, error) -} - -// TriggerValues specifies the alerting logic for a specific trigger type -type TriggerValues struct { - Change string `json:"change,omitempty"` // Change specifies if the change is a percent or absolute - Period string `json:"period,omitempty"` // Period length of time before deadman is alerted - Shift string `json:"shift,omitempty"` // Shift is the amount of time to look into the past for the alert to compare to the present - Operator string `json:"operator,omitempty"` // Operator for alert comparison - Value string `json:"value,omitempty"` // Value is the boundary value when alert goes critical - RangeValue string `json:"rangeValue"` // RangeValue is an optional value for range comparisons -} - -// Field represent influxql fields and functions from the UI -type Field struct { - Value interface{} `json:"value"` - Type string `json:"type"` - Alias string `json:"alias"` - Args []Field `json:"args,omitempty"` -} - -// GroupBy represents influxql group by tags from the UI -type GroupBy struct { - Time string `json:"time"` - Tags []string `json:"tags"` -} - -// DurationRange represents the lower and upper durations of the query config -type DurationRange struct { - Upper string `json:"upper"` - Lower string `json:"lower"` -} - -// TimeShift represents a shift to apply to an influxql query's time range -type TimeShift struct { - Label string `json:"label"` // Label user facing description - Unit string `json:"unit"` // Unit influxql time unit representation i.e. 
ms, s, m, h, d - Quantity string `json:"quantity"` // Quantity number of units -} - -// QueryConfig represents UI query from the data explorer -type QueryConfig struct { - ID string `json:"id,omitempty"` - Database string `json:"database"` - Measurement string `json:"measurement"` - RetentionPolicy string `json:"retentionPolicy"` - Fields []Field `json:"fields"` - Tags map[string][]string `json:"tags"` - GroupBy GroupBy `json:"groupBy"` - AreTagsAccepted bool `json:"areTagsAccepted"` - Fill string `json:"fill,omitempty"` - RawText *string `json:"rawText"` - Range *DurationRange `json:"range"` - Shifts []TimeShift `json:"shifts"` -} - -// KapacitorNode adds arguments and properties to an alert -type KapacitorNode struct { - Name string `json:"name"` - Args []string `json:"args"` - Properties []KapacitorProperty `json:"properties"` - // In the future we could add chaining methods here. -} - -// KapacitorProperty modifies the node they are called on -type KapacitorProperty struct { - Name string `json:"name"` - Args []string `json:"args"` -} - -// Server represents a proxy connection to an HTTP server -type Server struct { - ID int `json:"id,string"` // ID is the unique ID of the server - SrcID int `json:"srcId,string"` // SrcID of the data source - Name string `json:"name"` // Name is the user-defined name for the server - Username string `json:"username"` // Username is the username to connect to the server - Password string `json:"password"` // Password is in CLEARTEXT - URL string `json:"url"` // URL are the connections to the server - InsecureSkipVerify bool `json:"insecureSkipVerify"` // InsecureSkipVerify as true means any certificate presented by the server is accepted. - Active bool `json:"active"` // Is this the active server for the source? - Organization string `json:"organization"` // Organization is the organization ID that resource belongs to - Type string `json:"type"` // Type is the kind of service (e.g. kapacitor or flux) - Metadata map[string]interface{} `json:"metadata"` // Metadata is any other data that the frontend wants to store about this service -} - -// ServersStore stores connection information for a `Server` -type ServersStore interface { - // All returns all servers in the store - All(context.Context) ([]Server, error) - // Add creates a new source in the ServersStore and returns Server with ID - Add(context.Context, Server) (Server, error) - // Delete the Server from the store - Delete(context.Context, Server) error - // Get retrieves Server if `ID` exists - Get(ctx context.Context, ID int) (Server, error) - // Update the Server in the store. - Update(context.Context, Server) error -} - -// ID creates uniq ID string -type ID interface { - // Generate creates a unique ID string - Generate() (string, error) -} - -const ( - // AllScope grants permission for all databases. - AllScope Scope = "all" - // DBScope grants permissions for a specific database - DBScope Scope = "database" -) - -// Permission is a specific allowance for User or Role bound to a -// scope of the data source -type Permission struct { - Scope Scope `json:"scope"` - Name string `json:"name,omitempty"` - Allowed Allowances `json:"allowed"` -} - -// Permissions represent the entire set of permissions a User or Role may have -type Permissions []Permission - -// Allowances defines what actions a user can have on a scoped permission -type Allowances []string - -// Scope defines the location of access of a permission -type Scope string - -// User represents an authenticated user. 
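The Scope/Permission/Allowances trio just above is a small capability model: a permission applies either everywhere (AllScope) or to one named database (DBScope), and Allowed lists the permitted actions. The helper below is hypothetical, not part of the deleted code, but it makes the lookup rule concrete:

```go
package main

import "fmt"

// Mirrored from the deleted types.
type Scope string

const (
	AllScope Scope = "all"
	DBScope  Scope = "database"
)

type Allowances []string

type Permission struct {
	Scope   Scope
	Name    string
	Allowed Allowances
}

type Permissions []Permission

// allowed is a hypothetical check: an action passes if some permission is
// scoped to all databases, or to this database by name, and lists the action.
func allowed(perms Permissions, db, action string) bool {
	for _, p := range perms {
		if p.Scope != AllScope && !(p.Scope == DBScope && p.Name == db) {
			continue
		}
		for _, a := range p.Allowed {
			if a == action {
				return true
			}
		}
	}
	return false
}

func main() {
	perms := Permissions{{Scope: DBScope, Name: "telegraf", Allowed: Allowances{"ReadData"}}}
	fmt.Println(allowed(perms, "telegraf", "ReadData"))  // true
	fmt.Println(allowed(perms, "telegraf", "WriteData")) // false
}
```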
-type User struct { - ID uint64 `json:"id,string,omitempty"` - Name string `json:"name"` - Passwd string `json:"password,omitempty"` - Permissions Permissions `json:"permissions,omitempty"` - Roles []Role `json:"roles"` - Provider string `json:"provider,omitempty"` - Scheme string `json:"scheme,omitempty"` - SuperAdmin bool `json:"superAdmin,omitempty"` -} - -// UserQuery represents the attributes that a user may be retrieved by. -// It is predominantly used in the UsersStore.Get method. -// -// It is expected that only one of ID or Name, Provider, and Scheme will be -// specified, but all are provided UserStores should prefer ID. -type UserQuery struct { - ID *uint64 - Name *string - Provider *string - Scheme *string -} - -// UsersStore is the Storage and retrieval of authentication information -// -// While not necessary for the app to function correctly, it is -// expected that Implementors of the UsersStore will take -// care to guarantee that the combinartion of a users Name, Provider, -// and Scheme are unique. -type UsersStore interface { - // All lists all users from the UsersStore - All(context.Context) ([]User, error) - // Create a new User in the UsersStore - Add(context.Context, *User) (*User, error) - // Delete the User from the UsersStore - Delete(context.Context, *User) error - // Get retrieves a user if name exists. - Get(ctx context.Context, q UserQuery) (*User, error) - // Update the user's permissions or roles - Update(context.Context, *User) error - // Num returns the number of users in the UsersStore - Num(context.Context) (int, error) -} - -// Database represents a database in a time series source -type Database struct { - Name string `json:"name"` // a unique string identifier for the database - Duration string `json:"duration,omitempty"` // the duration (when creating a default retention policy) - Replication int32 `json:"replication,omitempty"` // the replication factor (when creating a default retention policy) - ShardDuration string `json:"shardDuration,omitempty"` // the shard duration (when creating a default retention policy) -} - -// RetentionPolicy represents a retention policy in a time series source -type RetentionPolicy struct { - Name string `json:"name"` // a unique string identifier for the retention policy - Duration string `json:"duration,omitempty"` // the duration - Replication int32 `json:"replication,omitempty"` // the replication factor - ShardDuration string `json:"shardDuration,omitempty"` // the shard duration - Default bool `json:"isDefault,omitempty"` // whether the RP should be the default -} - -// Measurement represents a measurement in a time series source -type Measurement struct { - Name string `json:"name"` // a unique string identifier for the measurement -} - -// Databases represents a databases in a time series source -type Databases interface { - // AllDB lists all databases in the current data source - AllDB(context.Context) ([]Database, error) - // Connect connects to a database in the current data source - Connect(context.Context, *Source) error - // CreateDB creates a database in the current data source - CreateDB(context.Context, *Database) (*Database, error) - // DropDB drops a database in the current data source - DropDB(context.Context, string) error - - // AllRP lists all retention policies in the current data source - AllRP(context.Context, string) ([]RetentionPolicy, error) - // CreateRP creates a retention policy in the current data source - CreateRP(context.Context, string, *RetentionPolicy) (*RetentionPolicy, error) 
- // UpdateRP updates a retention policy in the current data source - UpdateRP(context.Context, string, string, *RetentionPolicy) (*RetentionPolicy, error) - // DropRP drops a retention policy in the current data source - DropRP(context.Context, string, string) error - - // GetMeasurements lists measurements in the current data source - GetMeasurements(ctx context.Context, db string, limit, offset int) ([]Measurement, error) -} - -// Annotation represents a time-based metadata associated with a source -type Annotation struct { - ID string // ID is the unique annotation identifier - StartTime time.Time // StartTime starts the annotation - EndTime time.Time // EndTime ends the annotation - Text string // Text is the associated user-facing text describing the annotation - Type string // Type describes the kind of annotation -} - -// AnnotationStore represents storage and retrieval of annotations -type AnnotationStore interface { - All(ctx context.Context, start, stop time.Time) ([]Annotation, error) // All lists all Annotations between start and stop - Add(context.Context, *Annotation) (*Annotation, error) // Add creates a new annotation in the store - Delete(ctx context.Context, id string) error // Delete removes the annotation from the store - Get(ctx context.Context, id string) (*Annotation, error) // Get retrieves an annotation - Update(context.Context, *Annotation) error // Update replaces annotation -} - -// DashboardID is the dashboard ID -type DashboardID int - -// Dashboard represents all visual and query data for a dashboard -type Dashboard struct { - ID DashboardID `json:"id"` - Cells []DashboardCell `json:"cells"` - Templates []Template `json:"templates"` - Name string `json:"name"` - Organization string `json:"organization"` // Organization is the organization ID that resource belongs to -} - -// Axis represents the visible extents of a visualization -type Axis struct { - Bounds []string `json:"bounds"` // bounds are an arbitrary list of client-defined strings that specify the viewport for a cell - LegacyBounds [2]int64 `json:"-"` // legacy bounds are for testing a migration from an earlier version of axis - Label string `json:"label"` // label is a description of this Axis - Prefix string `json:"prefix"` // Prefix represents a label prefix for formatting axis values - Suffix string `json:"suffix"` // Suffix represents a label suffix for formatting axis values - Base string `json:"base"` // Base represents the radix for formatting axis values - Scale string `json:"scale"` // Scale is the axis formatting scale. Supported: "log", "linear" -} - -// CellColor represents the encoding of data into visualizations -type CellColor struct { - ID string `json:"id"` // ID is the unique id of the cell color - Type string `json:"type"` // Type is how the color is used. 
Accepted (min,max,threshold) - Hex string `json:"hex"` // Hex is the hex number of the color - Name string `json:"name"` // Name is the user-facing name of the hex color - Value string `json:"value"` // Value is the data value mapped to this color -} - -// Legend represents the encoding of data into a legend -type Legend struct { - Type string `json:"type,omitempty"` - Orientation string `json:"orientation,omitempty"` -} - -// DashboardCell holds visual and query information for a cell -type DashboardCell struct { - ID string `json:"i"` - X int32 `json:"x"` - Y int32 `json:"y"` - W int32 `json:"w"` - H int32 `json:"h"` - Name string `json:"name"` - Queries []DashboardQuery `json:"queries"` - Axes map[string]Axis `json:"axes"` - Type string `json:"type"` - CellColors []CellColor `json:"colors"` - Legend Legend `json:"legend"` - TableOptions TableOptions `json:"tableOptions,omitempty"` - FieldOptions []RenamableField `json:"fieldOptions"` - TimeFormat string `json:"timeFormat"` - DecimalPlaces DecimalPlaces `json:"decimalPlaces"` - // These were added after this code was brought over to influxdb. - Note string `json:"note,omitempty"` - NoteVisibility string `json:"noteVisibility,omitempty"` -} - -// RenamableField is a column/row field in a DashboardCell of type Table -type RenamableField struct { - InternalName string `json:"internalName"` - DisplayName string `json:"displayName"` - Visible bool `json:"visible"` -} - -// TableOptions is a type of options for a DashboardCell with type Table -type TableOptions struct { - VerticalTimeAxis bool `json:"verticalTimeAxis"` - SortBy RenamableField `json:"sortBy"` - Wrapping string `json:"wrapping"` - FixFirstColumn bool `json:"fixFirstColumn"` -} - -// DecimalPlaces indicates whether decimal places should be enforced, and how many digits it should show. -type DecimalPlaces struct { - IsEnforced bool `json:"isEnforced"` - Digits int32 `json:"digits"` -} - -// DashboardsStore is the storage and retrieval of dashboards -type DashboardsStore interface { - // All lists all dashboards from the DashboardStore - All(context.Context) ([]Dashboard, error) - // Create a new Dashboard in the DashboardStore - Add(context.Context, Dashboard) (Dashboard, error) - // Delete the Dashboard from the DashboardStore if `ID` exists. - Delete(context.Context, Dashboard) error - // Get retrieves a dashboard if `ID` exists. - Get(ctx context.Context, id DashboardID) (Dashboard, error) - // Update replaces the dashboard information - Update(context.Context, Dashboard) error -} - -// Cell is a rectangle and multiple time series queries to visualize. 
-type Cell struct { - X int32 `json:"x"` - Y int32 `json:"y"` - W int32 `json:"w"` - H int32 `json:"h"` - I string `json:"i"` - Name string `json:"name"` - Queries []Query `json:"queries"` - Axes map[string]Axis `json:"axes"` - Type string `json:"type"` - CellColors []CellColor `json:"colors"` -} - -// Layout is a collection of Cells for visualization -type Layout struct { - ID string `json:"id"` - Application string `json:"app"` - Measurement string `json:"measurement"` - Autoflow bool `json:"autoflow"` - Cells []Cell `json:"cells"` -} - -// LayoutsStore stores dashboards and associated Cells -type LayoutsStore interface { - // All returns all dashboards in the store - All(context.Context) ([]Layout, error) - // Add creates a new dashboard in the LayoutsStore - Add(context.Context, Layout) (Layout, error) - // Delete the dashboard from the store - Delete(context.Context, Layout) error - // Get retrieves Layout if `ID` exists - Get(ctx context.Context, ID string) (Layout, error) - // Update the dashboard in the store. - Update(context.Context, Layout) error -} - -// MappingWildcard is the wildcard value for mappings -const MappingWildcard string = "*" - -// A Mapping is the structure that is used to determine a users -// role within an organization. The high level idea is to grant -// certain roles to certain users without them having to be given -// explicit role within the organization. -// -// One can think of a mapping like so: -// Provider:Scheme:Group -> Organization -// github:oauth2:influxdata -> Happy -// beyondcorp:ldap:influxdata -> TheBillHilliettas -// -// Any of Provider, Scheme, or Group may be provided as a wildcard * -// github:oauth2:* -> MyOrg -// *:*:* -> AllOrg -type Mapping struct { - ID string `json:"id"` - Organization string `json:"organizationId"` - Provider string `json:"provider"` - Scheme string `json:"scheme"` - ProviderOrganization string `json:"providerOrganization"` -} - -// MappingsStore is the storage and retrieval of Mappings -type MappingsStore interface { - // Add creates a new Mapping. - // The Created mapping is returned back to the user with the - // ID field populated. - Add(context.Context, *Mapping) (*Mapping, error) - // All lists all Mapping in the MappingsStore - All(context.Context) ([]Mapping, error) - // Delete removes an Mapping from the MappingsStore - Delete(context.Context, *Mapping) error - // Get retrieves an Mapping from the MappingsStore - Get(context.Context, string) (*Mapping, error) - // Update updates an Mapping in the MappingsStore - Update(context.Context, *Mapping) error -} - -// Organization is a group of resources under a common name -type Organization struct { - ID string `json:"id"` - Name string `json:"name"` - // DefaultRole is the name of the role that is the default for any users added to the organization - DefaultRole string `json:"defaultRole,omitempty"` -} - -// OrganizationQuery represents the attributes that a organization may be retrieved by. -// It is predominantly used in the OrganizationsStore.Get method. -// It is expected that only one of ID or Name will be specified, but will prefer ID over Name if both are specified. -type OrganizationQuery struct { - // If an ID is provided in the query, the lookup time for an organization will be O(1). - ID *string - // If Name is provided, the lookup time will be O(n). 
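The Mapping doc comment above is the clearest statement of the auth model: Provider:Scheme:Group tuples resolve to an organization, and any component may be the wildcard. The deleted store's actual resolution logic is not shown in this file, so the matcher below is an assumption about how such a mapping would apply:

```go
package main

import "fmt"

const MappingWildcard = "*" // as in the deleted constant

// Mapping mirrors the deleted struct's matching fields.
type Mapping struct {
	Organization         string
	Provider             string
	Scheme               string
	ProviderOrganization string
}

// applies is a hypothetical matcher: each component must equal the user's
// value or be the wildcard.
func applies(m Mapping, provider, scheme, group string) bool {
	match := func(pattern, value string) bool {
		return pattern == MappingWildcard || pattern == value
	}
	return match(m.Provider, provider) && match(m.Scheme, scheme) && match(m.ProviderOrganization, group)
}

func main() {
	m := Mapping{Organization: "MyOrg", Provider: "github", Scheme: "oauth2", ProviderOrganization: MappingWildcard}
	fmt.Println(applies(m, "github", "oauth2", "influxdata")) // true
	fmt.Println(applies(m, "google", "oauth2", "influxdata")) // false
}
```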
- Name *string -} - -// OrganizationsStore is the storage and retrieval of Organizations -// -// While not necessary for the app to function correctly, it is -// expected that Implementors of the OrganizationsStore will take -// care to guarantee that the Organization.Name is unique. Allowing -// for duplicate names creates a confusing UX experience for the User. -type OrganizationsStore interface { - // Add creates a new Organization. - // The Created organization is returned back to the user with the - // ID field populated. - Add(context.Context, *Organization) (*Organization, error) - // All lists all Organizations in the OrganizationsStore - All(context.Context) ([]Organization, error) - // Delete removes an Organization from the OrganizationsStore - Delete(context.Context, *Organization) error - // Get retrieves an Organization from the OrganizationsStore - Get(context.Context, OrganizationQuery) (*Organization, error) - // Update updates an Organization in the OrganizationsStore - Update(context.Context, *Organization) error - // CreateDefault creates the default organization - CreateDefault(ctx context.Context) error - // DefaultOrganization returns the DefaultOrganization - DefaultOrganization(ctx context.Context) (*Organization, error) -} - -// Config is the global application Config for parameters that can be set via -// API, with different sections, such as Auth -type Config struct { - Auth AuthConfig `json:"auth"` -} - -// AuthConfig is the global application config section for auth parameters -type AuthConfig struct { - // SuperAdminNewUsers configuration option that specifies which users will auto become super admin - SuperAdminNewUsers bool `json:"superAdminNewUsers"` -} - -// ConfigStore is the storage and retrieval of global application Config -type ConfigStore interface { - // Initialize creates the initial configuration - Initialize(context.Context) error - // Get retrieves the whole Config from the ConfigStore - Get(context.Context) (*Config, error) - // Update updates the whole Config in the ConfigStore - Update(context.Context, *Config) error -} - -// OrganizationConfig is the organization config for parameters that can -// be set via API, with different sections, such as LogViewer -type OrganizationConfig struct { - OrganizationID string `json:"organization"` - LogViewer LogViewerConfig `json:"logViewer"` -} - -// LogViewerConfig is the configuration settings for the Log Viewer UI -type LogViewerConfig struct { - Columns []LogViewerColumn `json:"columns"` -} - -// LogViewerColumn is a specific column of the Log Viewer UI -type LogViewerColumn struct { - Name string `json:"name"` - Position int32 `json:"position"` - Encodings []ColumnEncoding `json:"encodings"` -} - -// ColumnEncoding is the settings for a specific column of the Log Viewer UI -type ColumnEncoding struct { - Type string `json:"type"` - Value string `json:"value"` - Name string `json:"name,omitempty"` -} - -// OrganizationConfigStore is the storage and retrieval of organization Configs -type OrganizationConfigStore interface { - // FindOrCreate gets an existing OrganizationConfig and creates one if none exists - FindOrCreate(ctx context.Context, orgID string) (*OrganizationConfig, error) - // Put replaces the whole organization config in the OrganizationConfigStore - Put(context.Context, *OrganizationConfig) error -} - -// BuildInfo is sent to the usage client to track versions and commits -type BuildInfo struct { - Version string - Commit string -} - -// BuildStore is the storage and retrieval of 
Chronograf build information -type BuildStore interface { - Get(context.Context) (BuildInfo, error) - Update(context.Context, BuildInfo) error -} - -// Environment is the set of front-end exposed environment variables -// that were set on the server -type Environment struct { - TelegrafSystemInterval time.Duration `json:"telegrafSystemInterval"` -} diff --git a/chronograf/cmd/chronoctl/add.go b/chronograf/cmd/chronoctl/add.go deleted file mode 100644 index ccb5add7111..00000000000 --- a/chronograf/cmd/chronoctl/add.go +++ /dev/null @@ -1,120 +0,0 @@ -package main - -import ( - "context" - "strings" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -type AddCommand struct { - BoltPath string `short:"b" long:"bolt-path" description:"Full path to boltDB file (e.g. './chronograf-v1.db')" env:"BOLT_PATH" default:"chronograf-v1.db"` - ID *uint64 `short:"i" long:"id" description:"Users ID. Must be id for existing user"` - Username string `short:"n" long:"name" description:"Users name. Must be Oauth-able email address or username"` - Provider string `short:"p" long:"provider" description:"Name of the Auth provider (e.g. google, github, auth0, or generic)"` - Scheme string `short:"s" long:"scheme" description:"Authentication scheme that matches auth provider (e.g. oauth2)" default:"oauth2"` - Organizations string `short:"o" long:"orgs" description:"A comma separated list of organizations that the user should be added to" default:"default"` -} - -var addCommand AddCommand - -func (l *AddCommand) Execute(args []string) error { - c, err := NewBoltClient(l.BoltPath) - if err != nil { - return err - } - defer c.Close() - - q := chronograf.UserQuery{ - Name: &l.Username, - Provider: &l.Provider, - Scheme: &l.Scheme, - } - - if l.ID != nil { - q.ID = l.ID - } - - ctx := context.Background() - - user, err := c.UsersStore.Get(ctx, q) - if err != nil && err != chronograf.ErrUserNotFound { - return err - } else if err == chronograf.ErrUserNotFound { - user = &chronograf.User{ - Name: l.Username, - Provider: l.Provider, - Scheme: l.Scheme, - Roles: []chronograf.Role{ - { - Name: "member", - Organization: "default", - }, - }, - SuperAdmin: true, - } - - user, err = c.UsersStore.Add(ctx, user) - if err != nil { - return err - } - } else { - user.SuperAdmin = true - if len(user.Roles) == 0 { - user.Roles = []chronograf.Role{ - { - Name: "member", - Organization: "default", - }, - } - } - if err = c.UsersStore.Update(ctx, user); err != nil { - return err - } - } - - // TODO(desa): Apply mapping to user and update their roles - roles := []chronograf.Role{} -OrgLoop: - for _, org := range strings.Split(l.Organizations, ",") { - // Check to see is user is already a part of the organization - for _, r := range user.Roles { - if r.Organization == org { - continue OrgLoop - } - } - - orgQuery := chronograf.OrganizationQuery{ - ID: &org, - } - o, err := c.OrganizationsStore.Get(ctx, orgQuery) - if err != nil { - return err - } - - role := chronograf.Role{ - Organization: org, - Name: o.DefaultRole, - } - roles = append(roles, role) - } - - user.Roles = append(user.Roles, roles...) 
- if err = c.UsersStore.Update(ctx, user); err != nil { - return err - } - - w := NewTabWriter() - WriteHeaders(w) - WriteUser(w, user) - w.Flush() - - return nil -} - -func init() { - parser.AddCommand("add-superadmin", - "Creates a new superadmin user", - "The add-user command will create a new user with superadmin status", - &addCommand) -} diff --git a/chronograf/cmd/chronoctl/list.go b/chronograf/cmd/chronoctl/list.go deleted file mode 100644 index 6396359adfc..00000000000 --- a/chronograf/cmd/chronoctl/list.go +++ /dev/null @@ -1,41 +0,0 @@ -package main - -import ( - "context" -) - -type ListCommand struct { - BoltPath string `short:"b" long:"bolt-path" description:"Full path to boltDB file (e.g. './chronograf-v1.db')" env:"BOLT_PATH" default:"chronograf-v1.db"` -} - -var listCommand ListCommand - -func (l *ListCommand) Execute(args []string) error { - c, err := NewBoltClient(l.BoltPath) - if err != nil { - return err - } - defer c.Close() - - ctx := context.Background() - users, err := c.UsersStore.All(ctx) - if err != nil { - return err - } - - w := NewTabWriter() - WriteHeaders(w) - for _, user := range users { - WriteUser(w, &user) - } - w.Flush() - - return nil -} - -func init() { - parser.AddCommand("list-users", - "Lists users", - "The list-users command will list all users in the chronograf boltdb instance", - &listCommand) -} diff --git a/chronograf/cmd/chronoctl/main.go b/chronograf/cmd/chronoctl/main.go deleted file mode 100644 index 3e8f180b96c..00000000000 --- a/chronograf/cmd/chronoctl/main.go +++ /dev/null @@ -1,27 +0,0 @@ -package main - -import ( - "fmt" - "os" - - "github.com/jessevdk/go-flags" -) - -type Options struct { -} - -var options Options - -var parser = flags.NewParser(&options, flags.Default) - -func main() { - if _, err := parser.Parse(); err != nil { - if flagsErr, ok := err.(*flags.Error); ok && flagsErr.Type == flags.ErrHelp { - os.Exit(0) - } else { - fmt.Fprintln(os.Stdout) - parser.WriteHelp(os.Stdout) - os.Exit(1) - } - } -} diff --git a/chronograf/cmd/chronoctl/util.go b/chronograf/cmd/chronoctl/util.go deleted file mode 100644 index fdf691681cb..00000000000 --- a/chronograf/cmd/chronoctl/util.go +++ /dev/null @@ -1,44 +0,0 @@ -package main - -import ( - "context" - "fmt" - "io" - "os" - "strings" - "text/tabwriter" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/bolt" - "github.com/influxdata/influxdb/v2/chronograf/mocks" -) - -func NewBoltClient(path string) (*bolt.Client, error) { - c := bolt.NewClient() - c.Path = path - - ctx := context.Background() - logger := mocks.NewLogger() - var bi chronograf.BuildInfo - if err := c.Open(ctx, logger, bi); err != nil { - return nil, err - } - - return c, nil -} - -func NewTabWriter() *tabwriter.Writer { - return tabwriter.NewWriter(os.Stdout, 0, 8, 1, '\t', 0) -} - -func WriteHeaders(w io.Writer) { - fmt.Fprintln(w, "ID\tName\tProvider\tScheme\tSuperAdmin\tOrganization(s)") -} - -func WriteUser(w io.Writer, user *chronograf.User) { - orgs := []string{} - for _, role := range user.Roles { - orgs = append(orgs, role.Organization) - } - fmt.Fprintf(w, "%d\t%s\t%s\t%s\t%t\t%s\n", user.ID, user.Name, user.Provider, user.Scheme, user.SuperAdmin, strings.Join(orgs, ",")) -} diff --git a/chronograf/cmd/chronograf/main.go b/chronograf/cmd/chronograf/main.go deleted file mode 100644 index 516dc2f4925..00000000000 --- a/chronograf/cmd/chronograf/main.go +++ /dev/null @@ -1,50 +0,0 @@ -package main - -import ( - "context" - "log" - 
"os" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/server" - flags "github.com/jessevdk/go-flags" -) - -// Build flags -var ( - version = "" - commit = "" -) - -func main() { - srv := server.Server{ - BuildInfo: chronograf.BuildInfo{ - Version: version, - Commit: commit, - }, - } - - parser := flags.NewParser(&srv, flags.Default) - parser.ShortDescription = `Chronograf` - parser.LongDescription = `Options for Chronograf` - - if _, err := parser.Parse(); err != nil { - code := 1 - if fe, ok := err.(*flags.Error); ok { - if fe.Type == flags.ErrHelp { - code = 0 - } - } - os.Exit(code) - } - - if srv.ShowVersion { - log.Printf("Chronograf %s (git: %s)\n", version, commit) - os.Exit(0) - } - - ctx := context.Background() - if err := srv.Serve(ctx); err != nil { - log.Fatalln(err) - } -} diff --git a/chronograf/dist/Makefile b/chronograf/dist/Makefile deleted file mode 100644 index 0f7175c065a..00000000000 --- a/chronograf/dist/Makefile +++ /dev/null @@ -1,26 +0,0 @@ -# List any generated files here -TARGETS = dist_gen.go -# List any source files used to generate the targets here -SOURCES = dist.go $(shell find ../../ui/build -type f) -# List any directories that have their own Makefile here -SUBDIRS = - -# Default target -all: $(SUBDIRS) $(TARGETS) - -# Recurse into subdirs for same make goal -$(SUBDIRS): - $(MAKE) -C $@ $(MAKECMDGOALS) - -# Clean all targets recursively -clean: $(SUBDIRS) - rm -f $(TARGETS) - -# Define go generate if not already defined -GO_GENERATE := go generate - -# Run go generate for the targets -$(TARGETS): $(SOURCES) - $(GO_GENERATE) -x - -.PHONY: all clean $(SUBDIRS) diff --git a/chronograf/dist/TODO.go b/chronograf/dist/TODO.go deleted file mode 100644 index 0465bc84652..00000000000 --- a/chronograf/dist/TODO.go +++ /dev/null @@ -1,28 +0,0 @@ -// +build !assets - -package dist - -import ( - "errors" - "os" -) - -// The functions defined in this file are placeholders -// when the binary is compiled without assets. - -var errNoAssets = errors.New("no assets included in binary") - -// Asset returns an error stating no assets were included in the binary. -func Asset(string) ([]byte, error) { - return nil, errNoAssets -} - -// AssetInfo returns an error stating no assets were included in the binary. -func AssetInfo(name string) (os.FileInfo, error) { - return nil, errNoAssets -} - -// AssetDir returns nil because there are no assets included in the binary. -func AssetDir(name string) ([]string, error) { - return nil, errNoAssets -} diff --git a/chronograf/dist/dir.go b/chronograf/dist/dir.go deleted file mode 100644 index 1f4ac90b933..00000000000 --- a/chronograf/dist/dir.go +++ /dev/null @@ -1,33 +0,0 @@ -package dist - -import ( - "net/http" - "os" -) - -// Dir functions like http.Dir except returns the content of a default file if not found. -type Dir struct { - Default string - dir http.Dir -} - -// NewDir constructs a Dir with a default file -func NewDir(dir, def string) Dir { - return Dir{ - Default: def, - dir: http.Dir(dir), - } -} - -// Open will return the file in the dir if it exists, or, the Default file otherwise. 
-func (d Dir) Open(name string) (http.File, error) { - f, err := d.dir.Open(name) - if err != nil { - f, err = os.Open(d.Default) - if err != nil { - return nil, err - } - return f, nil - } - return f, err -} diff --git a/chronograf/dist/dist.go b/chronograf/dist/dist.go deleted file mode 100644 index 0199d24a5f6..00000000000 --- a/chronograf/dist/dist.go +++ /dev/null @@ -1,88 +0,0 @@ -package dist - -//go:generate env GO111MODULE=on go run github.com/kevinburke/go-bindata/go-bindata -o dist_gen.go -ignore 'map|go' -tags assets -pkg dist ../../ui/build/... - -import ( - "fmt" - "net/http" - - "github.com/elazarl/go-bindata-assetfs" -) - -// DebugAssets serves assets via a specified directory -type DebugAssets struct { - Dir string // Dir is a directory location of asset files - Default string // Default is the file to serve if file is not found. -} - -// Handler is an http.FileServer for the Dir -func (d *DebugAssets) Handler() http.Handler { - return http.FileServer(NewDir(d.Dir, d.Default)) -} - -// BindataAssets serves assets from go-bindata, but, also serves Default if assent doesn't exist -// This is to support single-page react-apps with its own router. -type BindataAssets struct { - Prefix string // Prefix is prepended to the http file request - Default string // Default is the file to serve if the file is not found - DefaultContentType string // DefaultContentType is the content type of the default file -} - -// Handler serves go-bindata using a go-bindata-assetfs façade -func (b *BindataAssets) Handler() http.Handler { - return b -} - -// addCacheHeaders requests an hour of Cache-Control and sets an ETag based on file size and modtime -func (b *BindataAssets) addCacheHeaders(filename string, w http.ResponseWriter) error { - w.Header().Add("Cache-Control", "public, max-age=3600") - fi, err := AssetInfo(filename) - if err != nil { - return err - } - - hour, minute, second := fi.ModTime().Clock() - etag := fmt.Sprintf(`"%d%d%d%d%d"`, fi.Size(), fi.ModTime().Day(), hour, minute, second) - - w.Header().Set("ETag", etag) - return nil -} - -// ServeHTTP wraps http.FileServer by returning a default asset if the asset -// doesn't exist. This supports single-page react-apps with its own -// built-in router. Additionally, we override the content-type if the -// Default file is used. -func (b *BindataAssets) ServeHTTP(w http.ResponseWriter, r *http.Request) { - // def wraps the assets to return the default file if the file doesn't exist - def := func(name string) ([]byte, error) { - // If the named asset exists, then return it directly. - octets, err := Asset(name) - if err != nil { - // If this is at / then we just error out so we can return a Directory - // This directory will then be redirected by go to the /index.html - if name == b.Prefix { - return nil, err - } - // If this is anything other than slash, we just return the default - // asset. This default asset will handle the routing. - // Additionally, because we know we are returning the default asset, - // we need to set the default asset's content-type. 
- w.Header().Set("Content-Type", b.DefaultContentType) - if err := b.addCacheHeaders(b.Default, w); err != nil { - return nil, err - } - return Asset(b.Default) - } - if err := b.addCacheHeaders(name, w); err != nil { - return nil, err - } - return octets, nil - } - var dir http.FileSystem = &assetfs.AssetFS{ - Asset: def, - AssetDir: AssetDir, - AssetInfo: AssetInfo, - Prefix: b.Prefix, - } - http.FileServer(dir).ServeHTTP(w, r) -} diff --git a/chronograf/enterprise/enterprise.go b/chronograf/enterprise/enterprise.go deleted file mode 100644 index fd287bc6030..00000000000 --- a/chronograf/enterprise/enterprise.go +++ /dev/null @@ -1,225 +0,0 @@ -package enterprise - -import ( - "container/ring" - "net/url" - "strings" - - "context" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/influx" -) - -var _ chronograf.TimeSeries = &Client{} - -// Ctrl represents administrative controls over an Influx Enterprise cluster -type Ctrl interface { - ShowCluster(ctx context.Context) (*Cluster, error) - - Users(ctx context.Context, name *string) (*Users, error) - User(ctx context.Context, name string) (*User, error) - CreateUser(ctx context.Context, name, passwd string) error - DeleteUser(ctx context.Context, name string) error - ChangePassword(ctx context.Context, name, passwd string) error - SetUserPerms(ctx context.Context, name string, perms Permissions) error - - UserRoles(ctx context.Context) (map[string]Roles, error) - - Roles(ctx context.Context, name *string) (*Roles, error) - Role(ctx context.Context, name string) (*Role, error) - CreateRole(ctx context.Context, name string) error - DeleteRole(ctx context.Context, name string) error - SetRolePerms(ctx context.Context, name string, perms Permissions) error - SetRoleUsers(ctx context.Context, name string, users []string) error - AddRoleUsers(ctx context.Context, name string, users []string) error - RemoveRoleUsers(ctx context.Context, name string, users []string) error -} - -// Client is a device for retrieving time series data from an Influx Enterprise -// cluster. It is configured using the addresses of one or more meta node URLs. -// Data node URLs are retrieved automatically from the meta nodes and queries -// are appropriately load balanced across the cluster. -type Client struct { - Ctrl - UsersStore chronograf.UsersStore - RolesStore chronograf.RolesStore - Logger chronograf.Logger - - dataNodes *ring.Ring - opened bool -} - -// NewClientWithTimeSeries initializes a Client with a known set of TimeSeries. -func NewClientWithTimeSeries(lg chronograf.Logger, mu string, authorizer influx.Authorizer, tls, insecure bool, series ...chronograf.TimeSeries) (*Client, error) { - metaURL, err := parseMetaURL(mu, tls) - if err != nil { - return nil, err - } - - ctrl := NewMetaClient(metaURL, insecure, authorizer) - c := &Client{ - Ctrl: ctrl, - UsersStore: &UserStore{ - Ctrl: ctrl, - Logger: lg, - }, - RolesStore: &RolesStore{ - Ctrl: ctrl, - Logger: lg, - }, - } - - c.dataNodes = ring.New(len(series)) - - for _, s := range series { - c.dataNodes.Value = s - c.dataNodes = c.dataNodes.Next() - } - - return c, nil -} - -// NewClientWithURL initializes an Enterprise client with a URL to a Meta Node. -// Acceptable URLs include host:port combinations as well as scheme://host:port -// varieties. TLS is used when the URL contains "https" or when the TLS -// parameter is set. authorizer will add the correct `Authorization` headers -// on the out-bound request. 
-func NewClientWithURL(mu string, authorizer influx.Authorizer, tls bool, insecure bool, lg chronograf.Logger) (*Client, error) { - metaURL, err := parseMetaURL(mu, tls) - if err != nil { - return nil, err - } - - ctrl := NewMetaClient(metaURL, insecure, authorizer) - return &Client{ - Ctrl: ctrl, - UsersStore: &UserStore{ - Ctrl: ctrl, - Logger: lg, - }, - RolesStore: &RolesStore{ - Ctrl: ctrl, - Logger: lg, - }, - Logger: lg, - }, nil -} - -// Connect prepares a Client to process queries. It must be called prior to calling Query -func (c *Client) Connect(ctx context.Context, src *chronograf.Source) error { - c.opened = true - // return early if we already have dataNodes - if c.dataNodes != nil { - return nil - } - cluster, err := c.Ctrl.ShowCluster(ctx) - if err != nil { - return err - } - - c.dataNodes = ring.New(len(cluster.DataNodes)) - for _, dn := range cluster.DataNodes { - cl := &influx.Client{ - Logger: c.Logger, - } - dataSrc := &chronograf.Source{} - *dataSrc = *src - dataSrc.URL = dn.HTTPAddr - if err := cl.Connect(ctx, dataSrc); err != nil { - continue - } - c.dataNodes.Value = cl - c.dataNodes = c.dataNodes.Next() - } - return nil -} - -// Query retrieves timeseries information pertaining to a specified query. It -// can be cancelled by using a provided context. -func (c *Client) Query(ctx context.Context, q chronograf.Query) (chronograf.Response, error) { - if !c.opened { - return nil, chronograf.ErrUninitialized - } - return c.nextDataNode().Query(ctx, q) -} - -// Write records points into a time series -func (c *Client) Write(ctx context.Context, points []chronograf.Point) error { - if !c.opened { - return chronograf.ErrUninitialized - } - return c.nextDataNode().Write(ctx, points) -} - -// Users is the interface to the users within Influx Enterprise -func (c *Client) Users(context.Context) chronograf.UsersStore { - return c.UsersStore -} - -// Roles provide a grouping of permissions given to a grouping of users -func (c *Client) Roles(ctx context.Context) (chronograf.RolesStore, error) { - return c.RolesStore, nil -} - -// Permissions returns all Influx Enterprise permission strings -func (c *Client) Permissions(context.Context) chronograf.Permissions { - all := chronograf.Allowances{ - "NoPermissions", - "ViewAdmin", - "ViewChronograf", - "CreateDatabase", - "CreateUserAndRole", - "AddRemoveNode", - "DropDatabase", - "DropData", - "ReadData", - "WriteData", - "Rebalance", - "ManageShard", - "ManageContinuousQuery", - "ManageQuery", - "ManageSubscription", - "Monitor", - "CopyShard", - "KapacitorAPI", - "KapacitorConfigAPI", - } - - return chronograf.Permissions{ - { - Scope: chronograf.AllScope, - Allowed: all, - }, - { - Scope: chronograf.DBScope, - Allowed: all, - }, - } -} - -// nextDataNode retrieves the next available data node -func (c *Client) nextDataNode() chronograf.TimeSeries { - c.dataNodes = c.dataNodes.Next() - return c.dataNodes.Value.(chronograf.TimeSeries) -} - -// parseMetaURL constructs a url from either a host:port combination or a -// scheme://host:port combo. 
The optional TLS parameter takes precedence over -// any TLS preference found in the provided URL -func parseMetaURL(mu string, tls bool) (metaURL *url.URL, err error) { - if strings.Contains(mu, "http") { - metaURL, err = url.Parse(mu) - } else { - metaURL = &url.URL{ - Scheme: "http", - Host: mu, - } - } - - if tls { - metaURL.Scheme = "https" - } - - return -} diff --git a/chronograf/enterprise/enterprise_test.go b/chronograf/enterprise/enterprise_test.go deleted file mode 100644 index 06efffc9fcf..00000000000 --- a/chronograf/enterprise/enterprise_test.go +++ /dev/null @@ -1,269 +0,0 @@ -package enterprise_test - -import ( - "context" - "net/http" - "net/http/httptest" - "reflect" - "testing" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/enterprise" - "github.com/influxdata/influxdb/v2/chronograf/influx" -) - -func Test_Enterprise_FetchesDataNodes(t *testing.T) { - t.Parallel() - showClustersCalled := false - ctrl := &mockCtrl{ - showCluster: func(ctx context.Context) (*enterprise.Cluster, error) { - showClustersCalled = true - return &enterprise.Cluster{}, nil - }, - } - - cl := &enterprise.Client{ - Ctrl: ctrl, - } - - bg := context.Background() - err := cl.Connect(bg, &chronograf.Source{}) - - if err != nil { - t.Fatal("Unexpected error while creating enterprise client. err:", err) - } - - if !showClustersCalled { - t.Fatal("Expected request to meta node but none was issued") - } -} - -func Test_Enterprise_IssuesQueries(t *testing.T) { - t.Parallel() - - called := false - ts := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - called = true - if r.URL.Path != "/query" { - t.Fatal("Expected request to '/query' but was", r.URL.Path) - } - rw.Write([]byte(`{}`)) - })) - defer ts.Close() - - cl := &enterprise.Client{ - Ctrl: NewMockControlClient(ts.URL), - Logger: &chronograf.NoopLogger{}, - } - - err := cl.Connect(context.Background(), &chronograf.Source{}) - if err != nil { - t.Fatal("Unexpected error initializing client: err:", err) - } - - _, err = cl.Query(context.Background(), chronograf.Query{Command: "show shards", DB: "_internal", RP: "autogen"}) - - if err != nil { - t.Fatal("Unexpected error while querying data node: err:", err) - } - - if !called { - t.Fatal("Expected request to data node but none was received") - } -} - -func Test_Enterprise_AdvancesDataNodes(t *testing.T) { - m1 := NewMockTimeSeries("http://host-1.example.com:8086") - m2 := NewMockTimeSeries("http://host-2.example.com:8086") - cl, err := enterprise.NewClientWithTimeSeries( - &chronograf.NoopLogger{}, - "http://meta.example.com:8091", - &influx.BasicAuth{ - Username: "marty", - Password: "thelake", - }, - false, - false, - chronograf.TimeSeries(m1), - chronograf.TimeSeries(m2)) - if err != nil { - t.Error("Unexpected error while initializing client: err:", err) - } - - err = cl.Connect(context.Background(), &chronograf.Source{}) - if err != nil { - t.Error("Unexpected error while initializing client: err:", err) - } - - _, err = cl.Query(context.Background(), chronograf.Query{Command: "show shards", DB: "_internal", RP: "autogen"}) - if err != nil { - t.Fatal("Unexpected error while issuing query: err:", err) - } - - _, err = cl.Query(context.Background(), chronograf.Query{Command: "show shards", DB: "_internal", RP: "autogen"}) - if err != nil { - t.Fatal("Unexpected error while issuing query: err:", err) - } - - if m1.QueryCtr != 1 || m2.QueryCtr != 1 { - t.Fatalf("Expected m1.Query to be 
called once but was %d. Expected m2.Query to be called once but was %d\n", m1.QueryCtr, m2.QueryCtr) - } -} - -func Test_Enterprise_NewClientWithURL(t *testing.T) { - t.Parallel() - - urls := []struct { - name string - url string - username string - password string - tls bool - insecureSkipVerify bool - wantErr bool - }{ - { - name: "no tls should have no error", - url: "http://localhost:8086", - }, - { - name: "tls should have no error", - url: "https://localhost:8086", - }, - { - name: "no tls but with basic auth", - url: "http://localhost:8086", - username: "username", - password: "password", - }, - { - name: "tls request but url is not tls should not error", - url: "http://localhost:8086", - tls: true, - }, - { - name: "https with tls and with insecureSkipVerify should not error", - url: "https://localhost:8086", - tls: true, - insecureSkipVerify: true, - }, - { - name: "URL does not require http or https", - url: "localhost:8086", - }, - { - name: "URL with TLS request should not error", - url: "localhost:8086", - tls: true, - }, - { - name: "invalid URL causes error", - url: ":http", - wantErr: true, - }, - } - - for _, testURL := range urls { - _, err := enterprise.NewClientWithURL( - testURL.url, - &influx.BasicAuth{ - Username: testURL.username, - Password: testURL.password, - }, - testURL.tls, - testURL.insecureSkipVerify, - &chronograf.NoopLogger{}) - if err != nil && !testURL.wantErr { - t.Errorf("Unexpected error creating Client with URL %s and TLS preference %t. err: %s", testURL.url, testURL.tls, err.Error()) - } else if err == nil && testURL.wantErr { - t.Errorf("Expected error creating Client with URL %s and TLS preference %t", testURL.url, testURL.tls) - } - } -} - -func Test_Enterprise_ComplainsIfNotOpened(t *testing.T) { - m1 := NewMockTimeSeries("http://host-1.example.com:8086") - cl, err := enterprise.NewClientWithTimeSeries( - &chronograf.NoopLogger{}, - "http://meta.example.com:8091", - &influx.BasicAuth{ - Username: "docbrown", - Password: "1.21 gigawatts", - }, - false, false, chronograf.TimeSeries(m1)) - if err != nil { - t.Error("Expected nil, but was this err:", err) - } - _, err = cl.Query(context.Background(), chronograf.Query{Command: "show shards", DB: "_internal", RP: "autogen"}) - if err != chronograf.ErrUninitialized { - t.Error("Expected ErrUninitialized, but was this err:", err) - } -} - -func TestClient_Permissions(t *testing.T) { - tests := []struct { - name string - - want chronograf.Permissions - }{ - { - name: "All possible enterprise permissions", - want: chronograf.Permissions{ - { - Scope: chronograf.AllScope, - Allowed: chronograf.Allowances{ - "NoPermissions", - "ViewAdmin", - "ViewChronograf", - "CreateDatabase", - "CreateUserAndRole", - "AddRemoveNode", - "DropDatabase", - "DropData", - "ReadData", - "WriteData", - "Rebalance", - "ManageShard", - "ManageContinuousQuery", - "ManageQuery", - "ManageSubscription", - "Monitor", - "CopyShard", - "KapacitorAPI", - "KapacitorConfigAPI", - }, - }, - { - Scope: chronograf.DBScope, - Allowed: chronograf.Allowances{ - "NoPermissions", - "ViewAdmin", - "ViewChronograf", - "CreateDatabase", - "CreateUserAndRole", - "AddRemoveNode", - "DropDatabase", - "DropData", - "ReadData", - "WriteData", - "Rebalance", - "ManageShard", - "ManageContinuousQuery", - "ManageQuery", - "ManageSubscription", - "Monitor", - "CopyShard", - "KapacitorAPI", - "KapacitorConfigAPI", - }, - }, - }, - }, - } - for _, tt := range tests { - c := &enterprise.Client{} - if got := c.Permissions(context.Background()); 
!reflect.DeepEqual(got, tt.want) { - t.Errorf("%q. Client.Permissions() = %v, want %v", tt.name, got, tt.want) - } - } -} diff --git a/chronograf/enterprise/meta.go b/chronograf/enterprise/meta.go deleted file mode 100644 index 7b1bcd11f39..00000000000 --- a/chronograf/enterprise/meta.go +++ /dev/null @@ -1,568 +0,0 @@ -package enterprise - -import ( - "bytes" - "context" - "crypto/tls" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "time" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/influx" -) - -// Shared transports for all clients to prevent leaking connections -var ( - skipVerifyTransport = &http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, - } - defaultTransport = &http.Transport{} -) - -type client interface { - Do(URL *url.URL, path, method string, authorizer influx.Authorizer, params map[string]string, body io.Reader) (*http.Response, error) -} - -// MetaClient represents a Meta node in an Influx Enterprise cluster -type MetaClient struct { - URL *url.URL - client client - authorizer influx.Authorizer -} - -// NewMetaClient represents a meta node in an Influx Enterprise cluster -func NewMetaClient(url *url.URL, InsecureSkipVerify bool, authorizer influx.Authorizer) *MetaClient { - return &MetaClient{ - URL: url, - client: &defaultClient{ - InsecureSkipVerify: InsecureSkipVerify, - }, - authorizer: authorizer, - } -} - -type jsonLDAPConfig struct { - Enabled bool `json:"enabled"` -} - -// LDAPConfig represents the configuration for ldap from influxdb -type LDAPConfig struct { - Structured jsonLDAPConfig `json:"structured"` -} - -func (m *MetaClient) requestLDAPChannel(ctx context.Context, errors chan error) chan *http.Response { - channel := make(chan *http.Response, 1) - go (func() { - res, err := m.Do(ctx, "/ldap/v1/config", "GET", m.authorizer, nil, nil) - if err != nil { - errors <- err - } else { - channel <- res - } - })() - - return channel -} - -// GetLDAPConfig get the current ldap config response from influxdb enterprise -func (m *MetaClient) GetLDAPConfig(ctx context.Context) (*LDAPConfig, error) { - ctxt, cancel := context.WithTimeout(ctx, 2*time.Second) - defer cancel() - - errorCh := make(chan error, 1) - responseChannel := m.requestLDAPChannel(ctxt, errorCh) - - select { - case res := <-responseChannel: - result, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, err - } - - var config LDAPConfig - err = json.Unmarshal(result, &config) - if err != nil { - return nil, err - } - - return &config, nil - case err := <-errorCh: - return nil, err - case <-ctxt.Done(): - return nil, ctxt.Err() - } -} - -// ShowCluster returns the cluster configuration (not health) -func (m *MetaClient) ShowCluster(ctx context.Context) (*Cluster, error) { - res, err := m.Do(ctx, "/show-cluster", "GET", m.authorizer, nil, nil) - if err != nil { - return nil, err - } - - defer res.Body.Close() - dec := json.NewDecoder(res.Body) - out := &Cluster{} - err = dec.Decode(out) - if err != nil { - return nil, err - } - return out, nil -} - -// Users gets all the users. 
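// [Editor's aside — not part of the diff.] GetLDAPConfig above (and
// MetaClient.Do, further down) run the HTTP call in a goroutine and race it
// against the context in a select. A generic, hypothetical sketch of that
// timeout pattern:
package main

import (
	"context"
	"fmt"
	"time"
)

func fetch(ctx context.Context, do func() (string, error)) (string, error) {
	type result struct {
		v   string
		err error
	}
	ch := make(chan result, 1) // buffered so the goroutine never blocks forever
	go func() {
		v, err := do()
		ch <- result{v, err}
	}()
	select {
	case r := <-ch:
		return r.v, r.err
	case <-ctx.Done(): // timeout or cancellation wins the race
		return "", ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	fmt.Println(fetch(ctx, func() (string, error) { return "ok", nil }))
}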
If name is not nil it filters for a single user -func (m *MetaClient) Users(ctx context.Context, name *string) (*Users, error) { - params := map[string]string{} - if name != nil { - params["name"] = *name - } - res, err := m.Do(ctx, "/user", "GET", m.authorizer, params, nil) - if err != nil { - return nil, err - } - - defer res.Body.Close() - dec := json.NewDecoder(res.Body) - users := &Users{} - err = dec.Decode(users) - if err != nil { - return nil, err - } - return users, nil -} - -// User returns a single Influx Enterprise user -func (m *MetaClient) User(ctx context.Context, name string) (*User, error) { - users, err := m.Users(ctx, &name) - if err != nil { - return nil, err - } - - for _, user := range users.Users { - return &user, nil - } - return nil, fmt.Errorf("no user found") -} - -// CreateUser adds a user to Influx Enterprise -func (m *MetaClient) CreateUser(ctx context.Context, name, passwd string) error { - return m.CreateUpdateUser(ctx, "create", name, passwd) -} - -// ChangePassword updates a user's password in Influx Enterprise -func (m *MetaClient) ChangePassword(ctx context.Context, name, passwd string) error { - return m.CreateUpdateUser(ctx, "change-password", name, passwd) -} - -// CreateUpdateUser is a helper function to POST to the /user Influx Enterprise endpoint -func (m *MetaClient) CreateUpdateUser(ctx context.Context, action, name, passwd string) error { - a := &UserAction{ - Action: action, - User: &User{ - Name: name, - Password: passwd, - }, - } - return m.Post(ctx, "/user", a, nil) -} - -// DeleteUser removes a user from Influx Enterprise -func (m *MetaClient) DeleteUser(ctx context.Context, name string) error { - a := &UserAction{ - Action: "delete", - User: &User{ - Name: name, - }, - } - - return m.Post(ctx, "/user", a, nil) -} - -// RemoveUserPerms revokes permissions for a user in Influx Enterprise -func (m *MetaClient) RemoveUserPerms(ctx context.Context, name string, perms Permissions) error { - a := &UserAction{ - Action: "remove-permissions", - User: &User{ - Name: name, - Permissions: perms, - }, - } - return m.Post(ctx, "/user", a, nil) -} - -// SetUserPerms removes permissions not in set and then adds the requested perms -func (m *MetaClient) SetUserPerms(ctx context.Context, name string, perms Permissions) error { - user, err := m.User(ctx, name) - if err != nil { - return err - } - - revoke, add := permissionsDifference(perms, user.Permissions) - - // first, revoke all the permissions the user currently has, but, - // shouldn't... - if len(revoke) > 0 { - err := m.RemoveUserPerms(ctx, name, revoke) - if err != nil { - return err - } - } - - // ... next, add any permissions the user should have - if len(add) > 0 { - a := &UserAction{ - Action: "add-permissions", - User: &User{ - Name: name, - Permissions: add, - }, - } - return m.Post(ctx, "/user", a, nil) - } - return nil -} - -// UserRoles returns a map of users to all of their current roles -func (m *MetaClient) UserRoles(ctx context.Context) (map[string]Roles, error) { - res, err := m.Roles(ctx, nil) - if err != nil { - return nil, err - } - - userRoles := make(map[string]Roles) - for _, role := range res.Roles { - for _, u := range role.Users { - ur, ok := userRoles[u] - if !ok { - ur = Roles{} - } - ur.Roles = append(ur.Roles, role) - userRoles[u] = ur - } - } - return userRoles, nil -} - -// Roles gets all the roles. 
If name is not nil it filters for a single role -func (m *MetaClient) Roles(ctx context.Context, name *string) (*Roles, error) { - params := map[string]string{} - if name != nil { - params["name"] = *name - } - res, err := m.Do(ctx, "/role", "GET", m.authorizer, params, nil) - if err != nil { - return nil, err - } - - defer res.Body.Close() - dec := json.NewDecoder(res.Body) - roles := &Roles{} - err = dec.Decode(roles) - if err != nil { - return nil, err - } - return roles, nil -} - -// Role returns a single named role -func (m *MetaClient) Role(ctx context.Context, name string) (*Role, error) { - roles, err := m.Roles(ctx, &name) - if err != nil { - return nil, err - } - for _, role := range roles.Roles { - return &role, nil - } - return nil, fmt.Errorf("no role found") -} - -// CreateRole adds a role to Influx Enterprise -func (m *MetaClient) CreateRole(ctx context.Context, name string) error { - a := &RoleAction{ - Action: "create", - Role: &Role{ - Name: name, - }, - } - return m.Post(ctx, "/role", a, nil) -} - -// DeleteRole removes a role from Influx Enterprise -func (m *MetaClient) DeleteRole(ctx context.Context, name string) error { - a := &RoleAction{ - Action: "delete", - Role: &Role{ - Name: name, - }, - } - return m.Post(ctx, "/role", a, nil) -} - -// RemoveRolePerms revokes permissions from a role -func (m *MetaClient) RemoveRolePerms(ctx context.Context, name string, perms Permissions) error { - a := &RoleAction{ - Action: "remove-permissions", - Role: &Role{ - Name: name, - Permissions: perms, - }, - } - return m.Post(ctx, "/role", a, nil) -} - -// SetRolePerms removes permissions not in set and then adds the requested perms to role -func (m *MetaClient) SetRolePerms(ctx context.Context, name string, perms Permissions) error { - role, err := m.Role(ctx, name) - if err != nil { - return err - } - - revoke, add := permissionsDifference(perms, role.Permissions) - - // first, revoke all the permissions the role currently has, but, - // shouldn't... - if len(revoke) > 0 { - err := m.RemoveRolePerms(ctx, name, revoke) - if err != nil { - return err - } - } - - // ... 
next, add any permissions the role should have - if len(add) > 0 { - a := &RoleAction{ - Action: "add-permissions", - Role: &Role{ - Name: name, - Permissions: add, - }, - } - return m.Post(ctx, "/role", a, nil) - } - return nil -} - -// SetRoleUsers removes users not in role and then adds the requested users to role -func (m *MetaClient) SetRoleUsers(ctx context.Context, name string, users []string) error { - role, err := m.Role(ctx, name) - if err != nil { - return err - } - revoke, add := Difference(users, role.Users) - if err := m.RemoveRoleUsers(ctx, name, revoke); err != nil { - return err - } - - return m.AddRoleUsers(ctx, name, add) -} - -// Difference compares two sets and returns a set to be removed and a set to be added -func Difference(wants []string, haves []string) (revoke []string, add []string) { - for _, want := range wants { - found := false - for _, got := range haves { - if want != got { - continue - } - found = true - } - if !found { - add = append(add, want) - } - } - for _, got := range haves { - found := false - for _, want := range wants { - if want != got { - continue - } - found = true - break - } - if !found { - revoke = append(revoke, got) - } - } - return -} - -func permissionsDifference(wants Permissions, haves Permissions) (revoke Permissions, add Permissions) { - revoke = make(Permissions) - add = make(Permissions) - for scope, want := range wants { - have, ok := haves[scope] - if ok { - r, a := Difference(want, have) - revoke[scope] = r - add[scope] = a - } else { - add[scope] = want - } - } - - for scope, have := range haves { - _, ok := wants[scope] - if !ok { - revoke[scope] = have - } - } - return -} - -// AddRoleUsers updates a role to have additional users. -func (m *MetaClient) AddRoleUsers(ctx context.Context, name string, users []string) error { - // No permissions to add, so, role is in the right state - if len(users) == 0 { - return nil - } - - a := &RoleAction{ - Action: "add-users", - Role: &Role{ - Name: name, - Users: users, - }, - } - return m.Post(ctx, "/role", a, nil) -} - -// RemoveRoleUsers updates a role to remove some users. 
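// [Editor's aside — not part of the diff.] Difference and
// permissionsDifference above implement a two-way set diff: anything wanted
// but not held is added, anything held but no longer wanted is revoked. A
// map-based sketch of the same contract (the deleted code uses nested loops;
// this hypothetical version is O(n+m)):
package main

import "fmt"

func difference(wants, haves []string) (revoke, add []string) {
	haveSet := make(map[string]bool, len(haves))
	for _, h := range haves {
		haveSet[h] = true
	}
	wantSet := make(map[string]bool, len(wants))
	for _, w := range wants {
		wantSet[w] = true
		if !haveSet[w] {
			add = append(add, w) // wanted but not currently held
		}
	}
	for _, h := range haves {
		if !wantSet[h] {
			revoke = append(revoke, h) // held but no longer wanted
		}
	}
	return revoke, add
}

func main() {
	revoke, add := difference(
		[]string{"ReadData", "WriteData"},
		[]string{"WriteData", "Rebalance"},
	)
	fmt.Println(revoke, add) // [Rebalance] [ReadData]
}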
-func (m *MetaClient) RemoveRoleUsers(ctx context.Context, name string, users []string) error { - // No permissions to add, so, role is in the right state - if len(users) == 0 { - return nil - } - - a := &RoleAction{ - Action: "remove-users", - Role: &Role{ - Name: name, - Users: users, - }, - } - return m.Post(ctx, "/role", a, nil) -} - -// Post is a helper function to POST to Influx Enterprise -func (m *MetaClient) Post(ctx context.Context, path string, action interface{}, params map[string]string) error { - b, err := json.Marshal(action) - if err != nil { - return err - } - body := bytes.NewReader(b) - _, err = m.Do(ctx, path, "POST", m.authorizer, params, body) - if err != nil { - return err - } - return nil -} - -type defaultClient struct { - Leader string - InsecureSkipVerify bool -} - -// Do is a helper function to interface with Influx Enterprise's Meta API -func (d *defaultClient) Do(URL *url.URL, path, method string, authorizer influx.Authorizer, params map[string]string, body io.Reader) (*http.Response, error) { - p := url.Values{} - for k, v := range params { - p.Add(k, v) - } - - URL.Path = path - URL.RawQuery = p.Encode() - if d.Leader == "" { - d.Leader = URL.Host - } else if d.Leader != URL.Host { - URL.Host = d.Leader - } - - req, err := http.NewRequest(method, URL.String(), body) - if err != nil { - return nil, err - } - - if body != nil { - req.Header.Set("Content-Type", "application/json") - } - - if authorizer != nil { - if err = authorizer.Set(req); err != nil { - return nil, err - } - } - - // Meta servers will redirect (307) to leader. We need - // special handling to preserve authentication headers. - client := &http.Client{ - CheckRedirect: d.AuthedCheckRedirect, - } - - if d.InsecureSkipVerify { - client.Transport = skipVerifyTransport - } else { - client.Transport = defaultTransport - } - - res, err := client.Do(req) - if err != nil { - return nil, err - } - - if res.StatusCode != http.StatusOK { - defer res.Body.Close() - dec := json.NewDecoder(res.Body) - out := &Error{} - err = dec.Decode(out) - if err != nil { - return nil, err - } - return nil, errors.New(out.Error) - } - - return res, nil - -} - -// AuthedCheckRedirect tries to follow the Influx Enterprise pattern of -// redirecting to the leader but preserving authentication headers. 
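// [Editor's aside — not part of the diff.] Since Go 1.8, net/http forwards
// the Authorization header on a redirect only when the target stays on the
// same host (or a subdomain of it), so a meta node's 307 redirect to the
// leader would otherwise arrive unauthenticated. The hook defined below
// restores it; a minimal sketch of wiring an equivalent hook into a client:
package main

import (
	"errors"
	"net/http"
)

func main() {
	client := &http.Client{
		// CheckRedirect runs before each redirected request. via[0] is the
		// original request, so its credentials can be re-applied.
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			if len(via) >= 10 {
				return errors.New("too many redirects") // same cap as the deleted hook
			}
			if auth, ok := via[0].Header["Authorization"]; ok {
				req.Header["Authorization"] = auth
			}
			return nil
		},
	}
	_ = client // client.Do(req) now keeps auth across the 307 hop to the leader
}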
-func (d *defaultClient) AuthedCheckRedirect(req *http.Request, via []*http.Request) error { - if len(via) >= 10 { - return errors.New("too many redirects") - } else if len(via) == 0 { - return nil - } - preserve := "Authorization" - if auth, ok := via[0].Header[preserve]; ok { - req.Header[preserve] = auth - } - d.Leader = req.URL.Host - return nil -} - -// Do is a cancelable function to interface with Influx Enterprise's Meta API -func (m *MetaClient) Do(ctx context.Context, path, method string, authorizer influx.Authorizer, params map[string]string, body io.Reader) (*http.Response, error) { - type result struct { - Response *http.Response - Err error - } - - resps := make(chan (result)) - go func() { - resp, err := m.client.Do(m.URL, path, method, authorizer, params, body) - resps <- result{resp, err} - }() - - select { - case resp := <-resps: - return resp.Response, resp.Err - case <-ctx.Done(): - return nil, chronograf.ErrUpstreamTimeout - } -} diff --git a/chronograf/enterprise/meta_test.go b/chronograf/enterprise/meta_test.go deleted file mode 100644 index c82107e8563..00000000000 --- a/chronograf/enterprise/meta_test.go +++ /dev/null @@ -1,1498 +0,0 @@ -package enterprise - -import ( - "bytes" - "context" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/http/httptest" - "net/url" - "reflect" - "testing" - "time" - - "github.com/influxdata/influxdb/v2/chronograf/influx" -) - -func TestMetaClient_ShowCluster(t *testing.T) { - type fields struct { - URL *url.URL - client *MockClient - } - tests := []struct { - name string - fields fields - want *Cluster - wantErr bool - }{ - { - name: "Successful Show Cluster", - fields: fields{ - URL: &url.URL{ - Host: "twinpinesmall.net:8091", - Scheme: "https", - }, - client: NewMockClient( - http.StatusOK, - []byte(`{"data":[{"id":2,"version":"1.1.0-c1.1.0","tcpAddr":"data-1.twinpinesmall.net:8088","httpAddr":"data-1.twinpinesmall.net:8086","httpScheme":"https","status":"joined"}],"meta":[{"id":1,"addr":"meta-0.twinpinesmall.net:8091","httpScheme":"http","tcpAddr":"meta-0.twinpinesmall.net:8089","version":"1.1.0-c1.1.0"}]}`), - nil, - nil, - ), - }, - want: &Cluster{ - DataNodes: []DataNode{ - { - ID: 2, - TCPAddr: "data-1.twinpinesmall.net:8088", - HTTPAddr: "data-1.twinpinesmall.net:8086", - HTTPScheme: "https", - Status: "joined", - }, - }, - MetaNodes: []Node{ - { - ID: 1, - Addr: "meta-0.twinpinesmall.net:8091", - HTTPScheme: "http", - TCPAddr: "meta-0.twinpinesmall.net:8089", - }, - }, - }, - }, - { - name: "Failed Show Cluster", - fields: fields{ - URL: &url.URL{ - Host: "twinpinesmall.net:8091", - Scheme: "https", - }, - client: NewMockClient( - http.StatusBadGateway, - nil, - nil, - fmt.Errorf("time circuits on. Flux Capacitor... fluxxing"), - ), - }, - wantErr: true, - }, - { - name: "Bad JSON from Show Cluster", - fields: fields{ - URL: &url.URL{ - Host: "twinpinesmall.net:8091", - Scheme: "https", - }, - client: NewMockClient( - http.StatusOK, - []byte(`{data}`), - nil, - nil, - ), - }, - wantErr: true, - }, - } - for _, tt := range tests { - m := &MetaClient{ - URL: tt.fields.URL, - client: tt.fields.client, - } - got, err := m.ShowCluster(context.Background()) - if (err != nil) != tt.wantErr { - t.Errorf("%q. MetaClient.ShowCluster() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("%q. MetaClient.ShowCluster() = %v, want %v", tt.name, got, tt.want) - } - if tt.wantErr { - continue - } - reqs := tt.fields.client.Requests - if len(reqs) != 1 { - t.Errorf("%q. 
MetaClient.ShowCluster() expected 1 but got %d", tt.name, len(reqs)) - continue - } - req := reqs[0] - if req.Method != "GET" { - t.Errorf("%q. MetaClient.ShowCluster() expected GET method", tt.name) - } - if req.URL.Path != "/show-cluster" { - t.Errorf("%q. MetaClient.ShowCluster() expected /show-cluster path but got %s", tt.name, req.URL.Path) - } - } -} - -func TestMetaClient_Users(t *testing.T) { - type fields struct { - URL *url.URL - client *MockClient - } - type args struct { - ctx context.Context - name *string - } - tests := []struct { - name string - fields fields - args args - want *Users - wantErr bool - }{ - { - name: "Successful Show users", - fields: fields{ - URL: &url.URL{ - Host: "twinpinesmall.net:8091", - Scheme: "https", - }, - client: NewMockClient( - http.StatusOK, - []byte(`{"users":[{"name":"admin","hash":"1234","permissions":{"":["ViewAdmin","ViewChronograf"]}}]}`), - nil, - nil, - ), - }, - args: args{ - ctx: context.Background(), - name: nil, - }, - want: &Users{ - Users: []User{ - { - Name: "admin", - Permissions: map[string][]string{ - "": []string{ - "ViewAdmin", "ViewChronograf", - }, - }, - }, - }, - }, - }, - { - name: "Successful Show users single user", - fields: fields{ - URL: &url.URL{ - Host: "twinpinesmall.net:8091", - Scheme: "https", - }, - client: NewMockClient( - http.StatusOK, - []byte(`{"users":[{"name":"admin","hash":"1234","permissions":{"":["ViewAdmin","ViewChronograf"]}}]}`), - nil, - nil, - ), - }, - args: args{ - ctx: context.Background(), - name: &[]string{"admin"}[0], - }, - want: &Users{ - Users: []User{ - { - Name: "admin", - Permissions: map[string][]string{ - "": []string{ - "ViewAdmin", "ViewChronograf", - }, - }, - }, - }, - }, - }, - { - name: "Failure Show users", - fields: fields{ - URL: &url.URL{ - Host: "twinpinesmall.net:8091", - Scheme: "https", - }, - client: NewMockClient( - http.StatusOK, - []byte(`{"users":[{"name":"admin","hash":"1234","permissions":{"":["ViewAdmin","ViewChronograf"]}}]}`), - nil, - fmt.Errorf("time circuits on. Flux Capacitor... fluxxing"), - ), - }, - args: args{ - ctx: context.Background(), - name: nil, - }, - wantErr: true, - }, - { - name: "Bad JSON from Show users", - fields: fields{ - URL: &url.URL{ - Host: "twinpinesmall.net:8091", - Scheme: "https", - }, - client: NewMockClient( - http.StatusOK, - []byte(`{foo}`), - nil, - nil, - ), - }, - args: args{ - ctx: context.Background(), - name: nil, - }, - wantErr: true, - }, - } - for _, tt := range tests { - m := &MetaClient{ - URL: tt.fields.URL, - client: tt.fields.client, - } - got, err := m.Users(tt.args.ctx, tt.args.name) - if (err != nil) != tt.wantErr { - t.Errorf("%q. MetaClient.Users() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("%q. 
MetaClient.Users() = %v, want %v", tt.name, got, tt.want) - } - } -} - -func TestMetaClient_User(t *testing.T) { - type fields struct { - URL *url.URL - client *MockClient - } - type args struct { - ctx context.Context - name string - } - tests := []struct { - name string - fields fields - args args - want *User - wantErr bool - }{ - { - name: "Successful Show users", - fields: fields{ - URL: &url.URL{ - Host: "twinpinesmall.net:8091", - Scheme: "https", - }, - client: NewMockClient( - http.StatusOK, - []byte(`{"users":[{"name":"admin","hash":"1234","permissions":{"":["ViewAdmin","ViewChronograf"]}}]}`), - nil, - nil, - ), - }, - args: args{ - ctx: context.Background(), - name: "admin", - }, - want: &User{ - Name: "admin", - Permissions: map[string][]string{ - "": []string{ - "ViewAdmin", "ViewChronograf", - }, - }, - }, - }, - { - name: "No such user", - fields: fields{ - URL: &url.URL{ - Host: "twinpinesmall.net:8091", - Scheme: "https", - }, - client: NewMockClient( - http.StatusNotFound, - []byte(`{"error":"user not found"}`), - nil, - nil, - ), - }, - args: args{ - ctx: context.Background(), - name: "unknown", - }, - wantErr: true, - }, - { - name: "Bad JSON", - fields: fields{ - URL: &url.URL{ - Host: "twinpinesmall.net:8091", - Scheme: "https", - }, - client: NewMockClient( - http.StatusNotFound, - []byte(`{BAD}`), - nil, - nil, - ), - }, - args: args{ - ctx: context.Background(), - }, - wantErr: true, - }, - } - for _, tt := range tests { - m := &MetaClient{ - URL: tt.fields.URL, - client: tt.fields.client, - } - got, err := m.User(tt.args.ctx, tt.args.name) - if (err != nil) != tt.wantErr { - t.Errorf("%q. MetaClient.User() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("%q. MetaClient.User() = %v, want %v", tt.name, got, tt.want) - } - } -} - -func TestMetaClient_CreateUser(t *testing.T) { - type fields struct { - URL *url.URL - client *MockClient - } - type args struct { - ctx context.Context - name string - passwd string - } - tests := []struct { - name string - fields fields - args args - want string - wantErr bool - }{ - { - name: "Successful Create User", - fields: fields{ - URL: &url.URL{ - Host: "twinpinesmall.net:8091", - Scheme: "https", - }, - client: NewMockClient( - http.StatusOK, - nil, - nil, - nil, - ), - }, - args: args{ - ctx: context.Background(), - name: "admin", - passwd: "hunter2", - }, - want: `{"action":"create","user":{"name":"admin","password":"hunter2"}}`, - }, - } - for _, tt := range tests { - m := &MetaClient{ - URL: tt.fields.URL, - client: tt.fields.client, - } - if err := m.CreateUser(tt.args.ctx, tt.args.name, tt.args.passwd); (err != nil) != tt.wantErr { - t.Errorf("%q. MetaClient.CreateUser() error = %v, wantErr %v", tt.name, err, tt.wantErr) - } - if tt.wantErr { - continue - } - reqs := tt.fields.client.Requests - if len(reqs) != 1 { - t.Errorf("%q. MetaClient.CreateUser() expected 1 but got %d", tt.name, len(reqs)) - continue - } - req := reqs[0] - if req.Method != "POST" { - t.Errorf("%q. MetaClient.CreateUser() expected POST method", tt.name) - } - if req.URL.Path != "/user" { - t.Errorf("%q. MetaClient.CreateUser() expected /user path but got %s", tt.name, req.URL.Path) - } - got, _ := ioutil.ReadAll(req.Body) - if string(got) != tt.want { - t.Errorf("%q. 
MetaClient.CreateUser() = %v, want %v", tt.name, string(got), tt.want) - } - } -} - -func TestMetaClient_ChangePassword(t *testing.T) { - type fields struct { - URL *url.URL - client *MockClient - } - type args struct { - ctx context.Context - name string - passwd string - } - tests := []struct { - name string - fields fields - args args - want string - wantErr bool - }{ - { - name: "Successful Change Password", - fields: fields{ - URL: &url.URL{ - Host: "twinpinesmall.net:8091", - Scheme: "https", - }, - client: NewMockClient( - http.StatusOK, - nil, - nil, - nil, - ), - }, - args: args{ - ctx: context.Background(), - name: "admin", - passwd: "hunter2", - }, - want: `{"action":"change-password","user":{"name":"admin","password":"hunter2"}}`, - }, - } - for _, tt := range tests { - m := &MetaClient{ - URL: tt.fields.URL, - client: tt.fields.client, - } - if err := m.ChangePassword(tt.args.ctx, tt.args.name, tt.args.passwd); (err != nil) != tt.wantErr { - t.Errorf("%q. MetaClient.ChangePassword() error = %v, wantErr %v", tt.name, err, tt.wantErr) - } - - if tt.wantErr { - continue - } - reqs := tt.fields.client.Requests - if len(reqs) != 1 { - t.Errorf("%q. MetaClient.ChangePassword() expected 1 but got %d", tt.name, len(reqs)) - continue - } - req := reqs[0] - if req.Method != "POST" { - t.Errorf("%q. MetaClient.ChangePassword() expected POST method", tt.name) - } - if req.URL.Path != "/user" { - t.Errorf("%q. MetaClient.ChangePassword() expected /user path but got %s", tt.name, req.URL.Path) - } - got, _ := ioutil.ReadAll(req.Body) - if string(got) != tt.want { - t.Errorf("%q. MetaClient.ChangePassword() = %v, want %v", tt.name, string(got), tt.want) - } - } -} - -func TestMetaClient_DeleteUser(t *testing.T) { - type fields struct { - URL *url.URL - client *MockClient - } - type args struct { - ctx context.Context - name string - } - tests := []struct { - name string - fields fields - args args - want string - wantErr bool - }{ - { - name: "Successful delete User", - fields: fields{ - URL: &url.URL{ - Host: "twinpinesmall.net:8091", - Scheme: "https", - }, - client: NewMockClient( - http.StatusOK, - nil, - nil, - nil, - ), - }, - args: args{ - ctx: context.Background(), - name: "admin", - }, - want: `{"action":"delete","user":{"name":"admin"}}`, - }, - } - for _, tt := range tests { - m := &MetaClient{ - URL: tt.fields.URL, - client: tt.fields.client, - } - if err := m.DeleteUser(tt.args.ctx, tt.args.name); (err != nil) != tt.wantErr { - t.Errorf("%q. MetaClient.DeleteUser() error = %v, wantErr %v", tt.name, err, tt.wantErr) - } - if tt.wantErr { - continue - } - reqs := tt.fields.client.Requests - if len(reqs) != 1 { - t.Errorf("%q. MetaClient.DeleteUser() expected 1 but got %d", tt.name, len(reqs)) - continue - } - req := reqs[0] - if req.Method != "POST" { - t.Errorf("%q. MetaClient.DeleteUser() expected POST method", tt.name) - } - if req.URL.Path != "/user" { - t.Errorf("%q. MetaClient.DeleteUser() expected /user path but got %s", tt.name, req.URL.Path) - } - got, _ := ioutil.ReadAll(req.Body) - if string(got) != tt.want { - t.Errorf("%q. 
MetaClient.DeleteUser() = %v, want %v", tt.name, string(got), tt.want) - } - } -} - -func TestMetaClient_SetUserPerms(t *testing.T) { - type fields struct { - URL *url.URL - client *MockClient - } - type args struct { - ctx context.Context - name string - perms Permissions - } - tests := []struct { - name string - fields fields - args args - wantRm string - wantAdd string - wantErr bool - }{ - { - name: "Remove all permissions for a user", - fields: fields{ - URL: &url.URL{ - Host: "twinpinesmall.net:8091", - Scheme: "https", - }, - client: NewMockClient( - http.StatusOK, - []byte(`{"users":[{"name":"admin","hash":"1234","permissions":{"":["ViewAdmin","ViewChronograf"]}}]}`), - nil, - nil, - ), - }, - args: args{ - ctx: context.Background(), - name: "admin", - }, - wantRm: `{"action":"remove-permissions","user":{"name":"admin","permissions":{"":["ViewAdmin","ViewChronograf"]}}}`, - }, - { - name: "Remove some permissions and add others", - fields: fields{ - URL: &url.URL{ - Host: "twinpinesmall.net:8091", - Scheme: "https", - }, - client: NewMockClient( - http.StatusOK, - []byte(`{"users":[{"name":"admin","hash":"1234","permissions":{"":["ViewAdmin","ViewChronograf"]}}]}`), - nil, - nil, - ), - }, - args: args{ - ctx: context.Background(), - name: "admin", - perms: Permissions{ - "telegraf": []string{ - "ReadData", - }, - }, - }, - wantRm: `{"action":"remove-permissions","user":{"name":"admin","permissions":{"":["ViewAdmin","ViewChronograf"]}}}`, - wantAdd: `{"action":"add-permissions","user":{"name":"admin","permissions":{"telegraf":["ReadData"]}}}`, - }, - } - for _, tt := range tests { - m := &MetaClient{ - URL: tt.fields.URL, - client: tt.fields.client, - } - if err := m.SetUserPerms(tt.args.ctx, tt.args.name, tt.args.perms); (err != nil) != tt.wantErr { - t.Errorf("%q. MetaClient.SetUserPerms() error = %v, wantErr %v", tt.name, err, tt.wantErr) - } - if tt.wantErr { - continue - } - reqs := tt.fields.client.Requests - if len(reqs) < 2 { - t.Errorf("%q. MetaClient.SetUserPerms() expected 2 but got %d", tt.name, len(reqs)) - continue - } - - usr := reqs[0] - if usr.Method != "GET" { - t.Errorf("%q. MetaClient.SetUserPerms() expected GET method", tt.name) - } - if usr.URL.Path != "/user" { - t.Errorf("%q. MetaClient.SetUserPerms() expected /user path but got %s", tt.name, usr.URL.Path) - } - - prm := reqs[1] - if prm.Method != "POST" { - t.Errorf("%q. MetaClient.SetUserPerms() expected GET method", tt.name) - } - if prm.URL.Path != "/user" { - t.Errorf("%q. MetaClient.SetUserPerms() expected /user path but got %s", tt.name, prm.URL.Path) - } - - got, _ := ioutil.ReadAll(prm.Body) - if string(got) != tt.wantRm { - t.Errorf("%q. MetaClient.SetUserPerms() = %v, want %v", tt.name, string(got), tt.wantRm) - } - if tt.wantAdd != "" { - prm := reqs[2] - if prm.Method != "POST" { - t.Errorf("%q. MetaClient.SetUserPerms() expected GET method", tt.name) - } - if prm.URL.Path != "/user" { - t.Errorf("%q. MetaClient.SetUserPerms() expected /user path but got %s", tt.name, prm.URL.Path) - } - - got, _ := ioutil.ReadAll(prm.Body) - if string(got) != tt.wantAdd { - t.Errorf("%q. 
MetaClient.SetUserPerms() = %v, want %v", tt.name, string(got), tt.wantAdd) - } - } - } -} - -func TestMetaClient_Roles(t *testing.T) { - type fields struct { - URL *url.URL - client *MockClient - } - type args struct { - ctx context.Context - name *string - } - tests := []struct { - name string - fields fields - args args - want *Roles - wantErr bool - }{ - { - name: "Successful Show role", - fields: fields{ - URL: &url.URL{ - Host: "twinpinesmall.net:8091", - Scheme: "https", - }, - client: NewMockClient( - http.StatusOK, - []byte(`{"roles":[{"name":"admin","users":["marty"],"permissions":{"":["ViewAdmin","ViewChronograf"]}}]}`), - nil, - nil, - ), - }, - args: args{ - ctx: context.Background(), - name: nil, - }, - want: &Roles{ - Roles: []Role{ - { - Name: "admin", - Permissions: map[string][]string{ - "": []string{ - "ViewAdmin", "ViewChronograf", - }, - }, - Users: []string{"marty"}, - }, - }, - }, - }, - { - name: "Successful Show role single role", - fields: fields{ - URL: &url.URL{ - Host: "twinpinesmall.net:8091", - Scheme: "https", - }, - client: NewMockClient( - http.StatusOK, - []byte(`{"roles":[{"name":"admin","users":["marty"],"permissions":{"":["ViewAdmin","ViewChronograf"]}}]}`), - nil, - nil, - ), - }, - args: args{ - ctx: context.Background(), - name: &[]string{"admin"}[0], - }, - want: &Roles{ - Roles: []Role{ - { - Name: "admin", - Permissions: map[string][]string{ - "": []string{ - "ViewAdmin", "ViewChronograf", - }, - }, - Users: []string{"marty"}, - }, - }, - }, - }, - } - for _, tt := range tests { - m := &MetaClient{ - URL: tt.fields.URL, - client: tt.fields.client, - } - got, err := m.Roles(tt.args.ctx, tt.args.name) - if (err != nil) != tt.wantErr { - t.Errorf("%q. MetaClient.Roles() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("%q. MetaClient.Roles() = %v, want %v", tt.name, got, tt.want) - } - } -} - -func TestMetaClient_Role(t *testing.T) { - type fields struct { - URL *url.URL - client *MockClient - } - type args struct { - ctx context.Context - name string - } - tests := []struct { - name string - fields fields - args args - want *Role - wantErr bool - }{ - { - name: "Successful Show role", - fields: fields{ - URL: &url.URL{ - Host: "twinpinesmall.net:8091", - Scheme: "https", - }, - client: NewMockClient( - http.StatusOK, - []byte(`{"roles":[{"name":"admin","users":["marty"],"permissions":{"":["ViewAdmin","ViewChronograf"]}}]}`), - nil, - nil, - ), - }, - args: args{ - ctx: context.Background(), - name: "admin", - }, - want: &Role{ - Name: "admin", - Permissions: map[string][]string{ - "": []string{ - "ViewAdmin", "ViewChronograf", - }, - }, - Users: []string{"marty"}, - }, - }, - { - name: "No such role", - fields: fields{ - URL: &url.URL{ - Host: "twinpinesmall.net:8091", - Scheme: "https", - }, - client: NewMockClient( - http.StatusNotFound, - []byte(`{"error":"user not found"}`), - nil, - nil, - ), - }, - args: args{ - ctx: context.Background(), - name: "unknown", - }, - wantErr: true, - }, - } - for _, tt := range tests { - m := &MetaClient{ - URL: tt.fields.URL, - client: tt.fields.client, - } - got, err := m.Role(tt.args.ctx, tt.args.name) - if (err != nil) != tt.wantErr { - t.Errorf("%q. MetaClient.Role() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("%q. 
MetaClient.Role() = %v, want %v", tt.name, got, tt.want) - } - } -} - -func TestMetaClient_UserRoles(t *testing.T) { - type fields struct { - URL *url.URL - client *MockClient - } - type args struct { - ctx context.Context - name *string - } - tests := []struct { - name string - fields fields - args args - want map[string]Roles - wantErr bool - }{ - { - name: "Successful Show all roles", - fields: fields{ - URL: &url.URL{ - Host: "twinpinesmall.net:8091", - Scheme: "https", - }, - client: NewMockClient( - http.StatusOK, - []byte(`{"roles":[{"name":"timetravelers","users":["marty","docbrown"],"permissions":{"":["ViewAdmin","ViewChronograf"]}},{"name":"mcfly","users":["marty","george"],"permissions":{"":["ViewAdmin","ViewChronograf"]}}]}`), - nil, - nil, - ), - }, - args: args{ - ctx: context.Background(), - name: nil, - }, - want: map[string]Roles{ - "marty": Roles{ - Roles: []Role{ - { - Name: "timetravelers", - Permissions: map[string][]string{ - "": []string{ - "ViewAdmin", "ViewChronograf", - }, - }, - Users: []string{"marty", "docbrown"}, - }, - { - Name: "mcfly", - Permissions: map[string][]string{ - "": []string{ - "ViewAdmin", "ViewChronograf", - }, - }, - Users: []string{"marty", "george"}, - }, - }, - }, - "docbrown": Roles{ - Roles: []Role{ - { - Name: "timetravelers", - Permissions: map[string][]string{ - "": []string{ - "ViewAdmin", "ViewChronograf", - }, - }, - Users: []string{"marty", "docbrown"}, - }, - }, - }, - "george": Roles{ - Roles: []Role{ - { - Name: "mcfly", - Permissions: map[string][]string{ - "": []string{ - "ViewAdmin", "ViewChronograf", - }, - }, - Users: []string{"marty", "george"}, - }, - }, - }, - }, - }, - } - for _, tt := range tests { - m := &MetaClient{ - URL: tt.fields.URL, - client: tt.fields.client, - } - got, err := m.UserRoles(tt.args.ctx) - if (err != nil) != tt.wantErr { - t.Errorf("%q. MetaClient.UserRoles() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("%q. MetaClient.UserRoles() = %v, want %v", tt.name, got, tt.want) - } - } -} - -func TestMetaClient_CreateRole(t *testing.T) { - type fields struct { - URL *url.URL - client *MockClient - } - type args struct { - ctx context.Context - name string - } - tests := []struct { - name string - fields fields - args args - want string - wantErr bool - }{ - { - name: "Successful Create Role", - fields: fields{ - URL: &url.URL{ - Host: "twinpinesmall.net:8091", - Scheme: "https", - }, - client: NewMockClient( - http.StatusOK, - nil, - nil, - nil, - ), - }, - args: args{ - ctx: context.Background(), - name: "admin", - }, - want: `{"action":"create","role":{"name":"admin"}}`, - }, - } - for _, tt := range tests { - m := &MetaClient{ - URL: tt.fields.URL, - client: tt.fields.client, - } - if err := m.CreateRole(tt.args.ctx, tt.args.name); (err != nil) != tt.wantErr { - t.Errorf("%q. MetaClient.CreateRole() error = %v, wantErr %v", tt.name, err, tt.wantErr) - } - reqs := tt.fields.client.Requests - if len(reqs) != 1 { - t.Errorf("%q. MetaClient.CreateRole() expected 1 but got %d", tt.name, len(reqs)) - continue - } - req := reqs[0] - if req.Method != "POST" { - t.Errorf("%q. MetaClient.CreateRole() expected POST method", tt.name) - } - if req.URL.Path != "/role" { - t.Errorf("%q. MetaClient.CreateRole() expected /role path but got %s", tt.name, req.URL.Path) - } - got, _ := ioutil.ReadAll(req.Body) - if string(got) != tt.want { - t.Errorf("%q. 
MetaClient.CreateRole() = %v, want %v", tt.name, string(got), tt.want) - } - } -} - -func TestMetaClient_DeleteRole(t *testing.T) { - type fields struct { - URL *url.URL - client *MockClient - } - type args struct { - ctx context.Context - name string - } - tests := []struct { - name string - fields fields - args args - want string - wantErr bool - }{ - { - name: "Successful delete role", - fields: fields{ - URL: &url.URL{ - Host: "twinpinesmall.net:8091", - Scheme: "https", - }, - client: NewMockClient( - http.StatusOK, - nil, - nil, - nil, - ), - }, - args: args{ - ctx: context.Background(), - name: "admin", - }, - want: `{"action":"delete","role":{"name":"admin"}}`, - }, - } - for _, tt := range tests { - m := &MetaClient{ - URL: tt.fields.URL, - client: tt.fields.client, - } - if err := m.DeleteRole(tt.args.ctx, tt.args.name); (err != nil) != tt.wantErr { - t.Errorf("%q. MetaClient.DeleteRole() error = %v, wantErr %v", tt.name, err, tt.wantErr) - } - if tt.wantErr { - continue - } - reqs := tt.fields.client.Requests - if len(reqs) != 1 { - t.Errorf("%q. MetaClient.DeleteRole() expected 1 but got %d", tt.name, len(reqs)) - continue - } - req := reqs[0] - if req.Method != "POST" { - t.Errorf("%q. MetaClient.DeleDeleteRoleteUser() expected POST method", tt.name) - } - if req.URL.Path != "/role" { - t.Errorf("%q. MetaClient.DeleteRole() expected /role path but got %s", tt.name, req.URL.Path) - } - got, _ := ioutil.ReadAll(req.Body) - if string(got) != tt.want { - t.Errorf("%q. MetaClient.DeleteRole() = %v, want %v", tt.name, string(got), tt.want) - } - } -} - -func TestMetaClient_SetRolePerms(t *testing.T) { - type fields struct { - URL *url.URL - client *MockClient - } - type args struct { - ctx context.Context - name string - perms Permissions - } - tests := []struct { - name string - fields fields - args args - wantRm string - wantAdd string - wantErr bool - }{ - { - name: "Remove all roles from user", - fields: fields{ - URL: &url.URL{ - Host: "twinpinesmall.net:8091", - Scheme: "https", - }, - client: NewMockClient( - http.StatusOK, - []byte(`{"roles":[{"name":"admin","users":["marty"],"permissions":{"":["ViewAdmin","ViewChronograf"]}}]}`), - nil, - nil, - ), - }, - args: args{ - ctx: context.Background(), - name: "admin", - }, - wantRm: `{"action":"remove-permissions","role":{"name":"admin","permissions":{"":["ViewAdmin","ViewChronograf"]}}}`, - }, - { - name: "Remove some users and add permissions to other", - fields: fields{ - URL: &url.URL{ - Host: "twinpinesmall.net:8091", - Scheme: "https", - }, - client: NewMockClient( - http.StatusOK, - []byte(`{"roles":[{"name":"admin","users":["marty"],"permissions":{"":["ViewAdmin","ViewChronograf"]}}]}`), - nil, - nil, - ), - }, - args: args{ - ctx: context.Background(), - name: "admin", - perms: Permissions{ - "telegraf": []string{ - "ReadData", - }, - }, - }, - wantRm: `{"action":"remove-permissions","role":{"name":"admin","permissions":{"":["ViewAdmin","ViewChronograf"]}}}`, - wantAdd: `{"action":"add-permissions","role":{"name":"admin","permissions":{"telegraf":["ReadData"]}}}`, - }, - } - for _, tt := range tests { - m := &MetaClient{ - URL: tt.fields.URL, - client: tt.fields.client, - } - if err := m.SetRolePerms(tt.args.ctx, tt.args.name, tt.args.perms); (err != nil) != tt.wantErr { - t.Errorf("%q. MetaClient.SetRolePerms() error = %v, wantErr %v", tt.name, err, tt.wantErr) - } - if tt.wantErr { - continue - } - reqs := tt.fields.client.Requests - if len(reqs) < 2 { - t.Errorf("%q. 
MetaClient.SetRolePerms() expected 2 but got %d", tt.name, len(reqs)) - continue - } - - usr := reqs[0] - if usr.Method != "GET" { - t.Errorf("%q. MetaClient.SetRolePerms() expected GET method", tt.name) - } - if usr.URL.Path != "/role" { - t.Errorf("%q. MetaClient.SetRolePerms() expected /user path but got %s", tt.name, usr.URL.Path) - } - - prm := reqs[1] - if prm.Method != "POST" { - t.Errorf("%q. MetaClient.SetRolePerms() expected GET method", tt.name) - } - if prm.URL.Path != "/role" { - t.Errorf("%q. MetaClient.SetRolePerms() expected /role path but got %s", tt.name, prm.URL.Path) - } - - got, _ := ioutil.ReadAll(prm.Body) - if string(got) != tt.wantRm { - t.Errorf("%q. MetaClient.SetRolePerms() removal = \n%v\n, want \n%v\n", tt.name, string(got), tt.wantRm) - } - if tt.wantAdd != "" { - prm := reqs[2] - if prm.Method != "POST" { - t.Errorf("%q. MetaClient.SetRolePerms() expected GET method", tt.name) - } - if prm.URL.Path != "/role" { - t.Errorf("%q. MetaClient.SetRolePerms() expected /role path but got %s", tt.name, prm.URL.Path) - } - - got, _ := ioutil.ReadAll(prm.Body) - if string(got) != tt.wantAdd { - t.Errorf("%q. MetaClient.SetRolePerms() addition = \n%v\n, want \n%v\n", tt.name, string(got), tt.wantAdd) - } - } - } -} - -func TestMetaClient_SetRoleUsers(t *testing.T) { - type fields struct { - URL *url.URL - client *MockClient - } - type args struct { - ctx context.Context - name string - users []string - } - tests := []struct { - name string - fields fields - args args - wants []string - wantErr bool - }{ - { - name: "Successful set users role (remove user from role)", - fields: fields{ - URL: &url.URL{ - Host: "twinpinesmall.net:8091", - Scheme: "https", - }, - client: NewMockClient( - http.StatusOK, - []byte(`{"roles":[{"name":"admin","users":["marty"],"permissions":{"":["ViewAdmin","ViewChronograf"]}}]}`), - nil, - nil, - ), - }, - args: args{ - ctx: context.Background(), - name: "admin", - }, - wants: []string{`{"action":"remove-users","role":{"name":"admin","users":["marty"]}}`}, - }, - { - name: "Successful set single user role", - fields: fields{ - URL: &url.URL{ - Host: "twinpinesmall.net:8091", - Scheme: "https", - }, - client: NewMockClient( - http.StatusOK, - []byte(`{"roles":[{"name":"admin","users":[],"permissions":{"":["ViewAdmin","ViewChronograf"]}}]}`), - nil, - nil, - ), - }, - args: args{ - ctx: context.Background(), - name: "admin", - users: []string{"marty"}, - }, - wants: []string{ - `{"action":"add-users","role":{"name":"admin","users":["marty"]}}`, - }, - }, - } - for _, tt := range tests { - m := &MetaClient{ - URL: tt.fields.URL, - client: tt.fields.client, - } - if err := m.SetRoleUsers(tt.args.ctx, tt.args.name, tt.args.users); (err != nil) != tt.wantErr { - t.Errorf("%q. MetaClient.SetRoleUsers() error = %v, wantErr %v", tt.name, err, tt.wantErr) - } - - if tt.wantErr { - continue - } - reqs := tt.fields.client.Requests - if len(reqs) != len(tt.wants)+1 { - t.Errorf("%q. MetaClient.SetRoleUsers() expected %d but got %d", tt.name, len(tt.wants)+1, len(reqs)) - continue - } - - usr := reqs[0] - if usr.Method != "GET" { - t.Errorf("%q. MetaClient.SetRoleUsers() expected GET method", tt.name) - } - if usr.URL.Path != "/role" { - t.Errorf("%q. MetaClient.SetRoleUsers() expected /user path but got %s", tt.name, usr.URL.Path) - } - for i := range tt.wants { - prm := reqs[i+1] - if prm.Method != "POST" { - t.Errorf("%q. MetaClient.SetRoleUsers() expected GET method", tt.name) - } - if prm.URL.Path != "/role" { - t.Errorf("%q. 
MetaClient.SetRoleUsers() expected /role path but got %s", tt.name, prm.URL.Path) - } - - got, _ := ioutil.ReadAll(prm.Body) - if string(got) != tt.wants[i] { - t.Errorf("%q. MetaClient.SetRoleUsers() = %v, want %v", tt.name, string(got), tt.wants[i]) - } - } - } -} - -type MockClient struct { - Code int // HTTP Status code - Body []byte - HeaderMap http.Header - Err error - - Requests []*http.Request -} - -func NewMockClient(code int, body []byte, headers http.Header, err error) *MockClient { - return &MockClient{ - Code: code, - Body: body, - HeaderMap: headers, - Err: err, - Requests: make([]*http.Request, 0), - } -} - -func (c *MockClient) Do(URL *url.URL, path, method string, authorizer influx.Authorizer, params map[string]string, body io.Reader) (*http.Response, error) { - if c == nil { - return nil, fmt.Errorf("nil MockClient") - } - if URL == nil { - return nil, fmt.Errorf("nil url") - } - if c.Err != nil { - return nil, c.Err - } - - // Record the request in the mock client - p := url.Values{} - for k, v := range params { - p.Add(k, v) - } - - URL.Path = path - URL.RawQuery = p.Encode() - - req, err := http.NewRequest(method, URL.String(), body) - if err != nil { - return nil, err - } - c.Requests = append(c.Requests, req) - - return &http.Response{ - Proto: "HTTP/1.1", - ProtoMajor: 1, - ProtoMinor: 1, - StatusCode: c.Code, - Status: http.StatusText(c.Code), - Header: c.HeaderMap, - Body: ioutil.NopCloser(bytes.NewReader(c.Body)), - }, nil -} - -func Test_AuthedCheckRedirect_Do(t *testing.T) { - var ts2URL string - ts1 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - want := http.Header{ - "Referer": []string{ts2URL}, - "Accept-Encoding": []string{"gzip"}, - "Authorization": []string{"hunter2"}, - } - for k, v := range want { - if !reflect.DeepEqual(r.Header[k], v) { - t.Errorf("Request.Header = %#v; want %#v", r.Header[k], v) - } - } - if t.Failed() { - w.Header().Set("Result", "got errors") - } else { - w.Header().Set("Result", "ok") - } - })) - defer ts1.Close() - - ts2 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - http.Redirect(w, r, ts1.URL, http.StatusFound) - })) - defer ts2.Close() - ts2URL = ts2.URL - - tr := &http.Transport{} - defer tr.CloseIdleConnections() - d := &defaultClient{} - c := &http.Client{ - Transport: tr, - CheckRedirect: d.AuthedCheckRedirect, - } - - req, _ := http.NewRequest("GET", ts2.URL, nil) - req.Header.Add("Cookie", "foo=bar") - req.Header.Add("Authorization", "hunter2") - req.Header.Add("Howdy", "doody") - req.Header.Set("User-Agent", "Darth Vader, an extraterrestrial from the Planet Vulcan") - - res, err := c.Do(req) - if err != nil { - t.Fatal(err) - } - - defer res.Body.Close() - if res.StatusCode != 200 { - t.Fatal(res.Status) - } - - if got := res.Header.Get("Result"); got != "ok" { - t.Errorf("result = %q; want ok", got) - } -} - -func Test_defaultClient_Do(t *testing.T) { - type args struct { - path string - method string - authorizer influx.Authorizer - params map[string]string - body io.Reader - } - tests := []struct { - name string - args args - want string - wantErr bool - }{ - { - name: "test authorizer", - args: args{ - path: "/tictactoe", - method: "GET", - authorizer: &influx.BasicAuth{ - Username: "Steven Falken", - Password: "JOSHUA", - }, - }, - want: "Basic U3RldmVuIEZhbGtlbjpKT1NIVUE=", - }, - { - name: "test authorizer", - args: args{ - path: "/tictactoe", - method: "GET", - authorizer: &influx.BearerJWT{ - Username: "minifig", - SharedSecret: 
"legos", - Now: func() time.Time { return time.Time{} }, - }, - }, - want: "Bearer eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJleHAiOi02MjEzNTU5Njc0MCwidXNlcm5hbWUiOiJtaW5pZmlnIn0.uwFGBQ3MykqEmk9Zx0sBdJGefcESVEXG_qt0C1J8b_aS62EAES-Q1FwtURsbITNvSnfzMxYFnkbSG0AA1pEzWw", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if r.URL.Path != "/tictactoe" { - t.Fatal("Expected request to '/query' but was", r.URL.Path) - } - got, ok := r.Header["Authorization"] - if !ok { - t.Fatal("No Authorization header") - } - if got[0] != tt.want { - t.Fatalf("Expected auth %s got %s", tt.want, got) - } - rw.Write([]byte(`{}`)) - })) - defer ts.Close() - - d := &defaultClient{} - u, _ := url.Parse(ts.URL) - _, err := d.Do(u, tt.args.path, tt.args.method, tt.args.authorizer, tt.args.params, tt.args.body) - if (err != nil) != tt.wantErr { - t.Errorf("defaultClient.Do() error = %v, wantErr %v", err, tt.wantErr) - return - } - }) - } -} diff --git a/chronograf/enterprise/mocks_test.go b/chronograf/enterprise/mocks_test.go deleted file mode 100644 index 628044ccf0a..00000000000 --- a/chronograf/enterprise/mocks_test.go +++ /dev/null @@ -1,142 +0,0 @@ -package enterprise_test - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/enterprise" -) - -type ControlClient struct { - Cluster *enterprise.Cluster - ShowClustersCalled bool -} - -func NewMockControlClient(addr string) *ControlClient { - _, err := url.Parse(addr) - if err != nil { - panic(err) - } - - return &ControlClient{ - Cluster: &enterprise.Cluster{ - DataNodes: []enterprise.DataNode{ - enterprise.DataNode{ - HTTPAddr: addr, - }, - }, - }, - } -} - -func (cc *ControlClient) ShowCluster(context.Context) (*enterprise.Cluster, error) { - cc.ShowClustersCalled = true - return cc.Cluster, nil -} - -func (cc *ControlClient) User(ctx context.Context, name string) (*enterprise.User, error) { - return nil, nil -} - -func (cc *ControlClient) CreateUser(ctx context.Context, name, passwd string) error { - return nil -} - -func (cc *ControlClient) DeleteUser(ctx context.Context, name string) error { - return nil -} - -func (cc *ControlClient) ChangePassword(ctx context.Context, name, passwd string) error { - return nil -} - -func (cc *ControlClient) Users(ctx context.Context, name *string) (*enterprise.Users, error) { - return nil, nil -} - -func (cc *ControlClient) SetUserPerms(ctx context.Context, name string, perms enterprise.Permissions) error { - return nil -} - -func (cc *ControlClient) CreateRole(ctx context.Context, name string) error { - return nil -} - -func (cc *ControlClient) Role(ctx context.Context, name string) (*enterprise.Role, error) { - return nil, nil -} - -func (ccm *ControlClient) UserRoles(ctx context.Context) (map[string]enterprise.Roles, error) { - return nil, nil -} - -func (ccm *ControlClient) Roles(ctx context.Context, name *string) (*enterprise.Roles, error) { - return nil, nil -} - -func (cc *ControlClient) DeleteRole(ctx context.Context, name string) error { - return nil -} - -func (cc *ControlClient) SetRolePerms(ctx context.Context, name string, perms enterprise.Permissions) error { - return nil -} - -func (cc *ControlClient) SetRoleUsers(ctx context.Context, name string, users []string) error { - return nil -} - -func (cc *ControlClient) AddRoleUsers(ctx context.Context, name string, users []string) error { - 
return nil -} - -func (cc *ControlClient) RemoveRoleUsers(ctx context.Context, name string, users []string) error { - return nil -} - -type TimeSeries struct { - URLs []string - Response Response - - QueryCtr int -} - -type Response struct{} - -func (r *Response) MarshalJSON() ([]byte, error) { - // Marshal an anonymous empty struct; marshaling r itself would recurse back into this method. - return json.Marshal(struct{}{}) -} - -func (ts *TimeSeries) Query(ctx context.Context, q chronograf.Query) (chronograf.Response, error) { - ts.QueryCtr++ - return &Response{}, nil -} - -func (ts *TimeSeries) Connect(ctx context.Context, src *chronograf.Source) error { - return nil -} - -func (ts *TimeSeries) Write(ctx context.Context, points []chronograf.Point) error { - return nil -} - -func (ts *TimeSeries) Users(ctx context.Context) chronograf.UsersStore { - return nil -} - -func (ts *TimeSeries) Roles(ctx context.Context) (chronograf.RolesStore, error) { - return nil, nil -} - -func (ts *TimeSeries) Permissions(ctx context.Context) chronograf.Permissions { - return chronograf.Permissions{} -} - -func NewMockTimeSeries(urls ...string) *TimeSeries { - return &TimeSeries{ - URLs: urls, - Response: Response{}, - } -} diff --git a/chronograf/enterprise/roles.go b/chronograf/enterprise/roles.go deleted file mode 100644 index 628a091cce0..00000000000 --- a/chronograf/enterprise/roles.go +++ /dev/null @@ -1,113 +0,0 @@ -package enterprise - -import ( - "context" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -// RolesStore uses a control client to operate on Influx Enterprise roles. Roles are -// groups of permissions applied to groups of users. -type RolesStore struct { - Ctrl - Logger chronograf.Logger -} - -// Add creates a new Role in Influx Enterprise. -// This must be done in three smaller steps: creating, setting permissions, setting users. -func (c *RolesStore) Add(ctx context.Context, u *chronograf.Role) (*chronograf.Role, error) { - if err := c.Ctrl.CreateRole(ctx, u.Name); err != nil { - return nil, err - } - if err := c.Ctrl.SetRolePerms(ctx, u.Name, ToEnterprise(u.Permissions)); err != nil { - return nil, err - } - - users := make([]string, len(u.Users)) - for i, u := range u.Users { - users[i] = u.Name - } - if err := c.Ctrl.SetRoleUsers(ctx, u.Name, users); err != nil { - return nil, err - } - return u, nil -} - -// Delete the Role from Influx Enterprise -func (c *RolesStore) Delete(ctx context.Context, u *chronograf.Role) error { - return c.Ctrl.DeleteRole(ctx, u.Name) -} - -// Get retrieves a Role if name exists. -func (c *RolesStore) Get(ctx context.Context, name string) (*chronograf.Role, error) { - role, err := c.Ctrl.Role(ctx, name) - if err != nil { - return nil, err - } - - // Hydrate all the users to gather their permissions.
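- // Each user below is fetched with its own meta-API request, so the cost of
- // hydrating a role grows linearly with its membership.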
- users := make([]chronograf.User, len(role.Users)) - for i, u := range role.Users { - user, err := c.Ctrl.User(ctx, u) - if err != nil { - return nil, err - } - users[i] = chronograf.User{ - Name: user.Name, - Permissions: ToChronograf(user.Permissions), - } - } - return &chronograf.Role{ - Name: role.Name, - Permissions: ToChronograf(role.Permissions), - Users: users, - }, nil -} - -// Update the Role's permissions and users -func (c *RolesStore) Update(ctx context.Context, u *chronograf.Role) error { - if u.Permissions != nil { - perms := ToEnterprise(u.Permissions) - if err := c.Ctrl.SetRolePerms(ctx, u.Name, perms); err != nil { - return err - } - } - if u.Users != nil { - users := make([]string, len(u.Users)) - for i, u := range u.Users { - users[i] = u.Name - } - return c.Ctrl.SetRoleUsers(ctx, u.Name, users) - } - return nil -} - -// All returns all Roles in Influx -func (c *RolesStore) All(ctx context.Context) ([]chronograf.Role, error) { - all, err := c.Ctrl.Roles(ctx, nil) - if err != nil { - return nil, err - } - - return all.ToChronograf(), nil -} - -// ToChronograf converts enterprise roles to chronograf -func (r *Roles) ToChronograf() []chronograf.Role { - res := make([]chronograf.Role, len(r.Roles)) - for i, role := range r.Roles { - users := make([]chronograf.User, len(role.Users)) - for i, user := range role.Users { - users[i] = chronograf.User{ - Name: user, - } - } - - res[i] = chronograf.Role{ - Name: role.Name, - Permissions: ToChronograf(role.Permissions), - Users: users, - } - } - return res -} diff --git a/chronograf/enterprise/roles_test.go b/chronograf/enterprise/roles_test.go deleted file mode 100644 index d82fef0a7fa..00000000000 --- a/chronograf/enterprise/roles_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package enterprise - -import ( - "reflect" - "testing" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -func TestRoles_ToChronograf(t *testing.T) { - tests := []struct { - name string - roles []Role - want []chronograf.Role - }{ - { - name: "empty roles", - roles: []Role{}, - want: []chronograf.Role{}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - r := &Roles{ - Roles: tt.roles, - } - if got := r.ToChronograf(); !reflect.DeepEqual(got, tt.want) { - t.Errorf("Roles.ToChronograf() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/chronograf/enterprise/types.go b/chronograf/enterprise/types.go deleted file mode 100644 index d3c241ca283..00000000000 --- a/chronograf/enterprise/types.go +++ /dev/null @@ -1,71 +0,0 @@ -package enterprise - -// Cluster is a collection of data nodes and non-data nodes within a -// Plutonium cluster. -type Cluster struct { - DataNodes []DataNode `json:"data"` - MetaNodes []Node `json:"meta"` -} - -// DataNode represents a data node in an Influx Enterprise Cluster -type DataNode struct { - ID uint64 `json:"id"` // Meta store ID. - TCPAddr string `json:"tcpAddr"` // RPC addr, e.g., host:8088. - HTTPAddr string `json:"httpAddr"` // Client addr, e.g., host:8086. - HTTPScheme string `json:"httpScheme"` // "http" or "https" for HTTP addr. - Status string `json:"status,omitempty"` // The cluster status of the node. -} - -// Node represents any meta or data node in an Influx Enterprise cluster -type Node struct { - ID uint64 `json:"id"` - Addr string `json:"addr"` - HTTPScheme string `json:"httpScheme"` - TCPAddr string `json:"tcpAddr"` -} - -// Permissions maps resources to a set of permissions. -// Specifically, it maps a database to a set of permissions.
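-// For example (permission names as used in the tests below):
-// Permissions{"": {"ViewChronograf"}, "telegraf": {"ReadData", "WriteData"}}
-// grants ViewChronograf on every database plus read and write access on "telegraf";
-// the empty-string key is how Enterprise denotes all databases.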
-type Permissions map[string][]string - -// User represents an enterprise user. -type User struct { - Name string `json:"name"` - Password string `json:"password,omitempty"` - Permissions Permissions `json:"permissions,omitempty"` -} - -// Users represents a set of enterprise users. -type Users struct { - Users []User `json:"users,omitempty"` -} - -// UserAction represents an action to be taken with a user. -type UserAction struct { - Action string `json:"action"` - User *User `json:"user"` -} - -// Role is a restricted set of permissions assigned to a set of users. -type Role struct { - Name string `json:"name"` - NewName string `json:"newName,omitempty"` - Permissions Permissions `json:"permissions,omitempty"` - Users []string `json:"users,omitempty"` -} - -// Roles is a set of roles -type Roles struct { - Roles []Role `json:"roles,omitempty"` -} - -// RoleAction represents an action to be taken with a role. -type RoleAction struct { - Action string `json:"action"` - Role *Role `json:"role"` -} - -// Error is the JSON error message returned by Influx Enterprise's meta API. -type Error struct { - Error string `json:"error"` -} diff --git a/chronograf/enterprise/users.go b/chronograf/enterprise/users.go deleted file mode 100644 index 4651b8cef69..00000000000 --- a/chronograf/enterprise/users.go +++ /dev/null @@ -1,197 +0,0 @@ -package enterprise - -import ( - "context" - "fmt" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -// UserStore uses a control client to operate on Influx Enterprise users -type UserStore struct { - Ctrl - Logger chronograf.Logger -} - -// Add creates a new User in Influx Enterprise -func (c *UserStore) Add(ctx context.Context, u *chronograf.User) (*chronograf.User, error) { - if err := c.Ctrl.CreateUser(ctx, u.Name, u.Passwd); err != nil { - return nil, err - } - perms := ToEnterprise(u.Permissions) - - if err := c.Ctrl.SetUserPerms(ctx, u.Name, perms); err != nil { - return nil, err - } - for _, role := range u.Roles { - if err := c.Ctrl.AddRoleUsers(ctx, role.Name, []string{u.Name}); err != nil { - return nil, err - } - } - - return c.Get(ctx, chronograf.UserQuery{Name: &u.Name}) -} - -// Delete the User from Influx Enterprise -func (c *UserStore) Delete(ctx context.Context, u *chronograf.User) error { - return c.Ctrl.DeleteUser(ctx, u.Name) -} - -// Num returns the number of users in Influx -func (c *UserStore) Num(ctx context.Context) (int, error) { - all, err := c.All(ctx) - if err != nil { - return 0, err - } - - return len(all), nil -} - -// Get retrieves a user if name exists. -func (c *UserStore) Get(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil { - return nil, fmt.Errorf("query must specify name") - } - u, err := c.Ctrl.User(ctx, *q.Name) - if err != nil { - return nil, err - } - - ur, err := c.Ctrl.UserRoles(ctx) - if err != nil { - return nil, err - } - - role := ur[*q.Name] - cr := role.ToChronograf() - // For now we are removing all users from a role being returned. - for i, r := range cr { - r.Users = []chronograf.User{} - cr[i] = r - } - return &chronograf.User{ - Name: u.Name, - Permissions: ToChronograf(u.Permissions), - Roles: cr, - }, nil -} - -// Update the user's password, permissions, or roles -func (c *UserStore) Update(ctx context.Context, u *chronograf.User) error { - // Only allow one type of change at a time.
If it is a password - // change then do it and return without any changes to permissions - if u.Passwd != "" { - return c.Ctrl.ChangePassword(ctx, u.Name, u.Passwd) - } - - if u.Roles != nil { - // Make a list of the roles we want this user to have: - want := make([]string, len(u.Roles)) - for i, r := range u.Roles { - want[i] = r.Name - } - - // Find the list of all roles this user is currently in - userRoles, err := c.UserRoles(ctx) - if err != nil { - return err - } - // Make a list of the roles the user currently has - roles := userRoles[u.Name] - have := make([]string, len(roles.Roles)) - for i, r := range roles.Roles { - have[i] = r.Name - } - - // Calculate the roles the user will be removed from and the roles the user - // will be added to. - revoke, add := Difference(want, have) - - // First, add the user to the new roles - for _, role := range add { - if err := c.Ctrl.AddRoleUsers(ctx, role, []string{u.Name}); err != nil { - return err - } - } - - // ... and now remove the user from any extra roles - for _, role := range revoke { - if err := c.Ctrl.RemoveRoleUsers(ctx, role, []string{u.Name}); err != nil { - return err - } - } - } - - if u.Permissions != nil { - perms := ToEnterprise(u.Permissions) - return c.Ctrl.SetUserPerms(ctx, u.Name, perms) - } - return nil -} - -// All returns all users in Influx -func (c *UserStore) All(ctx context.Context) ([]chronograf.User, error) { - all, err := c.Ctrl.Users(ctx, nil) - if err != nil { - return nil, err - } - - ur, err := c.Ctrl.UserRoles(ctx) - if err != nil { - return nil, err - } - - res := make([]chronograf.User, len(all.Users)) - for i, user := range all.Users { - role := ur[user.Name] - cr := role.ToChronograf() - // For now we are removing all users from a role being returned. - for i, r := range cr { - r.Users = []chronograf.User{} - cr[i] = r - } - - res[i] = chronograf.User{ - Name: user.Name, - Permissions: ToChronograf(user.Permissions), - Roles: cr, - } - } - return res, nil -} - -// ToEnterprise converts chronograf permission shape to enterprise -func ToEnterprise(perms chronograf.Permissions) Permissions { - res := Permissions{} - for _, perm := range perms { - if perm.Scope == chronograf.AllScope { - // Enterprise uses empty string as the key for all databases - res[""] = perm.Allowed - } else { - res[perm.Name] = perm.Allowed - } - } - return res -} - -// ToChronograf converts enterprise permissions shape to chronograf shape -func ToChronograf(perms Permissions) chronograf.Permissions { - res := chronograf.Permissions{} - for db, perm := range perms { - // Enterprise uses empty string as the key for all databases - if db == "" { - res = append(res, chronograf.Permission{ - Scope: chronograf.AllScope, - Allowed: perm, - }) - } else { - res = append(res, chronograf.Permission{ - Scope: chronograf.DBScope, - Name: db, - Allowed: perm, - }) - } - } - return res -} diff --git a/chronograf/enterprise/users_test.go b/chronograf/enterprise/users_test.go deleted file mode 100644 index 0b1d0975d2e..00000000000 --- a/chronograf/enterprise/users_test.go +++ /dev/null @@ -1,866 +0,0 @@ -package enterprise_test - -import ( - "context" - "fmt" - "reflect" - "testing" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/enterprise" -) - -func TestClient_Add(t *testing.T) { - type fields struct { - Ctrl *mockCtrl - Logger chronograf.Logger - } - type args struct { - ctx context.Context - u *chronograf.User - } - tests := []struct { - name string - fields fields - args args -
want *chronograf.User - wantErr bool - }{ - { - name: "Successful Create User", - fields: fields{ - Ctrl: &mockCtrl{ - createUser: func(ctx context.Context, name, passwd string) error { - return nil - }, - setUserPerms: func(ctx context.Context, name string, perms enterprise.Permissions) error { - return nil - }, - user: func(ctx context.Context, name string) (*enterprise.User, error) { - return &enterprise.User{ - Name: "marty", - Password: "johnny be good", - Permissions: map[string][]string{ - "": { - "ViewChronograf", - "ReadData", - "WriteData", - }, - }, - }, nil - }, - userRoles: func(ctx context.Context) (map[string]enterprise.Roles, error) { - return map[string]enterprise.Roles{}, nil - }, - }, - }, - args: args{ - ctx: context.Background(), - u: &chronograf.User{ - Name: "marty", - Passwd: "johnny be good", - }, - }, - want: &chronograf.User{ - Name: "marty", - Permissions: chronograf.Permissions{ - { - Scope: chronograf.AllScope, - Allowed: chronograf.Allowances{"ViewChronograf", "ReadData", "WriteData"}, - }, - }, - Roles: []chronograf.Role{}, - }, - }, - { - name: "Successful Create User with roles", - fields: fields{ - Ctrl: &mockCtrl{ - createUser: func(ctx context.Context, name, passwd string) error { - return nil - }, - setUserPerms: func(ctx context.Context, name string, perms enterprise.Permissions) error { - return nil - }, - user: func(ctx context.Context, name string) (*enterprise.User, error) { - return &enterprise.User{ - Name: "marty", - Password: "johnny be good", - Permissions: map[string][]string{ - "": { - "ViewChronograf", - "ReadData", - "WriteData", - }, - }, - }, nil - }, - userRoles: func(ctx context.Context) (map[string]enterprise.Roles, error) { - return map[string]enterprise.Roles{ - "marty": enterprise.Roles{ - Roles: []enterprise.Role{ - { - Name: "admin", - }, - }, - }, - }, nil - }, - addRoleUsers: func(ctx context.Context, name string, users []string) error { - return nil - }, - }, - }, - args: args{ - ctx: context.Background(), - u: &chronograf.User{ - Name: "marty", - Passwd: "johnny be good", - Roles: []chronograf.Role{ - { - Name: "admin", - }, - }, - }, - }, - want: &chronograf.User{ - Name: "marty", - Permissions: chronograf.Permissions{ - { - Scope: chronograf.AllScope, - Allowed: chronograf.Allowances{"ViewChronograf", "ReadData", "WriteData"}, - }, - }, - Roles: []chronograf.Role{ - { - Name: "admin", - Users: []chronograf.User{}, - Permissions: chronograf.Permissions{}, - }, - }, - }, - }, - { - name: "Failure to Create User", - fields: fields{ - Ctrl: &mockCtrl{ - createUser: func(ctx context.Context, name, passwd string) error { - return fmt.Errorf("1.21 Gigawatts! Tom, how could I have been so careless?") - }, - }, - }, - args: args{ - ctx: context.Background(), - u: &chronograf.User{ - Name: "marty", - Passwd: "johnny be good", - }, - }, - wantErr: true, - }, - } - for _, tt := range tests { - c := &enterprise.UserStore{ - Ctrl: tt.fields.Ctrl, - Logger: tt.fields.Logger, - } - got, err := c.Add(tt.args.ctx, tt.args.u) - if (err != nil) != tt.wantErr { - t.Errorf("%q. Client.Add() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("%q. 
Client.Add() = \n%#v\n, want \n%#v\n", tt.name, got, tt.want) - } - } -} - -func TestClient_Delete(t *testing.T) { - type fields struct { - Ctrl *mockCtrl - Logger chronograf.Logger - } - type args struct { - ctx context.Context - u *chronograf.User - } - tests := []struct { - name string - fields fields - args args - wantErr bool - }{ - { - name: "Successful Delete User", - fields: fields{ - Ctrl: &mockCtrl{ - deleteUser: func(ctx context.Context, name string) error { - return nil - }, - }, - }, - args: args{ - ctx: context.Background(), - u: &chronograf.User{ - Name: "marty", - Passwd: "johnny be good", - }, - }, - }, - { - name: "Failure to Delete User", - fields: fields{ - Ctrl: &mockCtrl{ - deleteUser: func(ctx context.Context, name string) error { - return fmt.Errorf("1.21 Gigawatts! Tom, how could I have been so careless?") - }, - }, - }, - args: args{ - ctx: context.Background(), - u: &chronograf.User{ - Name: "marty", - Passwd: "johnny be good", - }, - }, - wantErr: true, - }, - } - for _, tt := range tests { - c := &enterprise.UserStore{ - Ctrl: tt.fields.Ctrl, - Logger: tt.fields.Logger, - } - if err := c.Delete(tt.args.ctx, tt.args.u); (err != nil) != tt.wantErr { - t.Errorf("%q. Client.Delete() error = %v, wantErr %v", tt.name, err, tt.wantErr) - } - } -} - -func TestClient_Get(t *testing.T) { - type fields struct { - Ctrl *mockCtrl - Logger chronograf.Logger - } - type args struct { - ctx context.Context - name string - } - tests := []struct { - name string - fields fields - args args - want *chronograf.User - wantErr bool - }{ - { - name: "Successful Get User", - fields: fields{ - Ctrl: &mockCtrl{ - user: func(ctx context.Context, name string) (*enterprise.User, error) { - return &enterprise.User{ - Name: "marty", - Password: "johnny be good", - Permissions: map[string][]string{ - "": { - "ViewChronograf", - "ReadData", - "WriteData", - }, - }, - }, nil - }, - userRoles: func(ctx context.Context) (map[string]enterprise.Roles, error) { - return map[string]enterprise.Roles{}, nil - }, - }, - }, - args: args{ - ctx: context.Background(), - name: "marty", - }, - want: &chronograf.User{ - Name: "marty", - Permissions: chronograf.Permissions{ - { - Scope: chronograf.AllScope, - Allowed: chronograf.Allowances{"ViewChronograf", "ReadData", "WriteData"}, - }, - }, - Roles: []chronograf.Role{}, - }, - }, - { - name: "Successful Get User with roles", - fields: fields{ - Ctrl: &mockCtrl{ - user: func(ctx context.Context, name string) (*enterprise.User, error) { - return &enterprise.User{ - Name: "marty", - Password: "johnny be good", - Permissions: map[string][]string{ - "": { - "ViewChronograf", - "ReadData", - "WriteData", - }, - }, - }, nil - }, - userRoles: func(ctx context.Context) (map[string]enterprise.Roles, error) { - return map[string]enterprise.Roles{ - "marty": enterprise.Roles{ - Roles: []enterprise.Role{ - { - Name: "timetravels", - Permissions: map[string][]string{ - "": { - "ViewChronograf", - "ReadData", - "WriteData", - }, - }, - Users: []string{"marty", "docbrown"}, - }, - }, - }, - }, nil - }, - }, - }, - args: args{ - ctx: context.Background(), - name: "marty", - }, - want: &chronograf.User{ - Name: "marty", - Permissions: chronograf.Permissions{ - { - Scope: chronograf.AllScope, - Allowed: chronograf.Allowances{"ViewChronograf", "ReadData", "WriteData"}, - }, - }, - Roles: []chronograf.Role{ - { - Name: "timetravels", - Permissions: chronograf.Permissions{ - { - Scope: chronograf.AllScope, - Allowed: chronograf.Allowances{"ViewChronograf", "ReadData", "WriteData"}, 
- }, - }, - Users: []chronograf.User{}, - }, - }, - }, - }, - { - name: "Failure to get User", - fields: fields{ - Ctrl: &mockCtrl{ - user: func(ctx context.Context, name string) (*enterprise.User, error) { - return nil, fmt.Errorf("1.21 Gigawatts! Tom, how could I have been so careless?") - }, - }, - }, - args: args{ - ctx: context.Background(), - name: "marty", - }, - wantErr: true, - }, - } - for _, tt := range tests { - c := &enterprise.UserStore{ - Ctrl: tt.fields.Ctrl, - Logger: tt.fields.Logger, - } - got, err := c.Get(tt.args.ctx, chronograf.UserQuery{Name: &tt.args.name}) - if (err != nil) != tt.wantErr { - t.Errorf("%q. Client.Get() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("%q. Client.Get() = %v, want %v", tt.name, got, tt.want) - } - } -} - -func TestClient_Update(t *testing.T) { - type fields struct { - Ctrl *mockCtrl - Logger chronograf.Logger - } - type args struct { - ctx context.Context - u *chronograf.User - } - tests := []struct { - name string - fields fields - args args - wantErr bool - }{ - { - name: "Successful Change Password", - fields: fields{ - Ctrl: &mockCtrl{ - changePassword: func(ctx context.Context, name, passwd string) error { - return nil - }, - }, - }, - args: args{ - ctx: context.Background(), - u: &chronograf.User{ - Name: "marty", - Passwd: "johnny be good", - }, - }, - }, - { - name: "Failure to Change Password", - fields: fields{ - Ctrl: &mockCtrl{ - changePassword: func(ctx context.Context, name, passwd string) error { - return fmt.Errorf("ronald Reagan, the actor?! Ha Then who’s Vice President Jerry Lewis? I suppose Jane Wyman is First Lady") - }, - }, - }, - args: args{ - ctx: context.Background(), - u: &chronograf.User{ - Name: "marty", - Passwd: "johnny be good", - }, - }, - wantErr: true, - }, - { - name: "Success setting permissions User", - fields: fields{ - Ctrl: &mockCtrl{ - setUserPerms: func(ctx context.Context, name string, perms enterprise.Permissions) error { - return nil - }, - userRoles: func(ctx context.Context) (map[string]enterprise.Roles, error) { - return map[string]enterprise.Roles{}, nil - }, - }, - }, - args: args{ - ctx: context.Background(), - u: &chronograf.User{ - Name: "marty", - Permissions: chronograf.Permissions{ - { - Scope: chronograf.AllScope, - Allowed: chronograf.Allowances{"ViewChronograf", "KapacitorAPI"}, - }, - }, - }, - }, - wantErr: false, - }, - { - name: "Success setting permissions and roles for user", - fields: fields{ - Ctrl: &mockCtrl{ - setUserPerms: func(ctx context.Context, name string, perms enterprise.Permissions) error { - return nil - }, - addRoleUsers: func(ctx context.Context, name string, users []string) error { - return nil - }, - userRoles: func(ctx context.Context) (map[string]enterprise.Roles, error) { - return map[string]enterprise.Roles{}, nil - }, - }, - }, - args: args{ - ctx: context.Background(), - u: &chronograf.User{ - Name: "marty", - Permissions: chronograf.Permissions{ - { - Scope: chronograf.AllScope, - Allowed: chronograf.Allowances{"ViewChronograf", "KapacitorAPI"}, - }, - }, - Roles: []chronograf.Role{ - { - Name: "adminrole", - }, - }, - }, - }, - wantErr: false, - }, - { - name: "Failure setting permissions User", - fields: fields{ - Ctrl: &mockCtrl{ - setUserPerms: func(ctx context.Context, name string, perms enterprise.Permissions) error { - return fmt.Errorf("they found me, I don't know how, but they found me.") - }, - userRoles: func(ctx context.Context) (map[string]enterprise.Roles, error) { 
- return map[string]enterprise.Roles{}, nil - }, - }, - }, - args: args{ - ctx: context.Background(), - u: &chronograf.User{ - Name: "marty", - Permissions: chronograf.Permissions{ - { - Scope: chronograf.AllScope, - Allowed: chronograf.Allowances{"ViewChronograf", "KapacitorAPI"}, - }, - }, - }, - }, - wantErr: true, - }, - } - for _, tt := range tests { - c := &enterprise.UserStore{ - Ctrl: tt.fields.Ctrl, - Logger: tt.fields.Logger, - } - if err := c.Update(tt.args.ctx, tt.args.u); (err != nil) != tt.wantErr { - t.Errorf("%q. Client.Update() error = %v, wantErr %v", tt.name, err, tt.wantErr) - } - } -} - -func TestClient_Num(t *testing.T) { - type fields struct { - Ctrl *mockCtrl - Logger chronograf.Logger - } - type args struct { - ctx context.Context - } - tests := []struct { - name string - fields fields - args args - want []chronograf.User - wantErr bool - }{ - { - name: "Successful Get User", - fields: fields{ - Ctrl: &mockCtrl{ - users: func(ctx context.Context, name *string) (*enterprise.Users, error) { - return &enterprise.Users{ - Users: []enterprise.User{ - { - Name: "marty", - Password: "johnny be good", - Permissions: map[string][]string{ - "": { - "ViewChronograf", - "ReadData", - "WriteData", - }, - }, - }, - }, - }, nil - }, - userRoles: func(ctx context.Context) (map[string]enterprise.Roles, error) { - return map[string]enterprise.Roles{}, nil - }, - }, - }, - args: args{ - ctx: context.Background(), - }, - want: []chronograf.User{ - { - Name: "marty", - Permissions: chronograf.Permissions{ - { - Scope: chronograf.AllScope, - Allowed: chronograf.Allowances{"ViewChronograf", "ReadData", "WriteData"}, - }, - }, - Roles: []chronograf.Role{}, - }, - }, - }, - { - name: "Failure to get User", - fields: fields{ - Ctrl: &mockCtrl{ - users: func(ctx context.Context, name *string) (*enterprise.Users, error) { - return nil, fmt.Errorf("1.21 Gigawatts! Tom, how could I have been so careless?") - }, - }, - }, - args: args{ - ctx: context.Background(), - }, - wantErr: true, - }, - } - for _, tt := range tests { - c := &enterprise.UserStore{ - Ctrl: tt.fields.Ctrl, - Logger: tt.fields.Logger, - } - got, err := c.Num(tt.args.ctx) - if (err != nil) != tt.wantErr { - t.Errorf("%q. Client.Num() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - if got != len(tt.want) { - t.Errorf("%q. 
Client.Num() = %v, want %v", tt.name, got, len(tt.want)) - } - } -} - -func TestClient_All(t *testing.T) { - type fields struct { - Ctrl *mockCtrl - Logger chronograf.Logger - } - type args struct { - ctx context.Context - } - tests := []struct { - name string - fields fields - args args - want []chronograf.User - wantErr bool - }{ - { - name: "Successful Get User", - fields: fields{ - Ctrl: &mockCtrl{ - users: func(ctx context.Context, name *string) (*enterprise.Users, error) { - return &enterprise.Users{ - Users: []enterprise.User{ - { - Name: "marty", - Password: "johnny be good", - Permissions: map[string][]string{ - "": { - "ViewChronograf", - "ReadData", - "WriteData", - }, - }, - }, - }, - }, nil - }, - userRoles: func(ctx context.Context) (map[string]enterprise.Roles, error) { - return map[string]enterprise.Roles{}, nil - }, - }, - }, - args: args{ - ctx: context.Background(), - }, - want: []chronograf.User{ - { - Name: "marty", - Permissions: chronograf.Permissions{ - { - Scope: chronograf.AllScope, - Allowed: chronograf.Allowances{"ViewChronograf", "ReadData", "WriteData"}, - }, - }, - Roles: []chronograf.Role{}, - }, - }, - }, - { - name: "Failure to get User", - fields: fields{ - Ctrl: &mockCtrl{ - users: func(ctx context.Context, name *string) (*enterprise.Users, error) { - return nil, fmt.Errorf("1.21 Gigawatts! Tom, how could I have been so careless?") - }, - }, - }, - args: args{ - ctx: context.Background(), - }, - wantErr: true, - }, - } - for _, tt := range tests { - c := &enterprise.UserStore{ - Ctrl: tt.fields.Ctrl, - Logger: tt.fields.Logger, - } - got, err := c.All(tt.args.ctx) - if (err != nil) != tt.wantErr { - t.Errorf("%q. Client.All() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("%q. Client.All() = %v, want %v", tt.name, got, tt.want) - } - } -} - -func Test_ToEnterprise(t *testing.T) { - tests := []struct { - name string - perms chronograf.Permissions - want enterprise.Permissions - }{ - { - name: "All Scopes", - want: enterprise.Permissions{"": []string{"ViewChronograf", "KapacitorAPI"}}, - perms: chronograf.Permissions{ - { - Scope: chronograf.AllScope, - Allowed: chronograf.Allowances{"ViewChronograf", "KapacitorAPI"}, - }, - }, - }, - { - name: "DB Scope", - want: enterprise.Permissions{"telegraf": []string{"ReadData", "WriteData"}}, - perms: chronograf.Permissions{ - { - Scope: chronograf.DBScope, - Name: "telegraf", - Allowed: chronograf.Allowances{"ReadData", "WriteData"}, - }, - }, - }, - } - for _, tt := range tests { - if got := enterprise.ToEnterprise(tt.perms); !reflect.DeepEqual(got, tt.want) { - t.Errorf("%q. ToEnterprise() = %v, want %v", tt.name, got, tt.want) - } - } -} - -func Test_ToChronograf(t *testing.T) { - tests := []struct { - name string - perms enterprise.Permissions - want chronograf.Permissions - }{ - { - name: "All Scopes", - perms: enterprise.Permissions{"": []string{"ViewChronograf", "KapacitorAPI"}}, - want: chronograf.Permissions{ - { - Scope: chronograf.AllScope, - Allowed: chronograf.Allowances{"ViewChronograf", "KapacitorAPI"}, - }, - }, - }, - { - name: "DB Scope", - perms: enterprise.Permissions{"telegraf": []string{"ReadData", "WriteData"}}, - want: chronograf.Permissions{ - { - Scope: chronograf.DBScope, - Name: "telegraf", - Allowed: chronograf.Allowances{"ReadData", "WriteData"}, - }, - }, - }, - } - for _, tt := range tests { - if got := enterprise.ToChronograf(tt.perms); !reflect.DeepEqual(got, tt.want) { - t.Errorf("%q. 
toChronograf() = %v, want %v", tt.name, got, tt.want) - } - } -} - -type mockCtrl struct { - showCluster func(ctx context.Context) (*enterprise.Cluster, error) - user func(ctx context.Context, name string) (*enterprise.User, error) - createUser func(ctx context.Context, name, passwd string) error - deleteUser func(ctx context.Context, name string) error - changePassword func(ctx context.Context, name, passwd string) error - users func(ctx context.Context, name *string) (*enterprise.Users, error) - setUserPerms func(ctx context.Context, name string, perms enterprise.Permissions) error - - userRoles func(ctx context.Context) (map[string]enterprise.Roles, error) - - roles func(ctx context.Context, name *string) (*enterprise.Roles, error) - role func(ctx context.Context, name string) (*enterprise.Role, error) - createRole func(ctx context.Context, name string) error - deleteRole func(ctx context.Context, name string) error - setRolePerms func(ctx context.Context, name string, perms enterprise.Permissions) error - setRoleUsers func(ctx context.Context, name string, users []string) error - addRoleUsers func(ctx context.Context, name string, users []string) error - removeRoleUsers func(ctx context.Context, name string, users []string) error -} - -func (m *mockCtrl) ShowCluster(ctx context.Context) (*enterprise.Cluster, error) { - return m.showCluster(ctx) -} - -func (m *mockCtrl) User(ctx context.Context, name string) (*enterprise.User, error) { - return m.user(ctx, name) -} - -func (m *mockCtrl) CreateUser(ctx context.Context, name, passwd string) error { - return m.createUser(ctx, name, passwd) -} - -func (m *mockCtrl) DeleteUser(ctx context.Context, name string) error { - return m.deleteUser(ctx, name) -} - -func (m *mockCtrl) ChangePassword(ctx context.Context, name, passwd string) error { - return m.changePassword(ctx, name, passwd) -} - -func (m *mockCtrl) Users(ctx context.Context, name *string) (*enterprise.Users, error) { - return m.users(ctx, name) -} - -func (m *mockCtrl) SetUserPerms(ctx context.Context, name string, perms enterprise.Permissions) error { - return m.setUserPerms(ctx, name, perms) -} - -func (m *mockCtrl) UserRoles(ctx context.Context) (map[string]enterprise.Roles, error) { - return m.userRoles(ctx) -} - -func (m *mockCtrl) Roles(ctx context.Context, name *string) (*enterprise.Roles, error) { - return m.roles(ctx, name) -} - -func (m *mockCtrl) Role(ctx context.Context, name string) (*enterprise.Role, error) { - return m.role(ctx, name) -} - -func (m *mockCtrl) CreateRole(ctx context.Context, name string) error { - return m.createRole(ctx, name) -} - -func (m *mockCtrl) DeleteRole(ctx context.Context, name string) error { - return m.deleteRole(ctx, name) -} - -func (m *mockCtrl) SetRolePerms(ctx context.Context, name string, perms enterprise.Permissions) error { - return m.setRolePerms(ctx, name, perms) -} - -func (m *mockCtrl) SetRoleUsers(ctx context.Context, name string, users []string) error { - return m.setRoleUsers(ctx, name, users) -} - -func (m *mockCtrl) AddRoleUsers(ctx context.Context, name string, users []string) error { - return m.addRoleUsers(ctx, name, users) -} - -func (m *mockCtrl) RemoveRoleUsers(ctx context.Context, name string, users []string) error { - return m.removeRoleUsers(ctx, name, users) -} diff --git a/chronograf/etc/Dockerfile_build b/chronograf/etc/Dockerfile_build deleted file mode 100644 index 2e91918bb11..00000000000 --- a/chronograf/etc/Dockerfile_build +++ /dev/null @@ -1,44 +0,0 @@ -FROM ubuntu:trusty - -RUN apt update && 
DEBIAN_FRONTEND=noninteractive apt install -y \ - apt-transport-https \ - python-dev \ - wget \ - curl \ - git \ - mercurial \ - make \ - ruby \ - ruby-dev \ - rpm \ - zip \ - python-pip \ - autoconf \ - libtool - -RUN pip install boto requests python-jose --upgrade -RUN gem install fpm - -# Install node -ENV NODE_VERSION v8.10.0 -RUN wget -q https://nodejs.org/dist/${NODE_VERSION}/node-${NODE_VERSION}-linux-x64.tar.gz; \ - tar -xvf node-${NODE_VERSION}-linux-x64.tar.gz -C / --strip-components=1; \ - rm -f node-${NODE_VERSION}-linux-x64.tar.gz - -# Install go -ENV GOPATH /root/go -ENV GO_VERSION 1.10 -ENV GO_ARCH amd64 -RUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz; \ - tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz ; \ - rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz -ENV PATH /usr/local/go/bin:$PATH - -ENV PROJECT_DIR $GOPATH/src/github.com/influxdata/influxdb/chronograf -ENV PATH $GOPATH/bin:$PATH -RUN mkdir -p $PROJECT_DIR -WORKDIR $PROJECT_DIR - -VOLUME $PROJECT_DIR - -ENTRYPOINT [ "/root/go/src/github.com/influxdata/influxdb/chronograf/etc/build.py" ] diff --git a/chronograf/etc/README.md b/chronograf/etc/README.md deleted file mode 100644 index d94fd49056f..00000000000 --- a/chronograf/etc/README.md +++ /dev/null @@ -1,15 +0,0 @@ -## Builds - -Builds are run from a Docker build image that is configured with the Node and Go versions we support. -Our circle.yml uses this Docker container to build, test, and create release packages. - -### Updating to new Node/Go versions -After updating Dockerfile_build, run - -`docker build -t quay.io/influxdb/builder:chronograf-$(date "+%Y%m%d") -f Dockerfile_build .` - -and push it to quay.io with: -`docker push quay.io/influxdb/builder:chronograf-$(date "+%Y%m%d")` - -### Update circle -Update DOCKER_TAG in circle.yml to the new container. diff --git a/chronograf/etc/build.py b/chronograf/etc/build.py deleted file mode 100755 index 833500dbe05..00000000000 --- a/chronograf/etc/build.py +++ /dev/null @@ -1,1054 +0,0 @@ -#!/usr/bin/python -u - -import sys -import os -import subprocess -from datetime import datetime -import shutil -import tempfile -import hashlib -import re -import logging -import argparse -import json -import fs - -################ -#### Chronograf Variables -################ - -# Packaging variables -PACKAGE_NAME = "chronograf" -INSTALL_ROOT_DIR = "/usr/bin" -LOG_DIR = "/var/log/chronograf" -DATA_DIR = "/var/lib/chronograf" -SCRIPT_DIR = "/usr/lib/chronograf/scripts" -LOGROTATE_DIR = "/etc/logrotate.d" -CANNED_DIR = "/usr/share/chronograf/canned" -RESOURCES_DIR = "/usr/share/chronograf/resources" - -INIT_SCRIPT = "etc/scripts/init.sh" -SYSTEMD_SCRIPT = "etc/scripts/chronograf.service" -POSTINST_SCRIPT = "etc/scripts/post-install.sh" -POSTUNINST_SCRIPT = "etc/scripts/post-uninstall.sh" -LOGROTATE_SCRIPT = "etc/scripts/logrotate" -CANNED_SCRIPTS = "canned/*json" - -# Default AWS S3 bucket for uploads -DEFAULT_BUCKET = "dl.influxdata.com/chronograf/artifacts" - -CONFIGURATION_FILES = [ - LOGROTATE_DIR + '/chronograf', -] - -PACKAGE_LICENSE = "AGPLv3" -PACKAGE_URL = "https://github.com/influxdata/influxdb/chronograf" -MAINTAINER = "contact@influxdb.com" -VENDOR = "InfluxData" -DESCRIPTION = "Open source monitoring and visualization UI for the entire TICK stack."
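-# For orientation, the installed layout these constants describe (illustrative;
-# the directories are created by create_package_fs below):
-#   /usr/bin                      chronograf, chronoctl binaries
-#   /var/log/chronograf           log output
-#   /var/lib/chronograf           application data
-#   /usr/lib/chronograf/scripts   init.sh, chronograf.service
-#   /etc/logrotate.d/chronograf   logrotate config (registered with fpm as a config file)
-#   /usr/share/chronograf/canned  bundled canned *.json layouts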
- -prereqs = [ 'git', 'go', 'yarn' ] -go_vet_command = "go tool vet ./" -optional_prereqs = [ 'fpm', 'rpmbuild', 'gpg' ] - -fpm_common_args = "-f -s dir --log error \ ---vendor {} \ ---url {} \ ---after-install {} \ ---after-remove {} \ ---license {} \ ---maintainer {} \ ---directories {} \ ---directories {} \ ---description \"{}\"".format( - VENDOR, - PACKAGE_URL, - POSTINST_SCRIPT, - POSTUNINST_SCRIPT, - PACKAGE_LICENSE, - MAINTAINER, - LOG_DIR, - DATA_DIR, - DESCRIPTION) - -for f in CONFIGURATION_FILES: - fpm_common_args += " --config-files {}".format(f) - -targets = { - 'chronograf' : './cmd/chronograf', - 'chronoctl' : './cmd/chronoctl', -} - -supported_builds = { - 'darwin': [ "amd64" ], - 'windows': [ "amd64" ], - 'linux': [ "amd64", "i386", "armhf", "arm64", "armel", "static_i386", "static_amd64" ] -} - -supported_packages = { - "darwin": [ "tar" ], - "linux": [ "deb", "rpm", "tar" ], - "windows": [ "zip" ], - "freebsd": [ "tar" ] -} - -################ -#### Chronograf Functions -################ - -def print_banner(): - logging.info(""" - ___ _ __ - / __| |_ _ _ ___ _ _ ___ __ _ _ _ __ _ / _| - | (__| ' \| '_/ _ \ ' \/ _ \/ _` | '_/ _` | _| - \___|_||_|_| \___/_||_\___/\__, |_| \__,_|_| - |___/ - Build Script -""") - -def create_package_fs(build_root): - """Create a filesystem structure to mimic the package filesystem. - """ - logging.debug("Creating package filesystem at location: {}".format(build_root)) - # Using [1:] for the path names due to them being absolute - # (will overwrite previous paths, per 'os.path.join' documentation) - dirs = [ - INSTALL_ROOT_DIR[1:], - LOG_DIR[1:], - DATA_DIR[1:], - SCRIPT_DIR[1:], - LOGROTATE_DIR[1:], - CANNED_DIR[1:], - RESOURCES_DIR[1:] - ] - for d in dirs: - os.makedirs(os.path.join(build_root, d)) - os.chmod(os.path.join(build_root, d), 0o755) - -def package_scripts(build_root, config_only=False, windows=False): - """Copy the necessary scripts to the package filesystem. - """ - if config_only: - pass - else: - logging.debug("Copying scripts to build directory.") - files = [ - (INIT_SCRIPT, SCRIPT_DIR, "init.sh"), - (SYSTEMD_SCRIPT, SCRIPT_DIR, "chronograf.service"), - (LOGROTATE_SCRIPT, LOGROTATE_DIR, "chronograf") - ] - for script, dir, name in files: - dest = os.path.join(build_root, dir[1:], name) - logging.debug("Moving {} to {}".format(script, dest)) - shutil.copyfile(script, dest) - os.chmod(dest, 0o644) - run("cp {} {} && chmod 644 {}".format(CANNED_SCRIPTS, - os.path.join(build_root, CANNED_DIR[1:]), - os.path.join(build_root, CANNED_DIR[1:], "*json")), - shell=True, print_output=True) - -def run_generate(): - """Generate static assets. - """ - start_time = datetime.utcnow() - logging.info("Generating static assets...") - run("make assets", shell=True, print_output=True) - end_time = datetime.utcnow() - logging.info("Time taken: {}s".format((end_time - start_time).total_seconds())) - return True - -def make_clean(): - """Clean up any previous build output. - """ - start_time = datetime.utcnow() - run("make clean", shell=True, print_output=True) - end_time = datetime.utcnow() - logging.info("Time taken: {}s".format((end_time - start_time).total_seconds())) - return True - - -def go_get(branch, update=False, no_uncommitted=False): - """Retrieve build dependencies or restore pinned dependencies.
- """ - start_time = datetime.utcnow() - if local_changes() and no_uncommitted: - logging.error("There are uncommitted changes in the current directory.") - return False - run("make dep", shell=True, print_output=True) - end_time = datetime.utcnow() - logging.info("Time taken: {}s".format((end_time - start_time).total_seconds())) - return True - -def run_tests(race, parallel, timeout, no_vet): - """Run the Go and NPM test suite on binary output. - """ - start_time = datetime.utcnow() - logging.info("Running tests...") - run("make test", shell=True, print_output=True) - end_time = datetime.utcnow() - logging.info("Time taken: {}s".format((end_time - start_time).total_seconds())) - return True - -################ -#### All Chronograf-specific content above this line -################ - -def run(command, allow_failure=False, shell=False, print_output=False): - """Run shell command (convenience wrapper around subprocess). - """ - out = None - logging.debug("{}".format(command)) - try: - cmd = command - if not shell: - cmd = command.split() - - stdout = subprocess.PIPE - stderr = subprocess.STDOUT - if print_output: - stdout = None - - p = subprocess.Popen(cmd, shell=shell, stdout=stdout, stderr=stderr) - out, _ = p.communicate() - if out is not None: - out = out.decode('utf-8').strip() - if p.returncode != 0: - if allow_failure: - logging.warn(u"Command '{}' failed with error: {}".format(command, out)) - return None - else: - logging.error(u"Command '{}' failed with error: {}".format(command, out)) - sys.exit(1) - except OSError as e: - if allow_failure: - logging.warn("Command '{}' failed with error: {}".format(command, e)) - return out - else: - logging.error("Command '{}' failed with error: {}".format(command, e)) - sys.exit(1) - else: - return out - -def create_temp_dir(prefix = None): - """ Create temporary directory with optional prefix. - """ - if prefix is None: - return tempfile.mkdtemp(prefix="{}-build.".format(PACKAGE_NAME)) - else: - return tempfile.mkdtemp(prefix=prefix) - -def increment_minor_version(version): - """Return the version with the minor version incremented and patch - version set to zero. - """ - ver_list = version.split('.') - if len(ver_list) != 3: - logging.warn("Could not determine how to increment version '{}', will just use provided version.".format(version)) - return version - ver_list[1] = str(int(ver_list[1]) + 1) - ver_list[2] = str(0) - inc_version = '.'.join(ver_list) - logging.debug("Incremented version from '{}' to '{}'.".format(version, inc_version)) - return inc_version - -def get_current_version_tag(): - """Retrieve the raw git version tag. - """ - version = run("git describe --always --tags --abbrev=0") - return version - -def get_current_version(): - """Parse version information from git tag output. - """ - version_tag = get_current_version_tag() - # Remove leading 'v' - if version_tag[0] == 'v': - version_tag = version_tag[1:] - # Replace any '-'/'_' with '~' - if '-' in version_tag: - version_tag = version_tag.replace("-","~") - if '_' in version_tag: - version_tag = version_tag.replace("_","~") - return version_tag - -def get_current_commit(short=False): - """Retrieve the current git commit. - """ - command = None - if short: - command = "git log --pretty=format:'%h' -n 1" - else: - command = "git rev-parse HEAD" - out = run(command) - return out.strip('\'\n\r ') - -def get_current_branch(): - """Retrieve the current git branch. 
- """ - command = "git rev-parse --abbrev-ref HEAD" - out = run(command) - return out.strip() - -def local_changes(): - """Return True if there are local un-committed changes. - """ - output = run("git diff-files --ignore-submodules --").strip() - if len(output) > 0: - return True - return False - -def get_system_arch(): - """Retrieve current system architecture. - """ - arch = os.uname()[4] - if arch == "x86_64": - arch = "amd64" - elif arch == "386": - arch = "i386" - elif 'arm' in arch: - # Prevent uname from reporting full ARM arch (eg 'armv7l') - arch = "arm" - return arch - -def get_system_platform(): - """Retrieve current system platform. - """ - if sys.platform.startswith("linux"): - return "linux" - else: - return sys.platform - -def get_go_version(): - """Retrieve version information for Go. - """ - out = run("go version") - matches = re.search('go version go(\S+)', out) - if matches is not None: - return matches.groups()[0].strip() - return None - -def check_path_for(b): - """Check the the user's path for the provided binary. - """ - def is_exe(fpath): - return os.path.isfile(fpath) and os.access(fpath, os.X_OK) - - for path in os.environ["PATH"].split(os.pathsep): - path = path.strip('"') - full_path = os.path.join(path, b) - if os.path.isfile(full_path) and os.access(full_path, os.X_OK): - return full_path - -def check_environ(build_dir = None): - """Check environment for common Go variables. - """ - logging.info("Checking environment...") - for v in [ "GOPATH", "GOBIN", "GOROOT" ]: - logging.debug("Using '{}' for {}".format(os.environ.get(v), v)) - - cwd = os.getcwd() - if build_dir is None and os.environ.get("GOPATH") and os.environ.get("GOPATH") not in cwd: - logging.warn("Your current directory is not under your GOPATH. This may lead to build failures.") - return True - -def check_prereqs(): - """Check user path for required dependencies. - """ - logging.info("Checking for dependencies...") - for req in prereqs: - if not check_path_for(req): - logging.error("Could not find dependency: {}".format(req)) - return False - return True - -def upload_packages(packages, bucket_name=None, overwrite=False): - """Upload provided package output to AWS S3. - """ - logging.debug("Uploading files to bucket '{}': {}".format(bucket_name, packages)) - try: - import boto - from boto.s3.key import Key - from boto.s3.connection import OrdinaryCallingFormat - logging.getLogger("boto").setLevel(logging.WARNING) - except ImportError: - logging.warn("Cannot upload packages without 'boto' Python library!") - return False - logging.info("Connecting to AWS S3...") - # Up the number of attempts to 10 from default of 1 - boto.config.add_section("Boto") - boto.config.set("Boto", "metadata_service_num_attempts", "10") - c = boto.connect_s3(calling_format=OrdinaryCallingFormat()) - if bucket_name is None: - bucket_name = DEFAULT_BUCKET - bucket = c.get_bucket(bucket_name.split('/')[0]) - for p in packages: - if '/' in bucket_name: - # Allow for nested paths within the bucket name (ex: - # bucket/folder). Assuming forward-slashes as path - # delimiter. 
- name = os.path.join('/'.join(bucket_name.split('/')[1:]), - os.path.basename(p)) - else: - name = os.path.basename(p) - logging.debug("Using key: {}".format(name)) - if bucket.get_key(name) is None or overwrite: - logging.info("Uploading file {}".format(name)) - k = Key(bucket) - k.key = name - if overwrite: - n = k.set_contents_from_filename(p, replace=True) - else: - n = k.set_contents_from_filename(p, replace=False) - k.make_public() - else: - logging.warn("Not uploading file {}, as it already exists in the target bucket.".format(name)) - return True - -def go_list(vendor=False, relative=False): - """ - Return a list of packages. - If vendor is False, vendor packages are not included. - If relative is True, the package prefix defined by PACKAGE_URL is stripped. - """ - p = subprocess.Popen(["go", "list", "./..."], stdout=subprocess.PIPE, stderr=subprocess.PIPE) - out, err = p.communicate() - packages = out.split('\n') - if packages[-1] == '': - packages = packages[:-1] - if not vendor: - non_vendor = [] - for p in packages: - if '/vendor/' not in p: - non_vendor.append(p) - packages = non_vendor - if relative: - relative_pkgs = [] - for p in packages: - r = p.replace(PACKAGE_URL, '.') - if r != '.': - relative_pkgs.append(r) - packages = relative_pkgs - return packages - -def build(version=None, - platform=None, - arch=None, - nightly=False, - race=False, - clean=False, - outdir=".", - tags=[], - static=False): - """Build each target for the specified architecture and platform. - """ - logging.info("Starting build for {}/{}...".format(platform, arch)) - logging.info("Using Go version: {}".format(get_go_version())) - logging.info("Using git branch: {}".format(get_current_branch())) - logging.info("Using git commit: {}".format(get_current_commit())) - if static: - logging.info("Using statically-compiled output.") - if race: - logging.info("Race is enabled.") - if len(tags) > 0: - logging.info("Using build tags: {}".format(','.join(tags))) - - logging.info("Sending build output to: {}".format(outdir)) - if not os.path.exists(outdir): - os.makedirs(outdir) - elif clean and outdir != '/' and outdir != ".": - logging.info("Cleaning build directory '{}' before building.".format(outdir)) - shutil.rmtree(outdir) - os.makedirs(outdir) - - logging.info("Using version '{}' for build.".format(version)) - - for target, path in targets.items(): - logging.info("Building target: {}".format(target)) - build_command = "" - - # Handle static binary output - if static is True or "static_" in arch: - if "static_" in arch: - static = True - arch = arch.replace("static_", "") - build_command += "CGO_ENABLED=0 " - - # Handle variations in architecture output - if arch == "i386" or arch == "i686": - arch = "386" - elif "arm" in arch: - arch = "arm" - build_command += "GOOS={} GOARCH={} ".format(platform, arch) - - if "arm" in arch: - if arch == "armel": - build_command += "GOARM=5 " - elif arch == "armhf" or arch == "arm": - build_command += "GOARM=6 " - elif arch == "arm64": - # TODO(rossmcdonald) - Verify this is the correct setting for arm64 - build_command += "GOARM=7 " - else: - logging.error("Invalid ARM architecture specified: {}".format(arch)) - logging.error("Please specify either 'armel', 'armhf', or 'arm64'.") - return False - if platform == 'windows': - target = target + '.exe' - build_command += "go build -o {} ".format(os.path.join(outdir, target)) - if race: - build_command += "-race " - if len(tags) > 0: - build_command += "-tags {} ".format(','.join(tags)) - if "1.4" in get_go_version(): - if
static: - build_command += "-ldflags=\"-s -X main.version {} -X main.commit {}\" ".format(version, - get_current_commit()) - else: - build_command += "-ldflags=\"-X main.version {} -X main.commit {}\" ".format(version, - get_current_commit()) - - else: - # Starting with Go 1.5, the linker flag arguments changed to 'name=value' from 'name value' - if static: - build_command += "-ldflags=\"-s -X main.version={} -X main.commit={}\" ".format(version, - get_current_commit()) - else: - build_command += "-ldflags=\"-X main.version={} -X main.commit={}\" ".format(version, - get_current_commit()) - if static: - build_command += "-a -installsuffix cgo " - build_command += path - start_time = datetime.utcnow() - run(build_command, shell=True, print_output=True) - end_time = datetime.utcnow() - logging.info("Time taken: {}s".format((end_time - start_time).total_seconds())) - return True - -def generate_sha256_from_file(path): - """Generate SHA256 signature based on the contents of the file at path. - """ - m = hashlib.sha256() - with open(path, 'rb') as f: - for chunk in iter(lambda: f.read(4096), b""): - m.update(chunk) - return m.hexdigest() - -def generate_md5_from_file(path): - """Generate MD5 signature based on the contents of the file at path. - """ - m = hashlib.md5() - with open(path, 'rb') as f: - for chunk in iter(lambda: f.read(4096), b""): - m.update(chunk) - return m.hexdigest() - -def generate_sig_from_file(path): - """Generate a detached GPG signature from the file at path. - """ - logging.debug("Generating GPG signature for file: {}".format(path)) - gpg_path = check_path_for('gpg') - if gpg_path is None: - logging.warn("gpg binary not found on path! Skipping signature creation.") - return False - if os.environ.get("GNUPG_HOME") is not None: - run('gpg --homedir {} --armor --yes --detach-sign {}'.format(os.environ.get("GNUPG_HOME"), path)) - else: - run('gpg --armor --detach-sign --yes {}'.format(path)) - return True - -def package(build_output, pkg_name, version, nightly=False, iteration=1, static=False, release=False): - """Package the output of the build process. 
- """ - outfiles = [] - tmp_build_dir = create_temp_dir() - logging.debug("Packaging for build output: {}".format(build_output)) - logging.info("Using temporary directory: {}".format(tmp_build_dir)) - try: - for platform in build_output: - # Create top-level folder displaying which platform (linux, etc) - os.makedirs(os.path.join(tmp_build_dir, platform)) - for arch in build_output[platform]: - logging.info("Creating packages for {}/{}".format(platform, arch)) - # Create second-level directory displaying the architecture (amd64, etc) - current_location = build_output[platform][arch] - - # Create directory tree to mimic file system of package - build_root = os.path.join(tmp_build_dir, - platform, - arch, - '{}-{}-{}'.format(PACKAGE_NAME, version, iteration)) - os.makedirs(build_root) - - # Copy packaging scripts to build directory - if platform == "windows": - # For windows and static builds, just copy - # binaries to root of package (no other scripts or - # directories) - package_scripts(build_root, config_only=True, windows=True) - elif static or "static_" in arch: - package_scripts(build_root, config_only=True) - else: - create_package_fs(build_root) - package_scripts(build_root) - - for binary in targets: - # Copy newly-built binaries to packaging directory - if platform == 'windows': - binary = binary + '.exe' - if platform == 'windows' or static or "static_" in arch: - # Where the binary should go in the package filesystem - to = os.path.join(build_root, binary) - # Where the binary currently is located - fr = os.path.join(current_location, binary) - else: - # Where the binary currently is located - fr = os.path.join(current_location, binary) - # Where the binary should go in the package filesystem - to = os.path.join(build_root, INSTALL_ROOT_DIR[1:], binary) - shutil.copy(fr, to) - - for package_type in supported_packages[platform]: - # Package the directory structure for each package type for the platform - logging.debug("Packaging directory '{}' as '{}'.".format(build_root, package_type)) - name = pkg_name - # Reset version, iteration, and current location on each run - # since they may be modified below. 
- package_version = version - package_iteration = iteration - if "static_" in arch: - # Remove the "static_" from the displayed arch on the package - package_arch = arch.replace("static_", "") - else: - package_arch = arch - if not release and not nightly: - # For non-release builds, just use the commit hash as the version - package_version = "{}~{}".format(version, - get_current_commit(short=True)) - package_iteration = "0" - package_build_root = build_root - current_location = build_output[platform][arch] - - if package_type in ['zip', 'tar']: - # For tars and zips, start the packaging one folder above - # the build root (to include the package name) - package_build_root = os.path.join('/', '/'.join(build_root.split('/')[:-1])) - if nightly: - if static or "static_" in arch: - name = '{}-static-nightly_{}_{}'.format(name, - platform, - package_arch) - else: - name = '{}-nightly_{}_{}'.format(name, - platform, - package_arch) - else: - if static or "static_" in arch: - name = '{}-{}-static_{}_{}'.format(name, - package_version, - platform, - package_arch) - else: - name = '{}-{}_{}_{}'.format(name, - package_version, - platform, - package_arch) - current_location = os.path.join(os.getcwd(), current_location) - if package_type == 'tar': - tar_command = "cd {} && tar -cvzf {}.tar.gz --owner=root ./*".format(package_build_root, name) - run(tar_command, shell=True, print_output=True) - run("mv {}.tar.gz {}".format(os.path.join(package_build_root, name), current_location), shell=True) - outfile = os.path.join(current_location, name + ".tar.gz") - outfiles.append(outfile) - elif package_type == 'zip': - zip_command = "cd {} && zip -r {}.zip ./*".format(package_build_root, name) - run(zip_command, shell=True, print_output=True) - run("mv {}.zip {}".format(os.path.join(package_build_root, name), current_location), shell=True) - outfile = os.path.join(current_location, name + ".zip") - outfiles.append(outfile) - elif package_type not in ['zip', 'tar'] and (static or "static_" in arch): - logging.info("Skipping package type '{}' for static builds.".format(package_type)) - else: - fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format( - fpm_common_args, - name, - package_arch, - package_type, - package_version, - package_iteration, - package_build_root, - current_location) - if package_type == "rpm": - fpm_command += "--depends coreutils --depends shadow-utils" - # TODO: Check for changelog - # elif package_type == "deb": - # fpm_command += "--deb-changelog {} ".format(os.path.join(os.getcwd(), "CHANGELOG.md")) - out = run(fpm_command, shell=True) - matches = re.search(':path=>"(.*)"', out) - outfile = None - if matches is not None: - outfile = matches.groups()[0] - if outfile is None: - logging.warn("Could not determine output from packaging output!") - else: - if nightly: - # TODO: check if this is correct - # if package_type == 'rpm': - # # rpm converts any dashes to underscores - # package_version = package_version.replace("-", "_") - # logging.debug("Changing package output version from {} to {} for RPM.".format(version, package_version)) - # Strip nightly version from package name - new_outfile = outfile.replace("{}-{}".format(package_version, package_iteration), "nightly") - os.rename(outfile, new_outfile) - outfile = new_outfile - else: - if package_type == 'rpm': - # rpm converts any dashes to underscores - package_version = package_version.replace("-", "_") - logging.debug("Changing package output version from {} to {} for RPM.".format(version,
package_version)) - new_outfile = outfile.replace("{}-{}".format(package_version, package_iteration), package_version) - os.rename(outfile, new_outfile) - outfile = new_outfile - outfiles.append(os.path.join(os.getcwd(), outfile)) - logging.debug("Produced package files: {}".format(outfiles)) - return outfiles - finally: - pass - # Cleanup - # shutil.rmtree(tmp_build_dir) - -def main(args): - global PACKAGE_NAME - - if args.release and args.nightly: - logging.error("Cannot be both a nightly and a release.") - return 1 - - if args.nightly: - args.version = increment_minor_version(args.version) - args.version = "{}~n{}".format(args.version, - datetime.utcnow().strftime("%Y%m%d%H%M")) - args.iteration = 0 - - # Pre-build checks - check_environ() - if not check_prereqs(): - return 1 - if args.build_tags is None: - args.build_tags = [] - else: - args.build_tags = args.build_tags.split(',') - - orig_commit = get_current_commit(short=True) - orig_branch = get_current_branch() - - if args.platform not in supported_builds and args.platform != 'all': - logging.error("Invalid build platform: {}".format(args.platform)) - return 1 - - build_output = {} - - if args.branch != orig_branch and args.commit != orig_commit: - logging.error("Can only specify one branch or commit to build from.") - return 1 - elif args.branch != orig_branch: - logging.info("Moving to git branch: {}".format(args.branch)) - run("git checkout {}".format(args.branch), print_output=True) - elif args.commit != orig_commit: - logging.info("Moving to git commit: {}".format(args.commit)) - run("git checkout {}".format(args.commit), print_output=True) - - if args.clean: - if not make_clean(): - return 1 - - if not args.no_get: - if not go_get(args.branch, update=args.update, no_uncommitted=args.no_uncommitted): - return 1 - - if args.generate: - if not run_generate(): - return 1 - - if args.test: - if not run_tests(args.race, args.parallel, args.timeout, args.no_vet): - return 1 - - if args.no_build: - return 0 - - platforms = [] - single_build = True - if args.platform == 'all': - platforms = supported_builds.keys() - single_build = False - else: - platforms = [args.platform] - - for platform in platforms: - build_output.update( { platform : {} } ) - archs = [] - if args.arch == "all": - single_build = False - archs = supported_builds.get(platform) - else: - archs = [args.arch] - - for arch in archs: - od = args.outdir - if not single_build: - od = os.path.join(args.outdir, platform, arch) - if not build(version=args.version, - platform=platform, - arch=arch, - nightly=args.nightly, - race=args.race, - clean=args.clean, - outdir=od, - tags=args.build_tags, - static=args.static): - return 1 - build_output.get(platform).update( { arch : od } ) - - # Build packages - if args.package: - if not check_path_for("fpm"): - logging.error("FPM ruby gem required for packaging. 
Stopping.") - return 1 - packages = package(build_output, - args.name, - args.version, - nightly=args.nightly, - iteration=args.iteration, - static=args.static, - release=args.release) - if args.sign: - logging.debug("Generating GPG signatures for packages: {}".format(packages)) - sigs = [] # retain signatures so they can be uploaded with packages - for p in packages: - if generate_sig_from_file(p): - sigs.append(p + '.asc') - else: - logging.error("Creation of signature for package [{}] failed!".format(p)) - return 1 - packages += sigs - if args.upload: - logging.debug("Files staged for upload: {}".format(packages)) - if args.nightly: - args.upload_overwrite = True - if not upload_packages(packages, bucket_name=args.bucket, overwrite=args.upload_overwrite): - return 1 - package_output = {} - for p in packages: - p_name = p.split('/')[-1:][0] - if ".asc" in p_name: - # Skip public keys - continue - - arch = None - type = None - regex = None - nice_name = None - if ".deb" in p_name: - type = "ubuntu" - nice_name = "Ubuntu" - regex = r"^.+_(.+)\.deb$" - elif ".rpm" in p_name: - type = "centos" - nice_name = "CentOS" - regex = r"^.+\.(.+)\.rpm$" - elif ".tar.gz" in p_name: - if "linux" in p_name: - if "static" in p_name: - type = "linux_static" - nice_name = "Linux Static" - else: - type = "linux" - nice_name = "Linux" - elif "darwin" in p_name: - type = "darwin" - nice_name = "Mac OS X" - regex = r"^.+_(.+)\.tar.gz$" - elif ".zip" in p_name: - if "windows" in p_name: - type = "windows" - nice_name = "Windows" - regex = r"^.+_(.+)\.zip$" - - if regex is None or type is None: - logging.error("Could not determine package type for: {}".format(p)) - return 1 - match = re.search(regex, p_name) - arch = match.groups()[0] - if arch is None: - logging.error("Could not determine arch for: {}".format(p)) - return 1 - if arch == "x86_64": - arch = "amd64" - elif arch == "x86_32": - arch = "i386" - package_name = str(arch) + "_" + str(type) - package_output[package_name] = { - "sha256": generate_sha256_from_file(p), - "md5": generate_md5_from_file(p), - "filename": p_name, - "name": nice_name, - "link": "https://dl.influxdata.com/chronograf/releases/" + p_name.rsplit('/', 1)[-1], - } - - # Print the downloads in Markdown format for the release - if args.release: - lines = [] - for package_name, v in package_output.items(): - line = v['name'] + " | [" + v['filename'] +"](" + v['link'] + ") | `" + v['sha256'] + "`" - lines.append(line) - lines.sort() - - print ("## Docker") - print("`docker pull quay.io/influxdb/chronograf:"+get_current_version_tag() + "`") - print("") - print("## Packages") - print("") - print("Platform | Package | SHA256") - print("--- | --- | ---") - for line in lines: - print(line) - package_output["version"] = args.version - logging.info(json.dumps(package_output, sort_keys=True, indent=4)) - if orig_branch != get_current_branch(): - logging.info("Moving back to original git branch: {}".format(orig_branch)) - run("git checkout {}".format(orig_branch), print_output=True) - - return 0 - -if __name__ == '__main__': - LOG_LEVEL = logging.INFO - if '--debug' in sys.argv[1:]: - LOG_LEVEL = logging.DEBUG - log_format = '[%(levelname)s] %(funcName)s: %(message)s' - logging.basicConfig(stream=sys.stdout, - level=LOG_LEVEL, - format=log_format) - - parser = argparse.ArgumentParser(description='InfluxDB build and packaging script.') - parser.add_argument('--verbose','-v','--debug', - action='store_true', - help='Use debug output') - parser.add_argument('--outdir', '-o', - metavar='', - 
                        default='./build/',
-                        type=os.path.abspath,
-                        help='Output directory')
-    parser.add_argument('--name', '-n',
-                        metavar='',
-                        default=PACKAGE_NAME,
-                        type=str,
-                        help='Name to use for package name (when package is specified)')
-    parser.add_argument('--arch',
-                        metavar='',
-                        type=str,
-                        default=get_system_arch(),
-                        help='Target architecture for build output')
-    parser.add_argument('--platform',
-                        metavar='',
-                        type=str,
-                        default=get_system_platform(),
-                        help='Target platform for build output')
-    parser.add_argument('--branch',
-                        metavar='',
-                        type=str,
-                        default=get_current_branch(),
-                        help='Build from a specific branch')
-    parser.add_argument('--commit',
-                        metavar='',
-                        type=str,
-                        default=get_current_commit(short=True),
-                        help='Build from a specific commit')
-    parser.add_argument('--version',
-                        metavar='',
-                        type=str,
-                        default=get_current_version(),
-                        help='Version information to apply to build output (ex: 0.12.0)')
-    parser.add_argument('--iteration',
-                        metavar='',
-                        type=str,
-                        default="1",
-                        help='Package iteration to apply to build output (defaults to 1)')
-    parser.add_argument('--stats',
-                        action='store_true',
-                        help='Emit build metrics (requires InfluxDB Python client)')
-    parser.add_argument('--stats-server',
-                        metavar='',
-                        type=str,
-                        help='Send build stats to InfluxDB using provided hostname and port')
-    parser.add_argument('--stats-db',
-                        metavar='',
-                        type=str,
-                        help='Send build stats to InfluxDB using provided database name')
-    parser.add_argument('--nightly',
-                        action='store_true',
-                        help='Mark build output as nightly build (will increment the minor version)')
-    parser.add_argument('--update',
-                        action='store_true',
-                        help='Update build dependencies prior to building')
-    parser.add_argument('--package',
-                        action='store_true',
-                        help='Package binary output')
-    parser.add_argument('--release',
-                        action='store_true',
-                        help='Mark build output as release')
-    parser.add_argument('--clean',
-                        action='store_true',
-                        help='Clean output directory before building')
-    parser.add_argument('--no-get',
-                        action='store_true',
-                        help='Do not retrieve pinned dependencies when building')
-    parser.add_argument('--no-uncommitted',
-                        action='store_true',
-                        help='Fail if uncommitted changes exist in the working directory')
-    parser.add_argument('--upload',
-                        action='store_true',
-                        help='Upload output packages to AWS S3')
-    parser.add_argument('--upload-overwrite', '-w',
-                        action='store_true',
-                        help='Overwrite existing packages in AWS S3 when uploading')
-    parser.add_argument('--bucket',
-                        metavar='',
-                        type=str,
-                        default=DEFAULT_BUCKET,
-                        help='Destination bucket for uploads')
-    parser.add_argument('--generate',
-                        action='store_true',
-                        default=True,
-                        help='Run "go generate" before building')
-    parser.add_argument('--build-tags',
-                        metavar='',
-                        help='Optional build tags to use for compilation')
-    parser.add_argument('--static',
-                        action='store_true',
-                        help='Create statically-compiled binary output')
-    parser.add_argument('--sign',
-                        action='store_true',
-                        help='Create GPG detached signatures for packages (when package is specified)')
-    parser.add_argument('--test',
-                        action='store_true',
-                        help='Run tests (does not produce build output)')
-    parser.add_argument('--no-vet',
-                        action='store_true',
-                        help='Do not run "go vet" when running tests')
-    parser.add_argument('--race',
-                        action='store_true',
-                        help='Enable race flag for build output')
-    parser.add_argument('--parallel',
-                        metavar='',
-                        type=int,
-                        help='Number of tests to run simultaneously')
-    parser.add_argument('--timeout',
-                        metavar='',
-                        type=str,
- help='Timeout for tests before failing') - parser.add_argument('--no-build', - action='store_true', - help='Dont build anything.') - args = parser.parse_args() - print_banner() - sys.exit(main(args)) diff --git a/chronograf/etc/config.sample.toml b/chronograf/etc/config.sample.toml deleted file mode 100644 index e575d4ef6f3..00000000000 --- a/chronograf/etc/config.sample.toml +++ /dev/null @@ -1 +0,0 @@ -# TODO: wire up configuration files \ No newline at end of file diff --git a/chronograf/etc/licenses.sh b/chronograf/etc/licenses.sh deleted file mode 100644 index 5cab4b66fc6..00000000000 --- a/chronograf/etc/licenses.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/sh - -for a in `gdl -no-vendored -test -repo ./... | awk 'NR>1 {print $5}'`; do echo \[\]\($a/blob/master/\) ; done -nlf -c |awk -F, '{printf "%s %s \[%s\]\(%s\)\n", $1, $2, $5, $4}' diff --git a/chronograf/etc/scripts/chronograf.service b/chronograf/etc/scripts/chronograf.service deleted file mode 100644 index 272429f865d..00000000000 --- a/chronograf/etc/scripts/chronograf.service +++ /dev/null @@ -1,21 +0,0 @@ -# If you modify this, please also make sure to edit init.sh - -[Unit] -Description=Open source monitoring and visualization UI for the entire TICK stack. -Documentation="https://www.influxdata.com/time-series-platform/chronograf/" -After=network-online.target - -[Service] -User=chronograf -Group=chronograf -Environment="HOST=0.0.0.0" -Environment="PORT=8888" -Environment="BOLT_PATH=/var/lib/chronograf/chronograf-v1.db" -Environment="CANNED_PATH=/usr/share/chronograf/canned" -EnvironmentFile=-/etc/default/chronograf -ExecStart=/usr/bin/chronograf $CHRONOGRAF_OPTS -KillMode=control-group -Restart=on-failure - -[Install] -WantedBy=multi-user.target diff --git a/chronograf/etc/scripts/docker/build.sh b/chronograf/etc/scripts/docker/build.sh deleted file mode 100755 index c40a1a236bf..00000000000 --- a/chronograf/etc/scripts/docker/build.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash -set -x -docker_tag="chronograf-$(date +%Y%m%d)" - -docker build --rm=false -f etc/Dockerfile_build -t builder:$docker_tag . -docker tag builder:$docker_tag quay.io/influxdb/builder:$docker_tag - -docker push quay.io/influxdb/builder:$docker_tag diff --git a/chronograf/etc/scripts/docker/pull.sh b/chronograf/etc/scripts/docker/pull.sh deleted file mode 100755 index dfe72f531e1..00000000000 --- a/chronograf/etc/scripts/docker/pull.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -# -# Pull the required build image from quay.io. -# - -if [[ -z "$DOCKER_TAG" ]]; then - echo "Please specify a tag to pull from with the DOCKER_TAG env variable." - exit 1 -fi - -docker pull quay.io/influxdb/builder:$DOCKER_TAG diff --git a/chronograf/etc/scripts/docker/run.sh b/chronograf/etc/scripts/docker/run.sh deleted file mode 100755 index 025952aec11..00000000000 --- a/chronograf/etc/scripts/docker/run.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash -# -# Pass all CLI arguments to Chronograf builder Docker image (passing -# them to the build scripts) -# -# WARNING: This script passes your SSH and AWS credentials within the -# Docker image, so use with caution. 
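-#
-# Example invocation (assuming the builder image for the tag below exists,
-# e.g. pulled via pull.sh or pushed by build.sh; the flags are forwarded
-# to the build script):
-#   DOCKER_TAG=chronograf-20161121 ./run.sh --package --platform linux --arch amd64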
-# - -set -e - -# Default SSH key to $HOME/.ssh/id_rsa if not set -test -z $SSH_KEY_PATH && SSH_KEY_PATH="$HOME/.ssh/id_rsa" -echo "Using SSH key located at: $SSH_KEY_PATH" - -# Default docker tag if not specified -test -z "$DOCKER_TAG" && DOCKER_TAG="chronograf-20161121" - -docker run \ - -e AWS_ACCESS_KEY_ID \ - -e AWS_SECRET_ACCESS_KEY \ - -v $SSH_KEY_PATH:/root/.ssh/id_rsa \ - -v ~/.ssh/known_hosts:/root/.ssh/known_hosts \ - -v $(pwd):/root/go/src/github.com/influxdata/influxdb/chronograf \ - quay.io/influxdb/builder:$DOCKER_TAG \ - "$@" diff --git a/chronograf/etc/scripts/init.sh b/chronograf/etc/scripts/init.sh deleted file mode 100755 index 6b52743f016..00000000000 --- a/chronograf/etc/scripts/init.sh +++ /dev/null @@ -1,112 +0,0 @@ -#!/bin/bash -### BEGIN INIT INFO -# Provides: chronograf -# Required-Start: $local_fs $network $named $time $syslog -# Required-Stop: $local_fs $network $named $time $syslog -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -# Short-Description: Start the Chronograf service at boot time -### END INIT INFO - -# If you modify this, please make sure to also edit chronograf.service - -# Script to execute when starting -SCRIPT="/usr/bin/chronograf" -export HOST="0.0.0.0" -export PORT="8888" -export BOLT_PATH="/var/lib/chronograf/chronograf-v1.db" -export CANNED_PATH="/usr/share/chronograf/canned" -# Options to pass to the script on startup -. /etc/default/chronograf -SCRIPT_OPTS="${CHRONOGRAF_OPTS}" - -# User to run the process under -RUNAS=chronograf - -# PID file for process -PIDFILE=/var/run/chronograf.pid -# Where to redirect logging to -LOGFILE=/var/log/chronograf/chronograf.log - -start() { - if [[ -f $PIDFILE ]]; then - # PIDFILE exists - if kill -0 $(cat $PIDFILE) &>/dev/null; then - # PID up, service running - echo '[OK] Service already running.' >&2 - return 0 - fi - fi - local CMD="$SCRIPT $SCRIPT_OPTS 1>>\"$LOGFILE\" 2>&1 & echo \$!" - su -s /bin/sh -c "$CMD" $RUNAS > "$PIDFILE" - if [[ -f $PIDFILE ]]; then - # PIDFILE exists - if kill -0 $(cat $PIDFILE) &>/dev/null; then - # PID up, service running - echo '[OK] Service successfully started.' >&2 - return 0 - fi - fi - echo '[ERROR] Could not start service.' >&2 - return 1 -} - -status() { - if [[ -f $PIDFILE ]]; then - # PIDFILE exists - if ps -p $(cat $PIDFILE) &>/dev/null; then - # PID up, service running - echo '[OK] Service running.' >&2 - return 0 - fi - fi - echo '[ERROR] Service not running.' >&2 - return 1 -} - -stop() { - if [[ -f $PIDFILE ]]; then - # PIDFILE still exists - if kill -0 $(cat $PIDFILE) &>/dev/null; then - # PID still up - kill -15 $(cat $PIDFILE) &>/dev/null && rm -f "$PIDFILE" &>/dev/null - if [[ "$?" = "0" ]]; then - # Successful stop - echo '[OK] Service stopped.' >&2 - return 0 - else - # Unsuccessful stop - echo '[ERROR] Could not stop service.' >&2 - return 1 - fi - fi - fi - echo "[OK] Service already stopped." - return 0 -} - -case "$1" in - start) - if [[ "$UID" != "0" ]]; then - echo "[ERROR] Permission denied." - exit 1 - fi - start - ;; - status) - status - ;; - stop) - if [[ "$UID" != "0" ]]; then - echo "[ERROR] Permission denied." 
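-        # (both start and stop must run as root: the PID file lives under
-        # /var/run and the daemon is launched via su as the $RUNAS user)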
- exit 1 - fi - stop - ;; - restart) - stop - start - ;; - *) - echo "Usage: $0 {start|status|stop|restart}" - esac diff --git a/chronograf/etc/scripts/logrotate b/chronograf/etc/scripts/logrotate deleted file mode 100644 index f172c0dff69..00000000000 --- a/chronograf/etc/scripts/logrotate +++ /dev/null @@ -1,9 +0,0 @@ -/var/log/chronograf/chronograf.log { - daily - rotate 7 - missingok - dateext - copytruncate - compress - notifempty -} diff --git a/chronograf/etc/scripts/post-install.sh b/chronograf/etc/scripts/post-install.sh deleted file mode 100644 index 0d570c53b8e..00000000000 --- a/chronograf/etc/scripts/post-install.sh +++ /dev/null @@ -1,83 +0,0 @@ -#!/bin/bash - -BIN_DIR=/usr/bin -DATA_DIR=/var/lib/chronograf -LOG_DIR=/var/log/chronograf -SCRIPT_DIR=/usr/lib/chronograf/scripts -LOGROTATE_DIR=/etc/logrotate.d - -function install_init { - cp -f $SCRIPT_DIR/init.sh /etc/init.d/chronograf - chmod +x /etc/init.d/chronograf -} - -function install_systemd { - # Remove any existing symlinks - rm -f /etc/systemd/system/chronograf.service - - cp -f $SCRIPT_DIR/chronograf.service /lib/systemd/system/chronograf.service - systemctl enable chronograf || true - systemctl daemon-reload || true -} - -function install_update_rcd { - update-rc.d chronograf defaults -} - -function install_chkconfig { - chkconfig --add chronograf -} - -id chronograf &>/dev/null -if [[ $? -ne 0 ]]; then - useradd --system -U -M chronograf -s /bin/false -d $DATA_DIR -fi - -test -d $LOG_DIR || mkdir -p $DATA_DIR -test -d $DATA_DIR || mkdir -p $DATA_DIR -chown -R -L chronograf:chronograf $LOG_DIR -chown -R -L chronograf:chronograf $DATA_DIR -chmod 755 $LOG_DIR -chmod 755 $DATA_DIR - -# Remove legacy symlink, if it exists -if [[ -L /etc/init.d/chronograf ]]; then - rm -f /etc/init.d/chronograf -fi - -# Add defaults file, if it doesn't exist -if [[ ! -f /etc/default/chronograf ]]; then - touch /etc/default/chronograf -fi - -# Distribution-specific logic -if [[ -f /etc/redhat-release ]]; then - # RHEL-variant logic - which systemctl &>/dev/null - if [[ $? -eq 0 ]]; then - install_systemd - else - # Assuming sysv - install_init - install_chkconfig - fi -elif [[ -f /etc/debian_version ]]; then - # Debian/Ubuntu logic - which systemctl &>/dev/null - if [[ $? -eq 0 ]]; then - install_systemd - systemctl restart chronograf || echo "WARNING: systemd not running." - else - # Assuming sysv - install_init - install_update_rcd - invoke-rc.d chronograf restart - fi -elif [[ -f /etc/os-release ]]; then - source /etc/os-release - if [[ $ID = "amzn" ]]; then - # Amazon Linux logic - install_init - install_chkconfig - fi -fi diff --git a/chronograf/etc/scripts/post-uninstall.sh b/chronograf/etc/scripts/post-uninstall.sh deleted file mode 100644 index 7fee6e4c4d3..00000000000 --- a/chronograf/etc/scripts/post-uninstall.sh +++ /dev/null @@ -1,56 +0,0 @@ -#!/bin/bash - -function disable_systemd { - systemctl disable chronograf - rm -f /lib/systemd/system/chronograf.service -} - -function disable_update_rcd { - update-rc.d -f chronograf remove - rm -f /etc/init.d/chronograf -} - -function disable_chkconfig { - chkconfig --del chronograf - rm -f /etc/init.d/chronograf -} - -if [[ -f /etc/redhat-release ]]; then - # RHEL-variant logic - if [[ "$1" = "0" ]]; then - # chronograf is no longer installed, remove from init system - rm -f /etc/default/chronograf - - which systemctl &>/dev/null - if [[ $? 
-eq 0 ]]; then - disable_systemd - else - # Assuming sysv - disable_chkconfig - fi - fi -elif [[ -f /etc/lsb-release ]]; then - # Debian/Ubuntu logic - if [[ "$1" != "upgrade" ]]; then - # Remove/purge - rm -f /etc/default/chronograf - - which systemctl &>/dev/null - if [[ $? -eq 0 ]]; then - disable_systemd - else - # Assuming sysv - disable_update_rcd - fi - fi -elif [[ -f /etc/os-release ]]; then - source /etc/os-release - if [[ $ID = "amzn" ]]; then - # Amazon Linux logic - if [[ "$1" = "0" ]]; then - # chronograf is no longer installed, remove from init system - rm -f /etc/default/chronograf - disable_chkconfig - fi - fi -fi diff --git a/chronograf/filestore/apps.go b/chronograf/filestore/apps.go deleted file mode 100644 index 902808f6613..00000000000 --- a/chronograf/filestore/apps.go +++ /dev/null @@ -1,205 +0,0 @@ -package filestore - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/pkg/fs" -) - -// AppExt is the the file extension searched for in the directory for layout files -const AppExt = ".json" - -// Apps are canned JSON layouts. Implements LayoutsStore. -type Apps struct { - Dir string // Dir is the directory contained the pre-canned applications. - Load func(string) (chronograf.Layout, error) // Load loads string name and return a Layout - Filename func(string, chronograf.Layout) string // Filename takes dir and layout and returns loadable file - Create func(string, chronograf.Layout) error // Create will write layout to file. - ReadDir func(dirname string) ([]os.FileInfo, error) // ReadDir reads the directory named by dirname and returns a list of directory entries sorted by filename. - Remove func(name string) error // Remove file - IDs chronograf.ID // IDs generate unique ids for new application layouts - Logger chronograf.Logger -} - -// NewApps constructs a layout store wrapping a file system directory -func NewApps(dir string, ids chronograf.ID, logger chronograf.Logger) chronograf.LayoutsStore { - return &Apps{ - Dir: dir, - Load: loadFile, - Filename: fileName, - Create: createLayout, - ReadDir: ioutil.ReadDir, - Remove: os.Remove, - IDs: ids, - Logger: logger, - } -} - -func fileName(dir string, layout chronograf.Layout) string { - base := fmt.Sprintf("%s%s", layout.Measurement, AppExt) - return path.Join(dir, base) -} - -func loadFile(name string) (chronograf.Layout, error) { - octets, err := ioutil.ReadFile(name) - if err != nil { - return chronograf.Layout{}, chronograf.ErrLayoutNotFound - } - var layout chronograf.Layout - if err = json.Unmarshal(octets, &layout); err != nil { - return chronograf.Layout{}, chronograf.ErrLayoutInvalid - } - return layout, nil -} - -func createLayout(file string, layout chronograf.Layout) error { - h, err := fs.CreateFile(file) - if err != nil { - return err - } - defer h.Close() - if octets, err := json.MarshalIndent(layout, " ", " "); err != nil { - return chronograf.ErrLayoutInvalid - } else if _, err := h.Write(octets); err != nil { - return err - } - - return nil -} - -// All returns all layouts from the directory -func (a *Apps) All(ctx context.Context) ([]chronograf.Layout, error) { - files, err := a.ReadDir(a.Dir) - if err != nil { - return nil, err - } - - layouts := []chronograf.Layout{} - for _, file := range files { - if path.Ext(file.Name()) != AppExt { - continue - } - if layout, err := a.Load(path.Join(a.Dir, file.Name())); err != nil { - continue // We want to load all files we can. 
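-			// (a malformed layout file is skipped instead of aborting, so a
-			// single bad canned app cannot prevent the others from loading)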
- } else { - layouts = append(layouts, layout) - } - } - return layouts, nil -} - -// Add creates a new layout within the directory -func (a *Apps) Add(ctx context.Context, layout chronograf.Layout) (chronograf.Layout, error) { - var err error - layout.ID, err = a.IDs.Generate() - if err != nil { - a.Logger. - WithField("component", "apps"). - Error("Unable to generate ID") - return chronograf.Layout{}, err - } - file := a.Filename(a.Dir, layout) - if err = a.Create(file, layout); err != nil { - if err == chronograf.ErrLayoutInvalid { - a.Logger. - WithField("component", "apps"). - WithField("name", file). - Error("Invalid Layout: ", err) - } else { - a.Logger. - WithField("component", "apps"). - WithField("name", file). - Error("Unable to write layout:", err) - } - return chronograf.Layout{}, err - } - return layout, nil -} - -// Delete removes a layout file from the directory -func (a *Apps) Delete(ctx context.Context, layout chronograf.Layout) error { - _, file, err := a.idToFile(layout.ID) - if err != nil { - return err - } - - if err := a.Remove(file); err != nil { - a.Logger. - WithField("component", "apps"). - WithField("name", file). - Error("Unable to remove layout:", err) - return err - } - return nil -} - -// Get returns an app file from the layout directory -func (a *Apps) Get(ctx context.Context, ID string) (chronograf.Layout, error) { - l, file, err := a.idToFile(ID) - if err != nil { - return chronograf.Layout{}, err - } - - if err != nil { - if err == chronograf.ErrLayoutNotFound { - a.Logger. - WithField("component", "apps"). - WithField("name", file). - Error("Unable to read file") - } else if err == chronograf.ErrLayoutInvalid { - a.Logger. - WithField("component", "apps"). - WithField("name", file). - Error("File is not a layout") - } - return chronograf.Layout{}, err - } - return l, nil -} - -// Update replaces a layout from the file system directory -func (a *Apps) Update(ctx context.Context, layout chronograf.Layout) error { - l, _, err := a.idToFile(layout.ID) - if err != nil { - return err - } - - if err := a.Delete(ctx, l); err != nil { - return err - } - file := a.Filename(a.Dir, layout) - return a.Create(file, layout) -} - -// idToFile takes an id and finds the associated filename -func (a *Apps) idToFile(ID string) (chronograf.Layout, string, error) { - // Because the entire layout information is not known at this point, we need - // to try to find the name of the file through matching the ID in the layout - // content with the ID passed. 
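-	// Note that this is a linear scan: each layout file in the directory is
-	// read and unmarshaled until the embedded ID matches.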
- files, err := a.ReadDir(a.Dir) - if err != nil { - return chronograf.Layout{}, "", err - } - - for _, f := range files { - if path.Ext(f.Name()) != AppExt { - continue - } - file := path.Join(a.Dir, f.Name()) - layout, err := a.Load(file) - if err != nil { - return chronograf.Layout{}, "", err - } - if layout.ID == ID { - return layout, file, nil - } - } - - return chronograf.Layout{}, "", chronograf.ErrLayoutNotFound -} diff --git a/chronograf/filestore/apps_test.go b/chronograf/filestore/apps_test.go deleted file mode 100644 index f304c75f580..00000000000 --- a/chronograf/filestore/apps_test.go +++ /dev/null @@ -1,378 +0,0 @@ -package filestore_test - -import ( - "context" - "errors" - "os" - "path" - "path/filepath" - "reflect" - "sort" - "strconv" - "testing" - "time" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/filestore" -) - -func TestAll(t *testing.T) { - t.Parallel() - var tests = []struct { - Existing []chronograf.Layout - Err error - }{ - { - Existing: []chronograf.Layout{ - {ID: "1", - Application: "howdy", - }, - {ID: "2", - Application: "doody", - }, - }, - Err: nil, - }, - { - Existing: []chronograf.Layout{}, - Err: nil, - }, - { - Existing: nil, - Err: errors.New("error"), - }, - } - for i, test := range tests { - apps, _ := MockApps(test.Existing, test.Err) - layouts, err := apps.All(context.Background()) - if err != test.Err { - t.Errorf("Test %d: apps all error expected: %v; actual: %v", i, test.Err, err) - } - if !reflect.DeepEqual(layouts, test.Existing) { - t.Errorf("Test %d: Layouts should be equal; expected %v; actual %v", i, test.Existing, layouts) - } - } -} - -func TestAdd(t *testing.T) { - t.Parallel() - var tests = []struct { - Existing []chronograf.Layout - Add chronograf.Layout - ExpectedID string - Err error - }{ - { - Existing: []chronograf.Layout{ - {ID: "1", - Application: "howdy", - }, - {ID: "2", - Application: "doody", - }, - }, - Add: chronograf.Layout{ - Application: "newbie", - }, - ExpectedID: "3", - Err: nil, - }, - { - Existing: []chronograf.Layout{}, - Add: chronograf.Layout{ - Application: "newbie", - }, - ExpectedID: "1", - Err: nil, - }, - { - Existing: nil, - Add: chronograf.Layout{ - Application: "newbie", - }, - ExpectedID: "", - Err: errors.New("error"), - }, - } - for i, test := range tests { - apps, _ := MockApps(test.Existing, test.Err) - layout, err := apps.Add(context.Background(), test.Add) - if err != test.Err { - t.Errorf("Test %d: apps add error expected: %v; actual: %v", i, test.Err, err) - } - - if layout.ID != test.ExpectedID { - t.Errorf("Test %d: Layout ID should be equal; expected %s; actual %s", i, test.ExpectedID, layout.ID) - } - } -} - -func TestDelete(t *testing.T) { - t.Parallel() - var tests = []struct { - Existing []chronograf.Layout - DeleteID string - Expected map[string]chronograf.Layout - Err error - }{ - { - Existing: []chronograf.Layout{ - {ID: "1", - Application: "howdy", - }, - {ID: "2", - Application: "doody", - }, - }, - DeleteID: "1", - Expected: map[string]chronograf.Layout{ - "dir/2.json": {ID: "2", - Application: "doody", - }, - }, - Err: nil, - }, - { - Existing: []chronograf.Layout{}, - DeleteID: "1", - Expected: map[string]chronograf.Layout{}, - Err: chronograf.ErrLayoutNotFound, - }, - { - Existing: nil, - DeleteID: "1", - Expected: map[string]chronograf.Layout{}, - Err: errors.New("error"), - }, - } - for i, test := range tests { - apps, actual := MockApps(test.Existing, test.Err) - err := apps.Delete(context.Background(), 
chronograf.Layout{ID: test.DeleteID}) - if err != test.Err { - t.Errorf("Test %d: apps delete error expected: %v; actual: %v", i, test.Err, err) - } - if !reflect.DeepEqual(*actual, test.Expected) { - t.Errorf("Test %d: Layouts should be equal; expected %v; actual %v", i, test.Expected, actual) - } - } -} - -func TestGet(t *testing.T) { - t.Parallel() - var tests = []struct { - Existing []chronograf.Layout - ID string - Expected chronograf.Layout - Err error - }{ - { - Existing: []chronograf.Layout{ - {ID: "1", - Application: "howdy", - }, - {ID: "2", - Application: "doody", - }, - }, - ID: "1", - Expected: chronograf.Layout{ - ID: "1", - Application: "howdy", - }, - Err: nil, - }, - { - Existing: []chronograf.Layout{}, - ID: "1", - Expected: chronograf.Layout{}, - Err: chronograf.ErrLayoutNotFound, - }, - { - Existing: nil, - ID: "1", - Expected: chronograf.Layout{}, - Err: chronograf.ErrLayoutNotFound, - }, - } - for i, test := range tests { - apps, _ := MockApps(test.Existing, test.Err) - layout, err := apps.Get(context.Background(), test.ID) - if err != test.Err { - t.Errorf("Test %d: Layouts get error expected: %v; actual: %v", i, test.Err, err) - } - if !reflect.DeepEqual(layout, test.Expected) { - t.Errorf("Test %d: Layouts should be equal; expected %v; actual %v", i, test.Expected, layout) - } - } -} - -func TestUpdate(t *testing.T) { - t.Parallel() - var tests = []struct { - Existing []chronograf.Layout - Update chronograf.Layout - Expected map[string]chronograf.Layout - Err error - }{ - { - Existing: []chronograf.Layout{ - {ID: "1", - Application: "howdy", - }, - {ID: "2", - Application: "doody", - }, - }, - Update: chronograf.Layout{ - ID: "1", - Application: "hello", - Measurement: "measurement", - }, - Expected: map[string]chronograf.Layout{ - "dir/1.json": {ID: "1", - Application: "hello", - Measurement: "measurement", - }, - "dir/2.json": {ID: "2", - Application: "doody", - }, - }, - Err: nil, - }, - { - Existing: []chronograf.Layout{}, - Update: chronograf.Layout{ - ID: "1", - }, - Expected: map[string]chronograf.Layout{}, - Err: chronograf.ErrLayoutNotFound, - }, - { - Existing: nil, - Update: chronograf.Layout{ - ID: "1", - }, - Expected: map[string]chronograf.Layout{}, - Err: chronograf.ErrLayoutNotFound, - }, - } - for i, test := range tests { - apps, actual := MockApps(test.Existing, test.Err) - err := apps.Update(context.Background(), test.Update) - if err != test.Err { - t.Errorf("Test %d: Layouts get error expected: %v; actual: %v", i, test.Err, err) - } - if !reflect.DeepEqual(*actual, test.Expected) { - t.Errorf("Test %d: Layouts should be equal; expected %v; actual %v", i, test.Expected, actual) - } - } -} - -type MockFileInfo struct { - name string -} - -func (m *MockFileInfo) Name() string { - return m.name -} - -func (m *MockFileInfo) Size() int64 { - return 0 -} - -func (m *MockFileInfo) Mode() os.FileMode { - return 0666 -} - -func (m *MockFileInfo) ModTime() time.Time { - return time.Now() -} - -func (m *MockFileInfo) IsDir() bool { - return false -} - -func (m *MockFileInfo) Sys() interface{} { - return nil -} - -type MockFileInfos []os.FileInfo - -func (m MockFileInfos) Len() int { return len(m) } -func (m MockFileInfos) Swap(i, j int) { m[i], m[j] = m[j], m[i] } -func (m MockFileInfos) Less(i, j int) bool { return m[i].Name() < m[j].Name() } - -type MockID struct { - id int -} - -func (m *MockID) Generate() (string, error) { - m.id++ - return strconv.Itoa(m.id), nil -} - -func MockApps(existing []chronograf.Layout, expected error) (filestore.Apps, 
*map[string]chronograf.Layout) { - layouts := map[string]chronograf.Layout{} - fileName := func(dir string, layout chronograf.Layout) string { - return path.Join(dir, layout.ID+".json") - } - dir := "dir" - for _, l := range existing { - layouts[fileName(dir, l)] = l - } - load := func(file string) (chronograf.Layout, error) { - if expected != nil { - return chronograf.Layout{}, expected - } - - l, ok := layouts[file] - if !ok { - return chronograf.Layout{}, chronograf.ErrLayoutNotFound - } - return l, nil - } - - create := func(file string, layout chronograf.Layout) error { - if expected != nil { - return expected - } - layouts[file] = layout - return nil - } - - readDir := func(dirname string) ([]os.FileInfo, error) { - if expected != nil { - return nil, expected - } - info := []os.FileInfo{} - for k := range layouts { - info = append(info, &MockFileInfo{filepath.Base(k)}) - } - sort.Sort(MockFileInfos(info)) - return info, nil - } - - remove := func(name string) error { - if expected != nil { - return expected - } - if _, ok := layouts[name]; !ok { - return chronograf.ErrLayoutNotFound - } - delete(layouts, name) - return nil - } - - return filestore.Apps{ - Dir: dir, - Load: load, - Filename: fileName, - Create: create, - ReadDir: readDir, - Remove: remove, - IDs: &MockID{ - id: len(existing), - }, - Logger: &chronograf.NoopLogger{}, - }, &layouts -} diff --git a/chronograf/filestore/dashboards.go b/chronograf/filestore/dashboards.go deleted file mode 100644 index e3eae92dbae..00000000000 --- a/chronograf/filestore/dashboards.go +++ /dev/null @@ -1,211 +0,0 @@ -package filestore - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path" - "strconv" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/pkg/fs" -) - -// DashExt is the the file extension searched for in the directory for dashboard files -const DashExt = ".dashboard" - -var _ chronograf.DashboardsStore = &Dashboards{} - -// Dashboards are JSON dashboards stored in the filesystem -type Dashboards struct { - Dir string // Dir is the directory containing the dashboards. - Load func(string, interface{}) error // Load loads string name and dashboard passed in as interface - Create func(string, interface{}) error // Create will write dashboard to file. - ReadDir func(dirname string) ([]os.FileInfo, error) // ReadDir reads the directory named by dirname and returns a list of directory entries sorted by filename. 
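-	// (the function fields above are injectable so tests can substitute an
-	// in-memory filesystem, as MockApps does for the layouts store)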
- Remove func(name string) error // Remove file - IDs chronograf.ID // IDs generate unique ids for new dashboards - Logger chronograf.Logger -} - -// NewDashboards constructs a dashboard store wrapping a file system directory -func NewDashboards(dir string, ids chronograf.ID, logger chronograf.Logger) chronograf.DashboardsStore { - return &Dashboards{ - Dir: dir, - Load: load, - Create: create, - ReadDir: ioutil.ReadDir, - Remove: os.Remove, - IDs: ids, - Logger: logger, - } -} - -func dashboardFile(dir string, dashboard chronograf.Dashboard) string { - base := fmt.Sprintf("%s%s", dashboard.Name, DashExt) - return path.Join(dir, base) -} - -func load(name string, resource interface{}) error { - octets, err := templatedFromEnv(name) - if err != nil { - return fmt.Errorf("resource %s not found", name) - } - - return json.Unmarshal(octets, resource) -} - -func create(file string, resource interface{}) error { - h, err := fs.CreateFile(file) - if err != nil { - return err - } - defer h.Close() - - octets, err := json.MarshalIndent(resource, " ", " ") - if err != nil { - return err - } - - _, err = h.Write(octets) - return err -} - -// All returns all dashboards from the directory -func (d *Dashboards) All(ctx context.Context) ([]chronograf.Dashboard, error) { - files, err := d.ReadDir(d.Dir) - if err != nil { - return nil, err - } - - dashboards := []chronograf.Dashboard{} - for _, file := range files { - if path.Ext(file.Name()) != DashExt { - continue - } - var dashboard chronograf.Dashboard - if err := d.Load(path.Join(d.Dir, file.Name()), &dashboard); err != nil { - continue // We want to load all files we can. - } else { - dashboards = append(dashboards, dashboard) - } - } - return dashboards, nil -} - -// Add creates a new dashboard within the directory -func (d *Dashboards) Add(ctx context.Context, dashboard chronograf.Dashboard) (chronograf.Dashboard, error) { - genID, err := d.IDs.Generate() - if err != nil { - d.Logger. - WithField("component", "dashboard"). - Error("Unable to generate ID") - return chronograf.Dashboard{}, err - } - - id, err := strconv.Atoi(genID) - if err != nil { - d.Logger. - WithField("component", "dashboard"). - Error("Unable to convert ID") - return chronograf.Dashboard{}, err - } - - dashboard.ID = chronograf.DashboardID(id) - - file := dashboardFile(d.Dir, dashboard) - if err = d.Create(file, dashboard); err != nil { - if err == chronograf.ErrDashboardInvalid { - d.Logger. - WithField("component", "dashboard"). - WithField("name", file). - Error("Invalid Dashboard: ", err) - } else { - d.Logger. - WithField("component", "dashboard"). - WithField("name", file). - Error("Unable to write dashboard:", err) - } - return chronograf.Dashboard{}, err - } - return dashboard, nil -} - -// Delete removes a dashboard file from the directory -func (d *Dashboards) Delete(ctx context.Context, dashboard chronograf.Dashboard) error { - _, file, err := d.idToFile(dashboard.ID) - if err != nil { - return err - } - - if err := d.Remove(file); err != nil { - d.Logger. - WithField("component", "dashboard"). - WithField("name", file). - Error("Unable to remove dashboard:", err) - return err - } - return nil -} - -// Get returns a dashboard file from the dashboard directory -func (d *Dashboards) Get(ctx context.Context, id chronograf.DashboardID) (chronograf.Dashboard, error) { - board, file, err := d.idToFile(id) - if err != nil { - if err == chronograf.ErrDashboardNotFound { - d.Logger. - WithField("component", "dashboard"). - WithField("name", file). 
- Error("Unable to read file") - } else if err == chronograf.ErrDashboardInvalid { - d.Logger. - WithField("component", "dashboard"). - WithField("name", file). - Error("File is not a dashboard") - } - return chronograf.Dashboard{}, err - } - return board, nil -} - -// Update replaces a dashboard from the file system directory -func (d *Dashboards) Update(ctx context.Context, dashboard chronograf.Dashboard) error { - board, _, err := d.idToFile(dashboard.ID) - if err != nil { - return err - } - - if err := d.Delete(ctx, board); err != nil { - return err - } - file := dashboardFile(d.Dir, dashboard) - return d.Create(file, dashboard) -} - -// idToFile takes an id and finds the associated filename -func (d *Dashboards) idToFile(id chronograf.DashboardID) (chronograf.Dashboard, string, error) { - // Because the entire dashboard information is not known at this point, we need - // to try to find the name of the file through matching the ID in the dashboard - // content with the ID passed. - files, err := d.ReadDir(d.Dir) - if err != nil { - return chronograf.Dashboard{}, "", err - } - - for _, f := range files { - if path.Ext(f.Name()) != DashExt { - continue - } - file := path.Join(d.Dir, f.Name()) - var dashboard chronograf.Dashboard - if err := d.Load(file, &dashboard); err != nil { - return chronograf.Dashboard{}, "", err - } - if dashboard.ID == id { - return dashboard, file, nil - } - } - - return chronograf.Dashboard{}, "", chronograf.ErrDashboardNotFound -} diff --git a/chronograf/filestore/environ.go b/chronograf/filestore/environ.go deleted file mode 100644 index 091e179e802..00000000000 --- a/chronograf/filestore/environ.go +++ /dev/null @@ -1,24 +0,0 @@ -package filestore - -import ( - "os" - "strings" -) - -var env map[string]string - -// environ returns a map of all environment variables in the running process -func environ() map[string]string { - if env == nil { - env = make(map[string]string) - envVars := os.Environ() - for _, envVar := range envVars { - kv := strings.SplitN(envVar, "=", 2) - if len(kv) != 2 { - continue - } - env[kv[0]] = kv[1] - } - } - return env -} diff --git a/chronograf/filestore/environ_test.go b/chronograf/filestore/environ_test.go deleted file mode 100644 index 6894848062b..00000000000 --- a/chronograf/filestore/environ_test.go +++ /dev/null @@ -1,29 +0,0 @@ -package filestore - -import ( - "os" - "testing" -) - -func Test_environ(t *testing.T) { - tests := []struct { - name string - key string - value string - }{ - { - name: "environment variable is returned", - key: "CHRONOGRAF_TEST_ENVIRON", - value: "howdy", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - os.Setenv(tt.key, tt.value) - got := environ() - if v, ok := got[tt.key]; !ok || v != tt.value { - t.Errorf("environ() = %v, want %v", v, tt.value) - } - }) - } -} diff --git a/chronograf/filestore/kapacitors.go b/chronograf/filestore/kapacitors.go deleted file mode 100644 index 6b77c82ef0d..00000000000 --- a/chronograf/filestore/kapacitors.go +++ /dev/null @@ -1,186 +0,0 @@ -package filestore - -import ( - "context" - "fmt" - "io/ioutil" - "os" - "path" - "strconv" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -// KapExt is the the file extension searched for in the directory for kapacitor files -const KapExt = ".kap" - -var _ chronograf.ServersStore = &Kapacitors{} - -// Kapacitors are JSON kapacitors stored in the filesystem -type Kapacitors struct { - Dir string // Dir is the directory containing the kapacitors. 
- Load func(string, interface{}) error // Load loads string name and dashboard passed in as interface - Create func(string, interface{}) error // Create will write kapacitor to file. - ReadDir func(dirname string) ([]os.FileInfo, error) // ReadDir reads the directory named by dirname and returns a list of directory entries sorted by filename. - Remove func(name string) error // Remove file - IDs chronograf.ID // IDs generate unique ids for new kapacitors - Logger chronograf.Logger -} - -// NewKapacitors constructs a kapacitor store wrapping a file system directory -func NewKapacitors(dir string, ids chronograf.ID, logger chronograf.Logger) chronograf.ServersStore { - return &Kapacitors{ - Dir: dir, - Load: load, - Create: create, - ReadDir: ioutil.ReadDir, - Remove: os.Remove, - IDs: ids, - Logger: logger, - } -} - -func kapacitorFile(dir string, kapacitor chronograf.Server) string { - base := fmt.Sprintf("%s%s", kapacitor.Name, KapExt) - return path.Join(dir, base) -} - -// All returns all kapacitors from the directory -func (d *Kapacitors) All(ctx context.Context) ([]chronograf.Server, error) { - files, err := d.ReadDir(d.Dir) - if err != nil { - return nil, err - } - - kapacitors := []chronograf.Server{} - for _, file := range files { - if path.Ext(file.Name()) != KapExt { - continue - } - var kapacitor chronograf.Server - if err := d.Load(path.Join(d.Dir, file.Name()), &kapacitor); err != nil { - var fmtErr = fmt.Errorf("error loading kapacitor configuration from %v:\n%v", path.Join(d.Dir, file.Name()), err) - d.Logger.Error(fmtErr) - continue // We want to load all files we can. - } else { - kapacitors = append(kapacitors, kapacitor) - } - } - return kapacitors, nil -} - -// Add creates a new kapacitor within the directory -func (d *Kapacitors) Add(ctx context.Context, kapacitor chronograf.Server) (chronograf.Server, error) { - genID, err := d.IDs.Generate() - if err != nil { - d.Logger. - WithField("component", "kapacitor"). - Error("Unable to generate ID") - return chronograf.Server{}, err - } - - id, err := strconv.Atoi(genID) - if err != nil { - d.Logger. - WithField("component", "kapacitor"). - Error("Unable to convert ID") - return chronograf.Server{}, err - } - - kapacitor.ID = id - - file := kapacitorFile(d.Dir, kapacitor) - if err = d.Create(file, kapacitor); err != nil { - if err == chronograf.ErrServerInvalid { - d.Logger. - WithField("component", "kapacitor"). - WithField("name", file). - Error("Invalid Server: ", err) - } else { - d.Logger. - WithField("component", "kapacitor"). - WithField("name", file). - Error("Unable to write kapacitor:", err) - } - return chronograf.Server{}, err - } - return kapacitor, nil -} - -// Delete removes a kapacitor file from the directory -func (d *Kapacitors) Delete(ctx context.Context, kapacitor chronograf.Server) error { - _, file, err := d.idToFile(kapacitor.ID) - if err != nil { - return err - } - - if err := d.Remove(file); err != nil { - d.Logger. - WithField("component", "kapacitor"). - WithField("name", file). - Error("Unable to remove kapacitor:", err) - return err - } - return nil -} - -// Get returns a kapacitor file from the kapacitor directory -func (d *Kapacitors) Get(ctx context.Context, id int) (chronograf.Server, error) { - board, file, err := d.idToFile(id) - if err != nil { - if err == chronograf.ErrServerNotFound { - d.Logger. - WithField("component", "kapacitor"). - WithField("name", file). - Error("Unable to read file") - } else if err == chronograf.ErrServerInvalid { - d.Logger. 
- WithField("component", "kapacitor"). - WithField("name", file). - Error("File is not a kapacitor") - } - return chronograf.Server{}, err - } - return board, nil -} - -// Update replaces a kapacitor from the file system directory -func (d *Kapacitors) Update(ctx context.Context, kapacitor chronograf.Server) error { - board, _, err := d.idToFile(kapacitor.ID) - if err != nil { - return err - } - - if err := d.Delete(ctx, board); err != nil { - return err - } - file := kapacitorFile(d.Dir, kapacitor) - return d.Create(file, kapacitor) -} - -// idToFile takes an id and finds the associated filename -func (d *Kapacitors) idToFile(id int) (chronograf.Server, string, error) { - // Because the entire kapacitor information is not known at this point, we need - // to try to find the name of the file through matching the ID in the kapacitor - // content with the ID passed. - files, err := d.ReadDir(d.Dir) - if err != nil { - return chronograf.Server{}, "", err - } - - for _, f := range files { - if path.Ext(f.Name()) != KapExt { - continue - } - file := path.Join(d.Dir, f.Name()) - var kapacitor chronograf.Server - if err := d.Load(file, &kapacitor); err != nil { - return chronograf.Server{}, "", err - } - if kapacitor.ID == id { - return kapacitor, file, nil - } - } - - return chronograf.Server{}, "", chronograf.ErrServerNotFound -} diff --git a/chronograf/filestore/organizations.go b/chronograf/filestore/organizations.go deleted file mode 100644 index 41aaadd1b92..00000000000 --- a/chronograf/filestore/organizations.go +++ /dev/null @@ -1,117 +0,0 @@ -package filestore - -import ( - "context" - "fmt" - "io/ioutil" - "os" - "path" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -// OrgExt is the the file extension searched for in the directory for org files -const OrgExt = ".org" - -var _ chronograf.OrganizationsStore = (*Organizations)(nil) - -// Organizations are JSON orgs stored in the filesystem -type Organizations struct { - Dir string // Dir is the directory containing the orgs. - Load func(string, interface{}) error // Load loads string name and org passed in as interface - ReadDir func(dirname string) ([]os.FileInfo, error) // ReadDir reads the directory named by dirname and returns a list of directory entries sorted by filename. - Logger chronograf.Logger -} - -// NewOrganizations constructs a org store wrapping a file system directory -func NewOrganizations(dir string, logger chronograf.Logger) chronograf.OrganizationsStore { - return &Organizations{ - Dir: dir, - Load: load, - ReadDir: ioutil.ReadDir, - Logger: logger, - } -} - -// All returns all orgs from the directory -func (o *Organizations) All(ctx context.Context) ([]chronograf.Organization, error) { - files, err := o.ReadDir(o.Dir) - if err != nil { - return nil, err - } - - orgs := []chronograf.Organization{} - for _, file := range files { - if path.Ext(file.Name()) != OrgExt { - continue - } - var org chronograf.Organization - if err := o.Load(path.Join(o.Dir, file.Name()), &org); err != nil { - continue // We want to load all files we can. 
- } else { - orgs = append(orgs, org) - } - } - return orgs, nil -} - -// Get returns a org file from the org directory -func (o *Organizations) Get(ctx context.Context, query chronograf.OrganizationQuery) (*chronograf.Organization, error) { - org, _, err := o.findOrg(query) - return org, err -} - -// Add is not allowed for the filesystem organization store -func (o *Organizations) Add(ctx context.Context, org *chronograf.Organization) (*chronograf.Organization, error) { - return nil, fmt.Errorf("unable to add organizations to the filesystem") -} - -// Delete is not allowed for the filesystem organization store -func (o *Organizations) Delete(ctx context.Context, org *chronograf.Organization) error { - return fmt.Errorf("unable to delete an organization from the filesystem") -} - -// Update is not allowed for the filesystem organization store -func (o *Organizations) Update(ctx context.Context, org *chronograf.Organization) error { - return fmt.Errorf("unable to update organizations on the filesystem") -} - -// CreateDefault is not allowed for the filesystem organization store -func (o *Organizations) CreateDefault(ctx context.Context) error { - return fmt.Errorf("unable to create default organizations on the filesystem") -} - -// DefaultOrganization is not allowed for the filesystem organization store -func (o *Organizations) DefaultOrganization(ctx context.Context) (*chronograf.Organization, error) { - return nil, fmt.Errorf("unable to get default organizations from the filestore") -} - -// findOrg takes an OrganizationQuery and finds the associated filename -func (o *Organizations) findOrg(query chronograf.OrganizationQuery) (*chronograf.Organization, string, error) { - // Because the entire org information is not known at this point, we need - // to try to find the name of the file through matching the ID or name in the org - // content with the ID passed. - files, err := o.ReadDir(o.Dir) - if err != nil { - return nil, "", err - } - - for _, f := range files { - if path.Ext(f.Name()) != OrgExt { - continue - } - file := path.Join(o.Dir, f.Name()) - var org chronograf.Organization - if err := o.Load(file, &org); err != nil { - return nil, "", err - } - if query.ID != nil && org.ID == *query.ID { - return &org, file, nil - } - if query.Name != nil && org.Name == *query.Name { - return &org, file, nil - } - } - - return nil, "", chronograf.ErrOrganizationNotFound -} diff --git a/chronograf/filestore/sources.go b/chronograf/filestore/sources.go deleted file mode 100644 index 8c970e673ad..00000000000 --- a/chronograf/filestore/sources.go +++ /dev/null @@ -1,186 +0,0 @@ -package filestore - -import ( - "context" - "fmt" - "io/ioutil" - "os" - "path" - "strconv" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -// SrcExt is the the file extension searched for in the directory for source files -const SrcExt = ".src" - -var _ chronograf.SourcesStore = &Sources{} - -// Sources are JSON sources stored in the filesystem -type Sources struct { - Dir string // Dir is the directory containing the sources. - Load func(string, interface{}) error // Load loads string name and dashboard passed in as interface - Create func(string, interface{}) error // Create will write source to file. - ReadDir func(dirname string) ([]os.FileInfo, error) // ReadDir reads the directory named by dirname and returns a list of directory entries sorted by filename. 
- Remove func(name string) error // Remove file - IDs chronograf.ID // IDs generate unique ids for new sources - Logger chronograf.Logger -} - -// NewSources constructs a source store wrapping a file system directory -func NewSources(dir string, ids chronograf.ID, logger chronograf.Logger) chronograf.SourcesStore { - return &Sources{ - Dir: dir, - Load: load, - Create: create, - ReadDir: ioutil.ReadDir, - Remove: os.Remove, - IDs: ids, - Logger: logger, - } -} - -func sourceFile(dir string, source chronograf.Source) string { - base := fmt.Sprintf("%s%s", source.Name, SrcExt) - return path.Join(dir, base) -} - -// All returns all sources from the directory -func (d *Sources) All(ctx context.Context) ([]chronograf.Source, error) { - files, err := d.ReadDir(d.Dir) - if err != nil { - return nil, err - } - - sources := []chronograf.Source{} - for _, file := range files { - if path.Ext(file.Name()) != SrcExt { - continue - } - var source chronograf.Source - if err := d.Load(path.Join(d.Dir, file.Name()), &source); err != nil { - var fmtErr = fmt.Errorf("error loading source configuration from %v:\n%v", path.Join(d.Dir, file.Name()), err) - d.Logger.Error(fmtErr) - continue // We want to load all files we can. - } else { - sources = append(sources, source) - } - } - return sources, nil -} - -// Add creates a new source within the directory -func (d *Sources) Add(ctx context.Context, source chronograf.Source) (chronograf.Source, error) { - genID, err := d.IDs.Generate() - if err != nil { - d.Logger. - WithField("component", "source"). - Error("Unable to generate ID") - return chronograf.Source{}, err - } - - id, err := strconv.Atoi(genID) - if err != nil { - d.Logger. - WithField("component", "source"). - Error("Unable to convert ID") - return chronograf.Source{}, err - } - - source.ID = id - - file := sourceFile(d.Dir, source) - if err = d.Create(file, source); err != nil { - if err == chronograf.ErrSourceInvalid { - d.Logger. - WithField("component", "source"). - WithField("name", file). - Error("Invalid Source: ", err) - } else { - d.Logger. - WithField("component", "source"). - WithField("name", file). - Error("Unable to write source:", err) - } - return chronograf.Source{}, err - } - return source, nil -} - -// Delete removes a source file from the directory -func (d *Sources) Delete(ctx context.Context, source chronograf.Source) error { - _, file, err := d.idToFile(source.ID) - if err != nil { - return err - } - - if err := d.Remove(file); err != nil { - d.Logger. - WithField("component", "source"). - WithField("name", file). - Error("Unable to remove source:", err) - return err - } - return nil -} - -// Get returns a source file from the source directory -func (d *Sources) Get(ctx context.Context, id int) (chronograf.Source, error) { - board, file, err := d.idToFile(id) - if err != nil { - if err == chronograf.ErrSourceNotFound { - d.Logger. - WithField("component", "source"). - WithField("name", file). - Error("Unable to read file") - } else if err == chronograf.ErrSourceInvalid { - d.Logger. - WithField("component", "source"). - WithField("name", file). 
- Error("File is not a source") - } - return chronograf.Source{}, err - } - return board, nil -} - -// Update replaces a source from the file system directory -func (d *Sources) Update(ctx context.Context, source chronograf.Source) error { - board, _, err := d.idToFile(source.ID) - if err != nil { - return err - } - - if err := d.Delete(ctx, board); err != nil { - return err - } - file := sourceFile(d.Dir, source) - return d.Create(file, source) -} - -// idToFile takes an id and finds the associated filename -func (d *Sources) idToFile(id int) (chronograf.Source, string, error) { - // Because the entire source information is not known at this point, we need - // to try to find the name of the file through matching the ID in the source - // content with the ID passed. - files, err := d.ReadDir(d.Dir) - if err != nil { - return chronograf.Source{}, "", err - } - - for _, f := range files { - if path.Ext(f.Name()) != SrcExt { - continue - } - file := path.Join(d.Dir, f.Name()) - var source chronograf.Source - if err := d.Load(file, &source); err != nil { - return chronograf.Source{}, "", err - } - if source.ID == id { - return source, file, nil - } - } - - return chronograf.Source{}, "", chronograf.ErrSourceNotFound -} diff --git a/chronograf/filestore/templates.go b/chronograf/filestore/templates.go deleted file mode 100644 index fc0e1ffc464..00000000000 --- a/chronograf/filestore/templates.go +++ /dev/null @@ -1,28 +0,0 @@ -package filestore - -import ( - "bytes" - "html/template" -) - -// templated returns all files templated using data -func templated(data interface{}, filenames ...string) ([]byte, error) { - t, err := template.ParseFiles(filenames...) - if err != nil { - return nil, err - } - var b bytes.Buffer - // If a key in the file exists but is not in the data we - // immediately fail with a missing key error - err = t.Option("missingkey=error").Execute(&b, data) - if err != nil { - return nil, err - } - - return b.Bytes(), nil -} - -// templatedFromEnv returns all files templated against environment variables -func templatedFromEnv(filenames ...string) ([]byte, error) { - return templated(environ(), filenames...) -} diff --git a/chronograf/filestore/templates_test.go b/chronograf/filestore/templates_test.go deleted file mode 100644 index 5d5b82f5df3..00000000000 --- a/chronograf/filestore/templates_test.go +++ /dev/null @@ -1,64 +0,0 @@ -package filestore - -import ( - "io/ioutil" - "os" - "reflect" - "testing" -) - -func Test_templated(t *testing.T) { - tests := []struct { - name string - content []string - data interface{} - want []byte - wantErr bool - }{ - { - name: "files with templates are rendered correctly", - content: []string{ - "{{ .MYVAR }}", - }, - data: map[string]string{ - "MYVAR": "howdy", - }, - want: []byte("howdy"), - }, - { - name: "missing key gives an error", - content: []string{ - "{{ .MYVAR }}", - }, - wantErr: true, - }, - { - name: "no files make me an error!", - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - filenames := make([]string, len(tt.content)) - for i, c := range tt.content { - f, err := ioutil.TempFile("", "") - if err != nil { - t.Fatal(err) - } - if _, err := f.Write([]byte(c)); err != nil { - t.Fatal(err) - } - filenames[i] = f.Name() - defer os.Remove(f.Name()) - } - got, err := templated(tt.data, filenames...) 
- if (err != nil) != tt.wantErr { - t.Errorf("templated() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("templated() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/chronograf/id/time.go b/chronograf/id/time.go deleted file mode 100644 index 6e50e69f8b0..00000000000 --- a/chronograf/id/time.go +++ /dev/null @@ -1,25 +0,0 @@ -package id - -import ( - "strconv" - "time" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -// tm generates an id based on current time -type tm struct { - Now func() time.Time -} - -// NewTime builds a chronograf.ID generator based on current time -func NewTime() chronograf.ID { - return &tm{ - Now: time.Now, - } -} - -// Generate creates a string based on the current time as an integer -func (i *tm) Generate() (string, error) { - return strconv.Itoa(int(i.Now().Unix())), nil -} diff --git a/chronograf/id/uuid.go b/chronograf/id/uuid.go deleted file mode 100644 index 7ff81074d6a..00000000000 --- a/chronograf/id/uuid.go +++ /dev/null @@ -1,20 +0,0 @@ -package id - -import ( - "github.com/influxdata/influxdb/v2/chronograf" - uuid "github.com/satori/go.uuid" -) - -var _ chronograf.ID = &UUID{} - -// UUID generates a V4 uuid -type UUID struct{} - -// Generate creates a UUID v4 string -func (i *UUID) Generate() (string, error) { - uuid, err := uuid.NewV4() - if err != nil { - return "", err - } - return uuid.String(), nil -} diff --git a/chronograf/influx/annotations.go b/chronograf/influx/annotations.go deleted file mode 100644 index 52223321037..00000000000 --- a/chronograf/influx/annotations.go +++ /dev/null @@ -1,270 +0,0 @@ -package influx - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "sort" - "time" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/id" -) - -const ( - // AllAnnotations returns all annotations from the chronograf database - AllAnnotations = `SELECT "start_time", "modified_time_ns", "text", "type", "id" FROM "annotations" WHERE "deleted"=false AND time >= %dns and "start_time" <= %d ORDER BY time DESC` - // GetAnnotationID returns all annotations from the chronograf database where id is %s - GetAnnotationID = `SELECT "start_time", "modified_time_ns", "text", "type", "id" FROM "annotations" WHERE "id"='%s' AND "deleted"=false ORDER BY time DESC` - // AnnotationsDB is chronograf. Perhaps later we allow this to be changed - AnnotationsDB = "chronograf" - // DefaultRP is autogen. Perhaps later we allow this to be changed - DefaultRP = "autogen" - // DefaultMeasurement is annotations. 
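The two generators removed from chronograf/id implement the same one-method interface: one derives an ID from the current Unix time (cheap, but collision-prone within a second), the other from a random V4 UUID. A condensed sketch of both, assuming an illustrative Generator interface in place of chronograf.ID and crypto/rand in place of the satori/go.uuid dependency:

package main

import (
	"crypto/rand"
	"fmt"
	"strconv"
	"time"
)

// Generator mirrors the chronograf.ID contract: produce a unique string ID.
type Generator interface {
	Generate() (string, error)
}

// timeID derives an ID from the current Unix time. Injecting Now keeps it
// testable, but two calls within the same second return the same ID.
type timeID struct{ Now func() time.Time }

func (t *timeID) Generate() (string, error) {
	return strconv.Itoa(int(t.Now().Unix())), nil
}

// uuidID builds a random version-4 UUID by hand from crypto/rand, a
// stand-in for the satori/go.uuid call in the deleted code.
type uuidID struct{}

func (uuidID) Generate() (string, error) {
	var b [16]byte
	if _, err := rand.Read(b[:]); err != nil {
		return "", err
	}
	b[6] = (b[6] & 0x0f) | 0x40 // version 4
	b[8] = (b[8] & 0x3f) | 0x80 // RFC 4122 variant
	return fmt.Sprintf("%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:16]), nil
}

func main() {
	for _, g := range []Generator{&timeID{Now: time.Now}, uuidID{}} {
		id, _ := g.Generate()
		fmt.Println(id)
	}
}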
- DefaultMeasurement = "annotations" -) - -var _ chronograf.AnnotationStore = &AnnotationStore{} - -// AnnotationStore stores annotations within InfluxDB -type AnnotationStore struct { - client chronograf.TimeSeries - id chronograf.ID - now Now -} - -// NewAnnotationStore constructs an annoation store with a client -func NewAnnotationStore(client chronograf.TimeSeries) *AnnotationStore { - return &AnnotationStore{ - client: client, - id: &id.UUID{}, - now: time.Now, - } -} - -// All lists all Annotations -func (a *AnnotationStore) All(ctx context.Context, start, stop time.Time) ([]chronograf.Annotation, error) { - return a.queryAnnotations(ctx, fmt.Sprintf(AllAnnotations, start.UnixNano(), stop.UnixNano())) -} - -// Get retrieves an annotation -func (a *AnnotationStore) Get(ctx context.Context, id string) (*chronograf.Annotation, error) { - annos, err := a.queryAnnotations(ctx, fmt.Sprintf(GetAnnotationID, id)) - if err != nil { - return nil, err - } - if len(annos) == 0 { - return nil, chronograf.ErrAnnotationNotFound - } - return &annos[0], nil -} - -// Add creates a new annotation in the store -func (a *AnnotationStore) Add(ctx context.Context, anno *chronograf.Annotation) (*chronograf.Annotation, error) { - var err error - anno.ID, err = a.id.Generate() - if err != nil { - return nil, err - } - return anno, a.client.Write(ctx, []chronograf.Point{ - toPoint(anno, a.now()), - }) -} - -// Delete removes the annotation from the store -func (a *AnnotationStore) Delete(ctx context.Context, id string) error { - cur, err := a.Get(ctx, id) - if err != nil { - return err - } - return a.client.Write(ctx, []chronograf.Point{ - toDeletedPoint(cur, a.now()), - }) -} - -// Update replaces annotation; if the annotation's time is different, it -// also removes the previous annotation -func (a *AnnotationStore) Update(ctx context.Context, anno *chronograf.Annotation) error { - cur, err := a.Get(ctx, anno.ID) - if err != nil { - return err - } - - if err := a.client.Write(ctx, []chronograf.Point{toPoint(anno, a.now())}); err != nil { - return err - } - - // If the updated annotation has a different time, then, we must - // delete the previous annotation - if !cur.EndTime.Equal(anno.EndTime) { - return a.client.Write(ctx, []chronograf.Point{ - toDeletedPoint(cur, a.now()), - }) - } - return nil -} - -// queryAnnotations queries the chronograf db and produces all annotations -func (a *AnnotationStore) queryAnnotations(ctx context.Context, query string) ([]chronograf.Annotation, error) { - res, err := a.client.Query(ctx, chronograf.Query{ - Command: query, - DB: AnnotationsDB, - Epoch: "ns", - }) - if err != nil { - return nil, err - } - octets, err := res.MarshalJSON() - if err != nil { - return nil, err - } - - results := influxResults{} - d := json.NewDecoder(bytes.NewReader(octets)) - d.UseNumber() - if err := d.Decode(&results); err != nil { - return nil, err - } - return results.Annotations() -} - -func toPoint(anno *chronograf.Annotation, now time.Time) chronograf.Point { - return chronograf.Point{ - Database: AnnotationsDB, - RetentionPolicy: DefaultRP, - Measurement: DefaultMeasurement, - Time: anno.EndTime.UnixNano(), - Tags: map[string]string{ - "id": anno.ID, - }, - Fields: map[string]interface{}{ - "deleted": false, - "start_time": anno.StartTime.UnixNano(), - "modified_time_ns": int64(now.UnixNano()), - "text": anno.Text, - "type": anno.Type, - }, - } -} - -func toDeletedPoint(anno *chronograf.Annotation, now time.Time) chronograf.Point { - return chronograf.Point{ - Database: AnnotationsDB, - 
RetentionPolicy: DefaultRP, - Measurement: DefaultMeasurement, - Time: anno.EndTime.UnixNano(), - Tags: map[string]string{ - "id": anno.ID, - }, - Fields: map[string]interface{}{ - "deleted": true, - "start_time": int64(0), - "modified_time_ns": int64(now.UnixNano()), - "text": "", - "type": "", - }, - } -} - -type value []interface{} - -func (v value) Int64(idx int) (int64, error) { - if idx >= len(v) { - return 0, fmt.Errorf("index %d does not exist in values", idx) - } - n, ok := v[idx].(json.Number) - if !ok { - return 0, fmt.Errorf("value at index %d is not int64, but, %T", idx, v[idx]) - } - return n.Int64() -} - -func (v value) Time(idx int) (time.Time, error) { - tm, err := v.Int64(idx) - if err != nil { - return time.Time{}, err - } - return time.Unix(0, tm), nil -} - -func (v value) String(idx int) (string, error) { - if idx >= len(v) { - return "", fmt.Errorf("index %d does not exist in values", idx) - } - str, ok := v[idx].(string) - if !ok { - return "", fmt.Errorf("value at index %d is not string, but, %T", idx, v[idx]) - } - return str, nil -} - -type influxResults []struct { - Series []struct { - Values []value `json:"values"` - } `json:"series"` -} - -// annotationResult is an intermediate struct to track the latest modified -// time of an annotation -type annotationResult struct { - chronograf.Annotation - // modTime is bookkeeping to handle the case when an update fails; the latest - // modTime will be the record returned - modTime int64 -} - -// Annotations converts AllAnnotations query to annotations -func (r *influxResults) Annotations() (res []chronograf.Annotation, err error) { - annos := map[string]annotationResult{} - for _, u := range *r { - for _, s := range u.Series { - for _, v := range s.Values { - anno := annotationResult{} - - if anno.EndTime, err = v.Time(0); err != nil { - return - } - - if anno.StartTime, err = v.Time(1); err != nil { - return - } - - if anno.modTime, err = v.Int64(2); err != nil { - return - } - - if anno.Text, err = v.String(3); err != nil { - return - } - - if anno.Type, err = v.String(4); err != nil { - return - } - - if anno.ID, err = v.String(5); err != nil { - return - } - - // If there are two annotations with the same id, take - // the annotation with the latest modification time - // This is to prevent issues when an update or delete fails. - // Updates and deletes are multiple step queries. 
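The scan that follows keeps, for each annotation ID, only the row with the greatest modified_time_ns, so a half-completed update or delete (both are multi-write operations) still resolves to the newest record. A reduced sketch of that reconciliation step, with illustrative types:

package main

import "fmt"

type row struct {
	ID      string
	Text    string
	ModTime int64 // modified_time_ns
}

// latestByID keeps only the most recently modified row per ID, the same
// rule the Annotations() method applies while walking the query values.
func latestByID(rows []row) map[string]row {
	latest := map[string]row{}
	for _, r := range rows {
		prev, ok := latest[r.ID]
		if !ok || r.ModTime > prev.ModTime {
			latest[r.ID] = r
		}
	}
	return latest
}

func main() {
	rows := []row{
		{ID: "a", Text: "old", ModTime: 1},
		{ID: "a", Text: "new", ModTime: 2},
	}
	fmt.Println(latestByID(rows)["a"].Text) // new
}

One caveat in the original: the comparator later passed to sort.Slice (StartTime.Before(...) || ID < ID) is not a strict ordering when start times differ but the IDs compare the other way, so the final order is unspecified in that case; comparing IDs only when the start times are equal would make it deterministic.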
- prev, ok := annos[anno.ID] - if !ok || anno.modTime > prev.modTime { - annos[anno.ID] = anno - } - } - } - } - res = []chronograf.Annotation{} - for _, a := range annos { - res = append(res, a.Annotation) - } - - sort.Slice(res, func(i int, j int) bool { - return res[i].StartTime.Before(res[j].StartTime) || res[i].ID < res[j].ID - }) - - return res, err -} diff --git a/chronograf/influx/annotations_test.go b/chronograf/influx/annotations_test.go deleted file mode 100644 index db356c0b04e..00000000000 --- a/chronograf/influx/annotations_test.go +++ /dev/null @@ -1,665 +0,0 @@ -package influx - -import ( - "context" - "encoding/json" - "fmt" - "reflect" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/mocks" -) - -func Test_toPoint(t *testing.T) { - tests := []struct { - name string - anno *chronograf.Annotation - now time.Time - want chronograf.Point - }{ - 0: { - name: "convert annotation to point w/o start and end times", - anno: &chronograf.Annotation{ - ID: "1", - Text: "mytext", - Type: "mytype", - }, - now: time.Unix(0, 0), - want: chronograf.Point{ - Database: AnnotationsDB, - RetentionPolicy: DefaultRP, - Measurement: DefaultMeasurement, - Time: time.Time{}.UnixNano(), - Tags: map[string]string{ - "id": "1", - }, - Fields: map[string]interface{}{ - "deleted": false, - "start_time": time.Time{}.UnixNano(), - "modified_time_ns": int64(time.Unix(0, 0).UnixNano()), - "text": "mytext", - "type": "mytype", - }, - }, - }, - 1: { - name: "convert annotation to point with start/end time", - anno: &chronograf.Annotation{ - ID: "1", - Text: "mytext", - Type: "mytype", - StartTime: time.Unix(100, 0), - EndTime: time.Unix(200, 0), - }, - now: time.Unix(0, 0), - want: chronograf.Point{ - Database: AnnotationsDB, - RetentionPolicy: DefaultRP, - Measurement: DefaultMeasurement, - Time: time.Unix(200, 0).UnixNano(), - Tags: map[string]string{ - "id": "1", - }, - Fields: map[string]interface{}{ - "deleted": false, - "start_time": time.Unix(100, 0).UnixNano(), - "modified_time_ns": int64(time.Unix(0, 0).UnixNano()), - "text": "mytext", - "type": "mytype", - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := toPoint(tt.anno, tt.now); !reflect.DeepEqual(got, tt.want) { - t.Errorf("toPoint() = %v, want %v", got, tt.want) - } - }) - } -} - -func Test_toDeletedPoint(t *testing.T) { - tests := []struct { - name string - anno *chronograf.Annotation - now time.Time - want chronograf.Point - }{ - 0: { - name: "convert annotation to point w/o start and end times", - anno: &chronograf.Annotation{ - ID: "1", - EndTime: time.Unix(0, 0), - }, - now: time.Unix(0, 0), - want: chronograf.Point{ - Database: AnnotationsDB, - RetentionPolicy: DefaultRP, - Measurement: DefaultMeasurement, - Time: 0, - Tags: map[string]string{ - "id": "1", - }, - Fields: map[string]interface{}{ - "deleted": true, - "start_time": int64(0), - "modified_time_ns": int64(0), - "text": "", - "type": "", - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := toDeletedPoint(tt.anno, tt.now); !cmp.Equal(got, tt.want) { - t.Errorf("toDeletedPoint() = %s", cmp.Diff(got, tt.want)) - } - }) - } -} - -func Test_value_Int64(t *testing.T) { - tests := []struct { - name string - v value - idx int - want int64 - wantErr bool - }{ - { - name: "index out of range returns error", - idx: 1, - wantErr: true, - }, - { - name: "converts a string to 
int64", - v: value{ - json.Number("1"), - }, - idx: 0, - want: int64(1), - }, - { - name: "when not a json.Number, return error", - v: value{ - "howdy", - }, - idx: 0, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := tt.v.Int64(tt.idx) - if (err != nil) != tt.wantErr { - t.Errorf("value.Int64() error = %v, wantErr %v", err, tt.wantErr) - return - } - if got != tt.want { - t.Errorf("value.Int64() = %v, want %v", got, tt.want) - } - }) - } -} - -func Test_value_Time(t *testing.T) { - tests := []struct { - name string - v value - idx int - want time.Time - wantErr bool - }{ - { - name: "index out of range returns error", - idx: 1, - wantErr: true, - }, - { - name: "converts a string to int64", - v: value{ - json.Number("1"), - }, - idx: 0, - want: time.Unix(0, 1), - }, - { - name: "when not a json.Number, return error", - v: value{ - "howdy", - }, - idx: 0, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := tt.v.Time(tt.idx) - if (err != nil) != tt.wantErr { - t.Errorf("value.Time() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("value.Time() = %v, want %v", got, tt.want) - } - }) - } -} - -func Test_value_String(t *testing.T) { - tests := []struct { - name string - v value - idx int - want string - wantErr bool - }{ - { - name: "index out of range returns error", - idx: 1, - wantErr: true, - }, - { - name: "converts a string", - v: value{ - "howdy", - }, - idx: 0, - want: "howdy", - }, - { - name: "when not a string, return error", - v: value{ - 0, - }, - idx: 0, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := tt.v.String(tt.idx) - if (err != nil) != tt.wantErr { - t.Errorf("value.String() error = %v, wantErr %v", err, tt.wantErr) - return - } - if got != tt.want { - t.Errorf("value.String() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestAnnotationStore_queryAnnotations(t *testing.T) { - type args struct { - ctx context.Context - query string - } - tests := []struct { - name string - client chronograf.TimeSeries - args args - want []chronograf.Annotation - wantErr bool - }{ - { - name: "query error returns an error", - client: &mocks.TimeSeries{ - QueryF: func(context.Context, chronograf.Query) (chronograf.Response, error) { - return nil, fmt.Errorf("error") - }, - }, - wantErr: true, - }, - { - name: "response marshal error returns an error", - client: &mocks.TimeSeries{ - QueryF: func(context.Context, chronograf.Query) (chronograf.Response, error) { - return mocks.NewResponse("", fmt.Errorf("")), nil - }, - }, - wantErr: true, - }, - { - name: "Bad JSON returns an error", - client: &mocks.TimeSeries{ - QueryF: func(context.Context, chronograf.Query) (chronograf.Response, error) { - return mocks.NewResponse(`{}`, nil), nil - }, - }, - wantErr: true, - }, - - { - name: "Incorrect fields returns error", - client: &mocks.TimeSeries{ - QueryF: func(context.Context, chronograf.Query) (chronograf.Response, error) { - return mocks.NewResponse(`[{ - "series": [ - { - "name": "annotations", - "columns": [ - "time", - "deleted", - "id", - "modified_time_ns", - "start_time", - "text", - "type" - ], - "values": [ - [ - 1516920117000000000, - true, - "4ba9f836-20e8-4b8e-af51-e1363edd7b6d", - 1517425994487495051, - 0, - "", - "" - ] - ] - } - ] - } - ]}]`, nil), nil - }, - }, - wantErr: true, - }, - { - name: "two annotation response", - client: &mocks.TimeSeries{ - 
QueryF: func(context.Context, chronograf.Query) (chronograf.Response, error) { - return mocks.NewResponse(`[ - { - "series": [ - { - "name": "annotations", - "columns": [ - "time", - "start_time", - "modified_time_ns", - "text", - "type", - "id" - ], - "values": [ - [ - 1516920177345000000, - 0, - 1516989242129417403, - "mytext", - "mytype", - "ecf3a75d-f1c0-40e8-9790-902701467e92" - ], - [ - 1516920177345000000, - 0, - 1517425914433539296, - "mytext2", - "mytype2", - "ea0aa94b-969a-4cd5-912a-5db61d502268" - ] - ] - } - ] - } - ]`, nil), nil - }, - }, - want: []chronograf.Annotation{ - { - EndTime: time.Unix(0, 1516920177345000000), - StartTime: time.Unix(0, 0), - Text: "mytext2", - Type: "mytype2", - ID: "ea0aa94b-969a-4cd5-912a-5db61d502268", - }, - { - EndTime: time.Unix(0, 1516920177345000000), - StartTime: time.Unix(0, 0), - Text: "mytext", - Type: "mytype", - ID: "ecf3a75d-f1c0-40e8-9790-902701467e92", - }, - }, - }, - { - name: "same id returns one", - client: &mocks.TimeSeries{ - QueryF: func(context.Context, chronograf.Query) (chronograf.Response, error) { - return mocks.NewResponse(`[ - { - "series": [ - { - "name": "annotations", - "columns": [ - "time", - "start_time", - "modified_time_ns", - "text", - "type", - "id" - ], - "values": [ - [ - 1516920177345000000, - 0, - 1516989242129417403, - "mytext", - "mytype", - "ea0aa94b-969a-4cd5-912a-5db61d502268" - ], - [ - 1516920177345000000, - 0, - 1517425914433539296, - "mytext2", - "mytype2", - "ea0aa94b-969a-4cd5-912a-5db61d502268" - ] - ] - } - ] - } - ]`, nil), nil - }, - }, - want: []chronograf.Annotation{ - { - EndTime: time.Unix(0, 1516920177345000000), - StartTime: time.Unix(0, 0), - Text: "mytext2", - Type: "mytype2", - ID: "ea0aa94b-969a-4cd5-912a-5db61d502268", - }, - }, - }, - { - name: "no responses returns empty array", - client: &mocks.TimeSeries{ - QueryF: func(context.Context, chronograf.Query) (chronograf.Response, error) { - return mocks.NewResponse(`[ { } ]`, nil), nil - }, - }, - want: []chronograf.Annotation{}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - a := &AnnotationStore{ - client: tt.client, - } - got, err := a.queryAnnotations(tt.args.ctx, tt.args.query) - if (err != nil) != tt.wantErr { - t.Errorf("AnnotationStore.queryAnnotations() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("AnnotationStore.queryAnnotations() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestAnnotationStore_Update(t *testing.T) { - type fields struct { - client chronograf.TimeSeries - now Now - } - type args struct { - ctx context.Context - anno *chronograf.Annotation - } - tests := []struct { - name string - fields fields - args args - wantErr bool - }{ - { - name: "no responses returns error", - fields: fields{ - client: &mocks.TimeSeries{ - QueryF: func(context.Context, chronograf.Query) (chronograf.Response, error) { - return mocks.NewResponse(`[ { } ]`, nil), nil - }, - WriteF: func(context.Context, []chronograf.Point) error { - return nil - }, - }, - }, - args: args{ - ctx: context.Background(), - anno: &chronograf.Annotation{ - ID: "1", - }, - }, - wantErr: true, - }, - { - name: "error writing returns error", - fields: fields{ - now: func() time.Time { return time.Time{} }, - client: &mocks.TimeSeries{ - QueryF: func(context.Context, chronograf.Query) (chronograf.Response, error) { - return mocks.NewResponse(`[ - { - "series": [ - { - "name": "annotations", - "columns": [ - "time", - "start_time", - "modified_time_ns", - 
"text", - "type", - "id" - ], - "values": [ - [ - 1516920177345000000, - 0, - 1516989242129417403, - "mytext", - "mytype", - "ecf3a75d-f1c0-40e8-9790-902701467e92" - ], - [ - 1516920177345000000, - 0, - 1517425914433539296, - "mytext2", - "mytype2", - "ea0aa94b-969a-4cd5-912a-5db61d502268" - ] - ] - } - ] - } - ]`, nil), nil - }, - WriteF: func(context.Context, []chronograf.Point) error { - return fmt.Errorf("error") - }, - }, - }, - args: args{ - ctx: context.Background(), - anno: &chronograf.Annotation{ - ID: "1", - }, - }, - wantErr: true, - }, - { - name: "Update with delete", - fields: fields{ - now: func() time.Time { return time.Time{} }, - client: &mocks.TimeSeries{ - QueryF: func(context.Context, chronograf.Query) (chronograf.Response, error) { - return mocks.NewResponse(`[ - { - "series": [ - { - "name": "annotations", - "columns": [ - "time", - "start_time", - "modified_time_ns", - "text", - "type", - "id" - ], - "values": [ - [ - 1516920177345000000, - 0, - 1516989242129417403, - "mytext", - "mytype", - "ecf3a75d-f1c0-40e8-9790-902701467e92" - ] - ] - } - ] - } - ]`, nil), nil - }, - WriteF: func(context.Context, []chronograf.Point) error { - return nil - }, - }, - }, - args: args{ - ctx: context.Background(), - anno: &chronograf.Annotation{ - ID: "1", - }, - }, - }, - { - name: "Update with delete no delete", - fields: fields{ - now: func() time.Time { return time.Time{} }, - client: &mocks.TimeSeries{ - QueryF: func(context.Context, chronograf.Query) (chronograf.Response, error) { - return mocks.NewResponse(`[ - { - "series": [ - { - "name": "annotations", - "columns": [ - "time", - "start_time", - "modified_time_ns", - "text", - "type", - "id" - ], - "values": [ - [ - 1516920177345000000, - 0, - 1516989242129417403, - "mytext", - "mytype", - "ecf3a75d-f1c0-40e8-9790-902701467e92" - ] - ] - } - ] - } - ]`, nil), nil - }, - WriteF: func(context.Context, []chronograf.Point) error { - return nil - }, - }, - }, - args: args{ - ctx: context.Background(), - anno: &chronograf.Annotation{ - ID: "ecf3a75d-f1c0-40e8-9790-902701467e92", - EndTime: time.Unix(0, 1516920177345000000), - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - a := &AnnotationStore{ - client: tt.fields.client, - now: tt.fields.now, - } - if err := a.Update(tt.args.ctx, tt.args.anno); (err != nil) != tt.wantErr { - t.Errorf("AnnotationStore.Update() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} diff --git a/chronograf/influx/authorization.go b/chronograf/influx/authorization.go deleted file mode 100644 index 7c0d2f6c193..00000000000 --- a/chronograf/influx/authorization.go +++ /dev/null @@ -1,94 +0,0 @@ -package influx - -import ( - "fmt" - "net/http" - "time" - - jwt "github.com/dgrijalva/jwt-go" - "github.com/influxdata/influxdb/v2/chronograf" -) - -// Authorizer adds optional authorization header to request -type Authorizer interface { - // Set may manipulate the request by adding the Authorization header - Set(req *http.Request) error -} - -// NoAuthorization does not add any authorization headers -type NoAuthorization struct{} - -// Set does not add authorization -func (n *NoAuthorization) Set(req *http.Request) error { return nil } - -// DefaultAuthorization creates either a shared JWT builder, basic auth or Noop -func DefaultAuthorization(src *chronograf.Source) Authorizer { - // Optionally, add the shared secret JWT token creation - if src.Username != "" && src.SharedSecret != "" { - return &BearerJWT{ - Username: src.Username, - SharedSecret: 
src.SharedSecret, - } - } else if src.Username != "" && src.Password != "" { - return &BasicAuth{ - Username: src.Username, - Password: src.Password, - } - } - return &NoAuthorization{} -} - -// BasicAuth adds Authorization: Basic to the request header -type BasicAuth struct { - Username string - Password string -} - -// Set adds the basic auth headers to the request -func (b *BasicAuth) Set(r *http.Request) error { - r.SetBasicAuth(b.Username, b.Password) - return nil -} - -// BearerJWT is the default Bearer for InfluxDB -type BearerJWT struct { - Username string - SharedSecret string - Now Now -} - -// Set adds an Authorization Bearer to the request if has a shared secret -func (b *BearerJWT) Set(r *http.Request) error { - if b.SharedSecret != "" && b.Username != "" { - token, err := b.Token(b.Username) - if err != nil { - return fmt.Errorf("unable to create token") - } - r.Header.Set("Authorization", "Bearer "+token) - } - return nil -} - -// Token returns the expected InfluxDB JWT signed with the sharedSecret -func (b *BearerJWT) Token(username string) (string, error) { - if b.Now == nil { - b.Now = time.Now - } - return JWT(username, b.SharedSecret, b.Now) -} - -// JWT returns a token string accepted by InfluxDB using the sharedSecret as an Authorization: Bearer header -func JWT(username, sharedSecret string, now Now) (string, error) { - token := &jwt.Token{ - Header: map[string]interface{}{ - "typ": "JWT", - "alg": jwt.SigningMethodHS512.Alg(), - }, - Claims: jwt.MapClaims{ - "username": username, - "exp": now().Add(time.Minute).Unix(), - }, - Method: jwt.SigningMethodHS512, - } - return token.SignedString([]byte(sharedSecret)) -} diff --git a/chronograf/influx/authorization_test.go b/chronograf/influx/authorization_test.go deleted file mode 100644 index a3553a50b0a..00000000000 --- a/chronograf/influx/authorization_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package influx - -import ( - "testing" - "time" -) - -func TestJWT(t *testing.T) { - type args struct { - username string - sharedSecret string - now Now - } - tests := []struct { - name string - args args - want string - wantErr bool - }{ - { - name: "", - args: args{ - username: "AzureDiamond", - sharedSecret: "hunter2", - now: func() time.Time { - return time.Unix(0, 0) - }, - }, - want: "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJleHAiOjYwLCJ1c2VybmFtZSI6IkF6dXJlRGlhbW9uZCJ9.kUWGwcpCPwV7MEk7luO1rt8036LyvG4bRL_CfseQGmz4b0S34gATx30g4xvqVAV6bwwYE0YU3P8FjG8ij4kc5g", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := JWT(tt.args.username, tt.args.sharedSecret, tt.args.now) - if (err != nil) != tt.wantErr { - t.Errorf("JWT() error = %v, wantErr %v", err, tt.wantErr) - return - } - if got != tt.want { - t.Errorf("JWT() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/chronograf/influx/databases.go b/chronograf/influx/databases.go deleted file mode 100644 index 042cdd2f66a..00000000000 --- a/chronograf/influx/databases.go +++ /dev/null @@ -1,269 +0,0 @@ -package influx - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/kit/tracing" -) - -// AllDB returns all databases from within Influx -func (c *Client) AllDB(ctx context.Context) ([]chronograf.Database, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - return c.showDatabases(ctx) -} - -// CreateDB creates a database within Influx -func (c *Client) CreateDB(ctx context.Context, db 
*chronograf.Database) (*chronograf.Database, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - _, err := c.Query(ctx, chronograf.Query{ - Command: fmt.Sprintf(`CREATE DATABASE "%s"`, db.Name), - }) - if err != nil { - return nil, err - } - - res := &chronograf.Database{Name: db.Name} - - return res, nil -} - -// DropDB drops a database within Influx -func (c *Client) DropDB(ctx context.Context, db string) error { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - _, err := c.Query(ctx, chronograf.Query{ - Command: fmt.Sprintf(`DROP DATABASE "%s"`, db), - DB: db, - }) - if err != nil { - return err - } - return nil -} - -// AllRP returns all the retention policies for a specific database -func (c *Client) AllRP(ctx context.Context, db string) ([]chronograf.RetentionPolicy, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - return c.showRetentionPolicies(ctx, db) -} - -func (c *Client) getRP(ctx context.Context, db, rp string) (chronograf.RetentionPolicy, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - rs, err := c.AllRP(ctx, db) - if err != nil { - return chronograf.RetentionPolicy{}, err - } - - for _, r := range rs { - if r.Name == rp { - return r, nil - } - } - return chronograf.RetentionPolicy{}, fmt.Errorf("unknown retention policy") -} - -// CreateRP creates a retention policy for a specific database -func (c *Client) CreateRP(ctx context.Context, db string, rp *chronograf.RetentionPolicy) (*chronograf.RetentionPolicy, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - query := fmt.Sprintf(`CREATE RETENTION POLICY "%s" ON "%s" DURATION %s REPLICATION %d`, rp.Name, db, rp.Duration, rp.Replication) - if len(rp.ShardDuration) != 0 { - query = fmt.Sprintf(`%s SHARD DURATION %s`, query, rp.ShardDuration) - } - - if rp.Default { - query = fmt.Sprintf(`%s DEFAULT`, query) - } - - _, err := c.Query(ctx, chronograf.Query{ - Command: query, - DB: db, - }) - if err != nil { - return nil, err - } - - res, err := c.getRP(ctx, db, rp.Name) - if err != nil { - return nil, err - } - - return &res, nil -} - -// UpdateRP updates a specific retention policy for a specific database -func (c *Client) UpdateRP(ctx context.Context, db string, rp string, upd *chronograf.RetentionPolicy) (*chronograf.RetentionPolicy, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - var buffer bytes.Buffer - buffer.WriteString(fmt.Sprintf(`ALTER RETENTION POLICY "%s" ON "%s"`, rp, db)) - if len(upd.Duration) > 0 { - buffer.WriteString(" DURATION " + upd.Duration) - } - if upd.Replication > 0 { - buffer.WriteString(" REPLICATION " + fmt.Sprint(upd.Replication)) - } - if len(upd.ShardDuration) > 0 { - buffer.WriteString(" SHARD DURATION " + upd.ShardDuration) - } - if upd.Default { - buffer.WriteString(" DEFAULT") - } - queryRes, err := c.Query(ctx, chronograf.Query{ - Command: buffer.String(), - DB: db, - RP: rp, - }) - if err != nil { - return nil, err - } - - // The ALTER RETENTION POLICIES statements puts the error within the results itself - // So, we have to crack open the results to see what happens - octets, err := queryRes.MarshalJSON() - if err != nil { - return nil, err - } - - results := make([]struct{ Error string }, 0) - if err := json.Unmarshal(octets, &results); err != nil { - return nil, err - } - - // At last, we can check if there are any error strings - for _, r := range results { - if r.Error != "" { - return nil, 
fmt.Errorf(r.Error) - } - } - - res, err := c.getRP(ctx, db, upd.Name) - if err != nil { - return nil, err - } - - return &res, nil -} - -// DropRP removes a specific retention policy for a specific database -func (c *Client) DropRP(ctx context.Context, db string, rp string) error { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - _, err := c.Query(ctx, chronograf.Query{ - Command: fmt.Sprintf(`DROP RETENTION POLICY "%s" ON "%s"`, rp, db), - DB: db, - RP: rp, - }) - if err != nil { - return err - } - return nil -} - -// GetMeasurements returns measurements in a specified database, paginated by -// optional limit and offset. If no limit or offset is provided, it defaults to -// a limit of 100 measurements with no offset. -func (c *Client) GetMeasurements(ctx context.Context, db string, limit, offset int) ([]chronograf.Measurement, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - return c.showMeasurements(ctx, db, limit, offset) -} - -func (c *Client) showDatabases(ctx context.Context) ([]chronograf.Database, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - res, err := c.Query(ctx, chronograf.Query{ - Command: `SHOW DATABASES`, - }) - if err != nil { - return nil, err - } - octets, err := res.MarshalJSON() - if err != nil { - return nil, err - } - - results := showResults{} - if err := json.Unmarshal(octets, &results); err != nil { - return nil, err - } - - return results.Databases(), nil -} - -func (c *Client) showRetentionPolicies(ctx context.Context, db string) ([]chronograf.RetentionPolicy, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - retentionPolicies, err := c.Query(ctx, chronograf.Query{ - Command: fmt.Sprintf(`SHOW RETENTION POLICIES ON "%s"`, db), - DB: db, - }) - - if err != nil { - return nil, err - } - octets, err := retentionPolicies.MarshalJSON() - if err != nil { - return nil, err - } - - results := showResults{} - if err := json.Unmarshal(octets, &results); err != nil { - return nil, err - } - - return results.RetentionPolicies(), nil -} - -func (c *Client) showMeasurements(ctx context.Context, db string, limit, offset int) ([]chronograf.Measurement, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - show := fmt.Sprintf(`SHOW MEASUREMENTS ON "%s"`, db) - if limit > 0 { - show += fmt.Sprintf(" LIMIT %d", limit) - } - - if offset > 0 { - show += fmt.Sprintf(" OFFSET %d", offset) - } - - measurements, err := c.Query(ctx, chronograf.Query{ - Command: show, - DB: db, - }) - - if err != nil { - return nil, err - } - octets, err := measurements.MarshalJSON() - if err != nil { - return nil, err - } - - results := showResults{} - if err := json.Unmarshal(octets, &results); err != nil { - return nil, err - } - - return results.Measurements(), nil -} diff --git a/chronograf/influx/influx.go b/chronograf/influx/influx.go deleted file mode 100644 index f6de6d5eac1..00000000000 --- a/chronograf/influx/influx.go +++ /dev/null @@ -1,388 +0,0 @@ -package influx - -import ( - "context" - "crypto/tls" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "strings" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/kit/tracing" -) - -var _ chronograf.TimeSeries = &Client{} -var _ chronograf.TSDBStatus = &Client{} -var _ chronograf.Databases = &Client{} - -// Shared transports for all clients to prevent leaking connections -var ( - skipVerifyTransport = 
&http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, - } - defaultTransport = &http.Transport{} -) - -// Client is a device for retrieving time series data from an InfluxDB instance -type Client struct { - URL *url.URL - Authorizer Authorizer - InsecureSkipVerify bool - Logger chronograf.Logger -} - -// Response is a partial JSON decoded InfluxQL response used -// to check for some errors -type Response struct { - Results json.RawMessage - Err string `json:"error,omitempty"` -} - -// MarshalJSON returns the raw results bytes from the response -func (r Response) MarshalJSON() ([]byte, error) { - return r.Results, nil -} - -func (c *Client) query(ctx context.Context, u *url.URL, q chronograf.Query) (chronograf.Response, error) { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - u.Path = "query" - req, err := http.NewRequest("POST", u.String(), nil) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "application/json") - command := q.Command - logs := c.Logger. - WithField("component", "proxy"). - WithField("host", req.Host). - WithField("command", command). - WithField("db", q.DB). - WithField("rp", q.RP) - logs.Debug("query") - - params := req.URL.Query() - params.Set("q", command) - params.Set("db", q.DB) - params.Set("rp", q.RP) - params.Set("epoch", "ms") - if q.Epoch != "" { - params.Set("epoch", q.Epoch) - } - req.URL.RawQuery = params.Encode() - tracing.InjectToHTTPRequest(span, req) - - if c.Authorizer != nil { - if err := c.Authorizer.Set(req); err != nil { - logs.Error("Error setting authorization header ", err) - return nil, err - } - } - - hc := &http.Client{} - if c.InsecureSkipVerify { - hc.Transport = skipVerifyTransport - } else { - hc.Transport = defaultTransport - } - resp, err := hc.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var response Response - dec := json.NewDecoder(resp.Body) - decErr := dec.Decode(&response) - - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("received status code %d from server: err: %s", resp.StatusCode, response.Err) - } - - // ignore this error if we got an invalid status code - if decErr != nil && decErr.Error() == "EOF" && resp.StatusCode != http.StatusOK { - decErr = nil - } - - // If we got a valid decode error, send that back - if decErr != nil { - logs.WithField("influx_status", resp.StatusCode). - Error("Error parsing results from influxdb: err:", decErr) - return nil, decErr - } - - // If we don't have an error in our json response, and didn't get statusOK - // then send back an error - if resp.StatusCode != http.StatusOK && response.Err != "" { - logs. - WithField("influx_status", resp.StatusCode). - Error("Received non-200 response from influxdb") - - return &response, fmt.Errorf("received status code %d from server", - resp.StatusCode) - } - return &response, nil -} - -type result struct { - Response chronograf.Response - Err error -} - -// Query issues a request to a configured InfluxDB instance for time series -// information specified by query. Queries must be "fully-qualified," and -// include both the database and retention policy. In-flight requests can be -// cancelled using the provided context. 
-func (c *Client) Query(ctx context.Context, q chronograf.Query) (chronograf.Response, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - resps := make(chan (result)) - go func() { - resp, err := c.query(ctx, c.URL, q) - resps <- result{resp, err} - }() - - select { - case resp := <-resps: - return resp.Response, resp.Err - case <-ctx.Done(): - return nil, chronograf.ErrUpstreamTimeout - } -} - -// Connect caches the URL and optional Bearer Authorization for the data source -func (c *Client) Connect(ctx context.Context, src *chronograf.Source) error { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - u, err := url.Parse(src.URL) - if err != nil { - return err - } - c.Authorizer = DefaultAuthorization(src) - // Only allow acceptance of all certs if the scheme is https AND the user opted into to the setting. - if u.Scheme == "https" && src.InsecureSkipVerify { - c.InsecureSkipVerify = src.InsecureSkipVerify - } - - c.URL = u - return nil -} - -// Users transforms InfluxDB into a user store -func (c *Client) Users(ctx context.Context) chronograf.UsersStore { - return c -} - -// Roles aren't support in OSS -func (c *Client) Roles(ctx context.Context) (chronograf.RolesStore, error) { - return nil, fmt.Errorf("roles not support in open-source InfluxDB. Roles are support in Influx Enterprise") -} - -// Ping hits the influxdb ping endpoint and returns the type of influx -func (c *Client) Ping(ctx context.Context) error { - _, _, err := c.pingTimeout(ctx) - return err -} - -// Version hits the influxdb ping endpoint and returns the version of influx -func (c *Client) Version(ctx context.Context) (string, error) { - version, _, err := c.pingTimeout(ctx) - return version, err -} - -// Type hits the influxdb ping endpoint and returns the type of influx running -func (c *Client) Type(ctx context.Context) (string, error) { - _, tsdbType, err := c.pingTimeout(ctx) - return tsdbType, err -} - -func (c *Client) pingTimeout(ctx context.Context) (string, string, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - resps := make(chan (pingResult)) - go func() { - version, tsdbType, err := c.ping(ctx, c.URL) - resps <- pingResult{version, tsdbType, err} - }() - - select { - case resp := <-resps: - return resp.Version, resp.Type, resp.Err - case <-ctx.Done(): - return "", "", chronograf.ErrUpstreamTimeout - } -} - -type pingResult struct { - Version string - Type string - Err error -} - -func (c *Client) ping(ctx context.Context, u *url.URL) (string, string, error) { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - u.Path = "ping" - - req, err := http.NewRequest("GET", u.String(), nil) - if err != nil { - return "", "", err - } - tracing.InjectToHTTPRequest(span, req) - - hc := &http.Client{} - if c.InsecureSkipVerify { - hc.Transport = skipVerifyTransport - } else { - hc.Transport = defaultTransport - } - - resp, err := hc.Do(req) - if err != nil { - return "", "", err - } - defer resp.Body.Close() - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "", "", err - } - - if resp.StatusCode != http.StatusNoContent { - var err = fmt.Errorf(string(body)) - return "", "", err - } - - version := resp.Header.Get("X-Influxdb-Build") - if version == "ENT" { - return version, chronograf.InfluxEnterprise, nil - } - version = resp.Header.Get("X-Influxdb-Version") - if strings.Contains(version, "-c") { - return version, chronograf.InfluxEnterprise, nil - } else if strings.Contains(version, 
"relay") { - return version, chronograf.InfluxRelay, nil - } - - return version, chronograf.InfluxDB, nil -} - -// Write POSTs line protocol to a database and retention policy -func (c *Client) Write(ctx context.Context, points []chronograf.Point) error { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - for _, point := range points { - if err := c.writePoint(ctx, &point); err != nil { - return err - } - } - return nil -} - -func (c *Client) writePoint(ctx context.Context, point *chronograf.Point) error { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - lp, err := toLineProtocol(point) - if err != nil { - return err - } - - err = c.write(ctx, c.URL, point.Database, point.RetentionPolicy, lp) - if err == nil { - return nil - } - - // Some influxdb errors should not be treated as errors - if strings.Contains(err.Error(), "hinted handoff queue not empty") { - // This is an informational message - return nil - } - - // If the database was not found, try to recreate it: - if strings.Contains(err.Error(), "database not found") { - _, err = c.CreateDB(ctx, &chronograf.Database{ - Name: point.Database, - }) - if err != nil { - return err - } - // retry the write - return c.write(ctx, c.URL, point.Database, point.RetentionPolicy, lp) - } - - return err -} - -func (c *Client) write(ctx context.Context, u *url.URL, db, rp, lp string) error { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - u.Path = "write" - req, err := http.NewRequest("POST", u.String(), strings.NewReader(lp)) - if err != nil { - return err - } - - req.Header.Set("Content-Type", "text/plain; charset=utf-8") - if c.Authorizer != nil { - if err := c.Authorizer.Set(req); err != nil { - return err - } - } - - params := req.URL.Query() - params.Set("db", db) - params.Set("rp", rp) - req.URL.RawQuery = params.Encode() - tracing.InjectToHTTPRequest(span, req) - - hc := &http.Client{} - if c.InsecureSkipVerify { - hc.Transport = skipVerifyTransport - } else { - hc.Transport = defaultTransport - } - - errChan := make(chan (error)) - go func() { - resp, err := hc.Do(req) - if err != nil { - errChan <- err - return - } - defer resp.Body.Close() - - if resp.StatusCode == http.StatusNoContent { - errChan <- nil - return - } - - var response Response - dec := json.NewDecoder(resp.Body) - err = dec.Decode(&response) - if err != nil && err.Error() != "EOF" { - errChan <- err - return - } - - errChan <- errors.New(response.Err) - }() - - select { - case err := <-errChan: - return err - case <-ctx.Done(): - return chronograf.ErrUpstreamTimeout - } -} diff --git a/chronograf/influx/influx_test.go b/chronograf/influx/influx_test.go deleted file mode 100644 index 3f9ec3ea7c8..00000000000 --- a/chronograf/influx/influx_test.go +++ /dev/null @@ -1,538 +0,0 @@ -package influx_test - -import ( - "context" - "fmt" - "net/http" - "net/http/httptest" - "net/url" - "strings" - "testing" - "time" - - gojwt "github.com/dgrijalva/jwt-go" - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/influx" - "github.com/influxdata/influxdb/v2/chronograf/mocks" -) - -// NewClient initializes an HTTP Client for InfluxDB. 
-func NewClient(host string, lg chronograf.Logger) (*influx.Client, error) { - l := lg.WithField("host", host) - u, err := url.Parse(host) - if err != nil { - l.Error("Error initialize influx client: err:", err) - return nil, err - } - return &influx.Client{ - URL: u, - Logger: l, - }, nil -} - -func Test_Influx_MakesRequestsToQueryEndpoint(t *testing.T) { - t.Parallel() - called := false - ts := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - rw.WriteHeader(http.StatusOK) - rw.Write([]byte(`{}`)) - called = true - if path := r.URL.Path; path != "/query" { - t.Error("Expected the path to contain `/query` but was", path) - } - })) - defer ts.Close() - - var series chronograf.TimeSeries - series, err := NewClient(ts.URL, &chronograf.NoopLogger{}) - if err != nil { - t.Fatal("Unexpected error initializing client: err:", err) - } - - query := chronograf.Query{ - Command: "show databases", - } - _, err = series.Query(context.Background(), query) - if err != nil { - t.Fatal("Expected no error but was", err) - } - - if !called { - t.Error("Expected http request to Influx but there was none") - } -} - -type MockAuthorization struct { - Bearer string - Error error -} - -func (m *MockAuthorization) Set(req *http.Request) error { - return m.Error -} -func Test_Influx_AuthorizationBearer(t *testing.T) { - t.Parallel() - ts := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - rw.WriteHeader(http.StatusOK) - rw.Write([]byte(`{}`)) - auth := r.Header.Get("Authorization") - tokenString := strings.Split(auth, " ")[1] - token, err := gojwt.Parse(tokenString, func(token *gojwt.Token) (interface{}, error) { - if _, ok := token.Method.(*gojwt.SigningMethodHMAC); !ok { - return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"]) - } - return []byte("42"), nil - }) - if err != nil { - t.Errorf("Invalid token %v", err) - } - - if claims, ok := token.Claims.(gojwt.MapClaims); ok && token.Valid { - got := claims["username"] - want := "AzureDiamond" - if got != want { - t.Errorf("Test_Influx_AuthorizationBearer got %s want %s", got, want) - } - return - } - t.Errorf("Invalid token %v", token) - })) - defer ts.Close() - - src := &chronograf.Source{ - Username: "AzureDiamond", - URL: ts.URL, - SharedSecret: "42", - } - series := &influx.Client{ - Logger: &chronograf.NoopLogger{}, - } - series.Connect(context.Background(), src) - - query := chronograf.Query{ - Command: "show databases", - } - _, err := series.Query(context.Background(), query) - if err != nil { - t.Fatal("Expected no error but was", err) - } -} - -func Test_Influx_AuthorizationBearerCtx(t *testing.T) { - t.Parallel() - ts := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - rw.WriteHeader(http.StatusOK) - rw.Write([]byte(`{}`)) - got := r.Header.Get("Authorization") - if got == "" { - t.Error("Test_Influx_AuthorizationBearerCtx got empty string") - } - incomingToken := strings.Split(got, " ")[1] - - alg := func(token *gojwt.Token) (interface{}, error) { - if _, ok := token.Method.(*gojwt.SigningMethodHMAC); !ok { - return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"]) - } - return []byte("hunter2"), nil - } - claims := &gojwt.MapClaims{} - token, err := gojwt.ParseWithClaims(string(incomingToken), claims, alg) - if err != nil { - t.Errorf("Test_Influx_AuthorizationBearerCtx unexpected claims error %v", err) - } - if !token.Valid { - t.Error("Test_Influx_AuthorizationBearerCtx unexpected valid claim") - } 
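These tests verify the token minted by the deleted JWT() helper: HS512, a username claim, and a one-minute exp. A round-trip sketch using the same dgrijalva/jwt-go calls the tests exercise (signAndVerify is an illustrative name):

package main

import (
	"fmt"
	"time"

	jwt "github.com/dgrijalva/jwt-go"
)

// signAndVerify mints an HS512 token with a username claim and one-minute
// expiry, then parses it back with the shared secret as these tests do.
func signAndVerify(username, secret string) (string, error) {
	token := jwt.NewWithClaims(jwt.SigningMethodHS512, jwt.MapClaims{
		"username": username,
		"exp":      time.Now().Add(time.Minute).Unix(),
	})
	signed, err := token.SignedString([]byte(secret))
	if err != nil {
		return "", err
	}
	parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
		}
		return []byte(secret), nil
	})
	if err != nil || !parsed.Valid {
		return "", fmt.Errorf("token failed verification: %v", err)
	}
	return parsed.Claims.(jwt.MapClaims)["username"].(string), nil
}

func main() {
	user, err := signAndVerify("AzureDiamond", "hunter2")
	fmt.Println(user, err) // AzureDiamond <nil>
}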
- if err := claims.Valid(); err != nil { - t.Errorf("Test_Influx_AuthorizationBearerCtx not expires already %v", err) - } - user := (*claims)["username"].(string) - if user != "AzureDiamond" { - t.Errorf("Test_Influx_AuthorizationBearerCtx expected username AzureDiamond but got %s", user) - } - })) - defer ts.Close() - - series := &influx.Client{ - Logger: &chronograf.NoopLogger{}, - } - - err := series.Connect(context.Background(), &chronograf.Source{ - Username: "AzureDiamond", - SharedSecret: "hunter2", - URL: ts.URL, - InsecureSkipVerify: true, - }) - if err != nil { - t.Fatal(err) - } - - query := chronograf.Query{ - Command: "show databases", - } - _, err = series.Query(context.Background(), query) - if err != nil { - t.Fatal("Expected no error but was", err) - } -} - -func Test_Influx_AuthorizationBearerFailure(t *testing.T) { - t.Parallel() - bearer := &MockAuthorization{ - Error: fmt.Errorf("cracked1337"), - } - - u, _ := url.Parse("http://haxored.net") - u.User = url.UserPassword("AzureDiamond", "hunter2") - series := &influx.Client{ - URL: u, - Authorizer: bearer, - Logger: &chronograf.NoopLogger{}, - } - - query := chronograf.Query{ - Command: "show databases", - } - _, err := series.Query(context.Background(), query) - if err == nil { - t.Fatal("Test_Influx_AuthorizationBearerFailure Expected error but received nil") - } -} - -func Test_Influx_HTTPS_Failure(t *testing.T) { - called := false - ts := httptest.NewTLSServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - called = true - })) - defer ts.Close() - - ctx := context.Background() - var series chronograf.TimeSeries - series, err := NewClient(ts.URL, &chronograf.NoopLogger{}) - if err != nil { - t.Fatal("Unexpected error initializing client: err:", err) - } - - src := chronograf.Source{ - URL: ts.URL, - } - if err := series.Connect(ctx, &src); err != nil { - t.Fatal("Unexpected error connecting to client: err:", err) - } - - query := chronograf.Query{ - Command: "show databases", - } - _, err = series.Query(ctx, query) - if err == nil { - t.Error("Expected error but was successful") - } - - if called { - t.Error("Expected http request to fail, but, succeeded") - } -} - -func Test_Influx_HTTPS_InsecureSkipVerify(t *testing.T) { - t.Parallel() - called := false - q := "" - ts := httptest.NewTLSServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - rw.WriteHeader(http.StatusOK) - rw.Write([]byte(`{}`)) - called = true - if path := r.URL.Path; path != "/query" { - t.Error("Expected the path to contain `/query` but was", path) - } - values := r.URL.Query() - q = values.Get("q") - })) - defer ts.Close() - - ctx := context.Background() - var series chronograf.TimeSeries - series, err := NewClient(ts.URL, &chronograf.NoopLogger{}) - if err != nil { - t.Fatal("Unexpected error initializing client: err:", err) - } - - src := chronograf.Source{ - URL: ts.URL, - InsecureSkipVerify: true, - } - if err := series.Connect(ctx, &src); err != nil { - t.Fatal("Unexpected error connecting to client: err:", err) - } - - query := chronograf.Query{ - Command: "show databases", - } - _, err = series.Query(ctx, query) - if err != nil { - t.Fatal("Expected no error but was", err) - } - - if !called { - t.Error("Expected http request to Influx but there was none") - } - called = false - q = "" - query = chronograf.Query{ - Command: `select "usage_user" from cpu`, - } - _, err = series.Query(ctx, query) - if err != nil { - t.Fatal("Expected no error but was", err) - } - - if !called { - t.Error("Expected http 
request to Influx but there was none") - } - - if q != `select "usage_user" from cpu` { - t.Errorf("Unexpected query: %s", q) - } -} - -func Test_Influx_CancelsInFlightRequests(t *testing.T) { - t.Parallel() - - started := make(chan bool, 1) - finished := make(chan bool, 1) - ts := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - started <- true - time.Sleep(20 * time.Millisecond) - finished <- true - })) - defer func() { - ts.CloseClientConnections() - ts.Close() - }() - - series, _ := NewClient(ts.URL, &chronograf.NoopLogger{}) - ctx, cancel := context.WithCancel(context.Background()) - - errs := make(chan (error)) - go func() { - query := chronograf.Query{ - Command: "show databases", - } - - _, err := series.Query(ctx, query) - errs <- err - }() - - timer := time.NewTimer(10 * time.Second) - defer timer.Stop() - - select { - case s := <-started: - if !s { - t.Errorf("Expected cancellation during request processing. Started: %t", s) - } - case <-timer.C: - t.Fatalf("Expected server to finish") - } - - cancel() - - select { - case f := <-finished: - if !f { - t.Errorf("Expected cancellation during request processing. Finished: %t", f) - } - case <-timer.C: - t.Fatalf("Expected server to finish") - } - - err := <-errs - if err != chronograf.ErrUpstreamTimeout { - t.Error("Expected timeout error but wasn't. err was", err) - } -} - -func Test_Influx_RejectsInvalidHosts(t *testing.T) { - _, err := NewClient(":", &chronograf.NoopLogger{}) - if err == nil { - t.Fatal("Expected err but was nil") - } -} - -func Test_Influx_ReportsInfluxErrs(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - rw.WriteHeader(http.StatusOK) - })) - defer ts.Close() - - cl, err := NewClient(ts.URL, &chronograf.NoopLogger{}) - if err != nil { - t.Fatal("Encountered unexpected error while initializing influx client: err:", err) - } - - _, err = cl.Query(context.Background(), chronograf.Query{ - Command: "show shards", - DB: "_internal", - RP: "autogen", - }) - if err == nil { - t.Fatal("Expected an error but received none") - } -} - -func TestClient_Roles(t *testing.T) { - c := &influx.Client{} - _, err := c.Roles(context.Background()) - if err == nil { - t.Errorf("Client.Roles() want error") - } -} - -func TestClient_write(t *testing.T) { - type fields struct { - Authorizer influx.Authorizer - InsecureSkipVerify bool - Logger chronograf.Logger - } - type args struct { - ctx context.Context - point chronograf.Point - } - tests := []struct { - name string - fields fields - args args - body string - wantErr bool - }{ - { - name: "write point to influxdb", - fields: fields{ - Logger: mocks.NewLogger(), - }, - args: args{ - ctx: context.Background(), - point: chronograf.Point{ - Database: "mydb", - RetentionPolicy: "myrp", - Measurement: "mymeas", - Time: 10, - Tags: map[string]string{ - "tag1": "value1", - "tag2": "value2", - }, - Fields: map[string]interface{}{ - "field1": "value1", - }, - }, - }, - }, - { - name: "point without fields", - args: args{ - ctx: context.Background(), - point: chronograf.Point{}, - }, - wantErr: true, - }, - { - name: "hinted handoff errors are not errors really.", - fields: fields{ - Logger: mocks.NewLogger(), - }, - args: args{ - ctx: context.Background(), - point: chronograf.Point{ - Database: "mydb", - RetentionPolicy: "myrp", - Measurement: "mymeas", - Time: 10, - Tags: map[string]string{ - "tag1": "value1", - "tag2": "value2", - }, - Fields: map[string]interface{}{ - "field1": "value1", - }, 
- }, - }, - body: `{"error":"hinted handoff queue not empty"}`, - }, - { - name: "database not found creates a new db", - fields: fields{ - Logger: mocks.NewLogger(), - }, - args: args{ - ctx: context.Background(), - point: chronograf.Point{ - Database: "mydb", - RetentionPolicy: "myrp", - Measurement: "mymeas", - Time: 10, - Tags: map[string]string{ - "tag1": "value1", - "tag2": "value2", - }, - Fields: map[string]interface{}{ - "field1": "value1", - }, - }, - }, - body: `{"error":"database not found"}`, - }, - { - name: "error from database reported", - fields: fields{ - Logger: mocks.NewLogger(), - }, - args: args{ - ctx: context.Background(), - point: chronograf.Point{ - Database: "mydb", - RetentionPolicy: "myrp", - Measurement: "mymeas", - Time: 10, - Tags: map[string]string{ - "tag1": "value1", - "tag2": "value2", - }, - Fields: map[string]interface{}{ - "field1": "value1", - }, - }, - }, - body: `{"error":"oh no!"}`, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - retry := 0 // if the retry is > 0 then we don't error - ts := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if strings.HasPrefix(r.RequestURI, "/write") { - if tt.body == "" || retry > 0 { - rw.WriteHeader(http.StatusNoContent) - return - } - retry++ - rw.WriteHeader(http.StatusBadRequest) - rw.Write([]byte(tt.body)) - return - } - rw.WriteHeader(http.StatusOK) - rw.Write([]byte(`{"results":[{}]}`)) - })) - defer ts.Close() - u, _ := url.Parse(ts.URL) - c := &influx.Client{ - URL: u, - Authorizer: tt.fields.Authorizer, - InsecureSkipVerify: tt.fields.InsecureSkipVerify, - Logger: tt.fields.Logger, - } - if err := c.Write(tt.args.ctx, []chronograf.Point{tt.args.point}); (err != nil) != tt.wantErr { - t.Errorf("Client.write() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} diff --git a/chronograf/influx/lineprotocol.go b/chronograf/influx/lineprotocol.go deleted file mode 100644 index 082e6474590..00000000000 --- a/chronograf/influx/lineprotocol.go +++ /dev/null @@ -1,84 +0,0 @@ -package influx - -import ( - "fmt" - "sort" - "strings" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -var ( - escapeMeasurement = strings.NewReplacer( - `,` /* to */, `\,`, - ` ` /* to */, `\ `, - ) - escapeKeys = strings.NewReplacer( - `,` /* to */, `\,`, - `"` /* to */, `\"`, - ` ` /* to */, `\ `, - `=` /* to */, `\=`, - ) - escapeTagValues = strings.NewReplacer( - `,` /* to */, `\,`, - `"` /* to */, `\"`, - ` ` /* to */, `\ `, - `=` /* to */, `\=`, - ) - escapeFieldStrings = strings.NewReplacer( - `"` /* to */, `\"`, - `\` /* to */, `\\`, - ) -) - -func toLineProtocol(point *chronograf.Point) (string, error) { - measurement := escapeMeasurement.Replace(point.Measurement) - if len(measurement) == 0 { - return "", fmt.Errorf("measurement required to write point") - } - if len(point.Fields) == 0 { - return "", fmt.Errorf("at least one field required to write point") - } - - tags := []string{} - for tag, value := range point.Tags { - if value != "" { - t := fmt.Sprintf("%s=%s", escapeKeys.Replace(tag), escapeTagValues.Replace(value)) - tags = append(tags, t) - } - } - // it is faster to insert data into influx db if the tags are sorted - sort.Strings(tags) - - fields := []string{} - for field, value := range point.Fields { - var format string - switch v := value.(type) { - case int64, int32, int16, int8, int: - format = fmt.Sprintf("%s=%di", escapeKeys.Replace(field), v) - case uint64, uint32, uint16, uint8, uint: - format = 
fmt.Sprintf("%s=%du", escapeKeys.Replace(field), v) - case float64, float32: - format = fmt.Sprintf("%s=%f", escapeKeys.Replace(field), v) - case string: - format = fmt.Sprintf(`%s="%s"`, escapeKeys.Replace(field), escapeFieldStrings.Replace(v)) - case bool: - format = fmt.Sprintf("%s=%t", escapeKeys.Replace(field), v) - } - if format != "" { - fields = append(fields, format) - } - } - sort.Strings(fields) - - lp := measurement - if len(tags) > 0 { - lp += fmt.Sprintf(",%s", strings.Join(tags, ",")) - } - - lp += fmt.Sprintf(" %s", strings.Join(fields, ",")) - if point.Time != 0 { - lp += fmt.Sprintf(" %d", point.Time) - } - return lp, nil -} diff --git a/chronograf/influx/lineprotocol_test.go b/chronograf/influx/lineprotocol_test.go deleted file mode 100644 index a3466be959f..00000000000 --- a/chronograf/influx/lineprotocol_test.go +++ /dev/null @@ -1,129 +0,0 @@ -package influx - -import ( - "testing" - "time" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -func Test_toLineProtocol(t *testing.T) { - tests := []struct { - name string - point *chronograf.Point - want string - wantErr bool - }{ - 0: { - name: "requires a measurement", - point: &chronograf.Point{}, - wantErr: true, - }, - 1: { - name: "requires at least one field", - point: &chronograf.Point{ - Measurement: "telegraf", - }, - wantErr: true, - }, - 2: { - name: "no tags produces line protocol", - point: &chronograf.Point{ - Measurement: "telegraf", - Fields: map[string]interface{}{ - "myfield": 1, - }, - }, - want: "telegraf myfield=1i", - }, - 3: { - name: "test all influx data types", - point: &chronograf.Point{ - Measurement: "telegraf", - Fields: map[string]interface{}{ - "int": 19, - "uint": uint(85), - "float": 88.0, - "string": "mph", - "time_machine": true, - "invalidField": time.Time{}, - }, - }, - want: `telegraf float=88.000000,int=19i,string="mph",time_machine=true,uint=85u`, - }, - 4: { - name: "test all influx data types", - point: &chronograf.Point{ - Measurement: "telegraf", - Tags: map[string]string{ - "marty": "mcfly", - "doc": "brown", - }, - Fields: map[string]interface{}{ - "int": 19, - "uint": uint(85), - "float": 88.0, - "string": "mph", - "time_machine": true, - "invalidField": time.Time{}, - }, - Time: 497115501000000000, - }, - want: `telegraf,doc=brown,marty=mcfly float=88.000000,int=19i,string="mph",time_machine=true,uint=85u 497115501000000000`, - }, - 5: { - name: "measurements with comma or spaces are escaped", - point: &chronograf.Point{ - Measurement: "O Romeo, Romeo, wherefore art thou Romeo", - Tags: map[string]string{ - "part": "JULIET", - }, - Fields: map[string]interface{}{ - "act": 2, - "scene": 2, - "page": 2, - "line": 33, - }, - }, - want: `O\ Romeo\,\ Romeo\,\ wherefore\ art\ thou\ Romeo,part=JULIET act=2i,line=33i,page=2i,scene=2i`, - }, - 6: { - name: "tags with comma, quota, space, equal are escaped", - point: &chronograf.Point{ - Measurement: "quotes", - Tags: map[string]string{ - "comma,": "comma,", - `quote"`: `quote"`, - "space ": `space "`, - "equal=": "equal=", - }, - Fields: map[string]interface{}{ - "myfield": 1, - }, - }, - want: `quotes,comma\,=comma\,,equal\==equal\=,quote\"=quote\",space\ =space\ \" myfield=1i`, - }, - 7: { - name: "fields with quotes or backslashes are escaped", - point: &chronograf.Point{ - Measurement: "quotes", - Fields: map[string]interface{}{ - `quote"\`: `quote"\`, - }, - }, - want: `quotes quote\"\="quote\"\\"`, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := toLineProtocol(tt.point) - 
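To summarize the serializer deleted above: measurements, tag keys and values, and field keys are escaped with the replacer pairs shown, tags and fields are emitted in sorted order (sorted tags ingest faster, per the comment), integers carry an "i" suffix, unsigned integers a "u", and the optional nanosecond timestamp comes last. A runnable sketch of just the escaping, using the same replacer pairs:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        // Same replacer pairs as the deleted lineprotocol.go.
        escapeMeasurement := strings.NewReplacer(`,`, `\,`, ` `, `\ `)
        escapeTag := strings.NewReplacer(`,`, `\,`, `"`, `\"`, ` `, `\ `, `=`, `\=`)

        measurement := escapeMeasurement.Replace("cpu load")
        tag := escapeTag.Replace("host") + "=" + escapeTag.Replace("a,b")
        fmt.Printf("%s,%s value=1i 10\n", measurement, tag)
        // Output: cpu\ load,host=a\,b value=1i 10
    }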
if (err != nil) != tt.wantErr { - t.Errorf("toLineProtocol() error = %v, wantErr %v", err, tt.wantErr) - return - } - if got != tt.want { - t.Errorf("toLineProtocol() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/chronograf/influx/now.go b/chronograf/influx/now.go deleted file mode 100644 index 9bbcf3a0781..00000000000 --- a/chronograf/influx/now.go +++ /dev/null @@ -1,6 +0,0 @@ -package influx - -import "time" - -// Now returns the current time -type Now func() time.Time diff --git a/chronograf/influx/permissions.go b/chronograf/influx/permissions.go deleted file mode 100644 index b10da2ced5a..00000000000 --- a/chronograf/influx/permissions.go +++ /dev/null @@ -1,278 +0,0 @@ -package influx - -import ( - "context" - "fmt" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -var ( - // AllowAllDB means a user gets both read and write permissions for a db - AllowAllDB = chronograf.Allowances{"WRITE", "READ"} - // AllowAllAdmin means a user gets both read and write permissions for an admin - AllowAllAdmin = chronograf.Allowances{"ALL"} - // AllowRead means a user is only able to read the database. - AllowRead = chronograf.Allowances{"READ"} - // AllowWrite means a user is able to only write to the database - AllowWrite = chronograf.Allowances{"WRITE"} - // NoPrivileges occasionally shows up as a response for a users grants. - NoPrivileges = "NO PRIVILEGES" - // AllPrivileges means that a user has both read and write perms - AllPrivileges = "ALL PRIVILEGES" - // All means a user has both read and write perms. Alternative to AllPrivileges - All = "ALL" - // Read means a user can read a database - Read = "READ" - // Write means a user can write to a database - Write = "WRITE" -) - -// Permissions return just READ and WRITE for OSS Influx -func (c *Client) Permissions(context.Context) chronograf.Permissions { - return chronograf.Permissions{ - { - Scope: chronograf.AllScope, - Allowed: AllowAllAdmin, - }, - { - Scope: chronograf.DBScope, - Allowed: AllowAllDB, - }, - } -} - -// showResults is used to deserialize InfluxQL SHOW commands -type showResults []struct { - Series []struct { - Values [][]interface{} `json:"values"` - } `json:"series"` -} - -// Users converts SHOW USERS to chronograf Users -func (r *showResults) Users() []chronograf.User { - res := []chronograf.User{} - for _, u := range *r { - for _, s := range u.Series { - for _, v := range s.Values { - if name, ok := v[0].(string); !ok { - continue - } else if admin, ok := v[1].(bool); !ok { - continue - } else { - c := chronograf.User{ - Name: name, - Permissions: chronograf.Permissions{}, - } - if admin { - c.Permissions = adminPerms() - } - res = append(res, c) - } - } - } - } - return res -} - -// Databases converts SHOW DATABASES to chronograf Databases -func (r *showResults) Databases() []chronograf.Database { - res := []chronograf.Database{} - for _, u := range *r { - for _, s := range u.Series { - for _, v := range s.Values { - if name, ok := v[0].(string); !ok { - continue - } else { - d := chronograf.Database{Name: name} - res = append(res, d) - } - } - } - } - return res -} - -func (r *showResults) RetentionPolicies() []chronograf.RetentionPolicy { - res := []chronograf.RetentionPolicy{} - for _, u := range *r { - for _, s := range u.Series { - for _, v := range s.Values { - if name, ok := v[0].(string); !ok { - continue - } else if duration, ok := v[1].(string); !ok { - continue - } else if sduration, ok := v[2].(string); !ok { - continue - } else if replication, ok := v[3].(float64); !ok { - 
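showResults, defined above, captures the shape every SHOW response shares (results, each holding series, each holding rows of loosely typed values); the converters then pick columns positionally with type assertions and skip rows that do not match. A reduced decoding sketch, reusing a payload from the deleted tests:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type showResults []struct {
        Series []struct {
            Values [][]interface{} `json:"values"`
        } `json:"series"`
    }

    func main() {
        raw := []byte(`[{"series":[{"columns":["user","admin"],"values":[["admin",true]]}]}]`)
        var r showResults
        if err := json.Unmarshal(raw, &r); err != nil {
            panic(err)
        }
        // Columns are addressed by position, with type assertions.
        name, _ := r[0].Series[0].Values[0][0].(string)
        admin, _ := r[0].Series[0].Values[0][1].(bool)
        fmt.Println(name, admin) // admin true
    }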
continue - } else if def, ok := v[4].(bool); !ok { - continue - } else { - d := chronograf.RetentionPolicy{ - Name: name, - Duration: duration, - ShardDuration: sduration, - Replication: int32(replication), - Default: def, - } - res = append(res, d) - } - } - } - } - return res -} - -// Measurements converts SHOW MEASUREMENTS to chronograf Measurement -func (r *showResults) Measurements() []chronograf.Measurement { - res := []chronograf.Measurement{} - for _, u := range *r { - for _, s := range u.Series { - for _, v := range s.Values { - if name, ok := v[0].(string); !ok { - continue - } else { - d := chronograf.Measurement{Name: name} - res = append(res, d) - } - } - } - } - return res -} - -// Permissions converts SHOW GRANTS to chronograf.Permissions -func (r *showResults) Permissions() chronograf.Permissions { - res := []chronograf.Permission{} - for _, u := range *r { - for _, s := range u.Series { - for _, v := range s.Values { - if db, ok := v[0].(string); !ok { - continue - } else if priv, ok := v[1].(string); !ok { - continue - } else { - c := chronograf.Permission{ - Name: db, - Scope: chronograf.DBScope, - } - switch priv { - case AllPrivileges, All: - c.Allowed = AllowAllDB - case Read: - c.Allowed = AllowRead - case Write: - c.Allowed = AllowWrite - default: - // sometimes influx reports back NO PRIVILEGES - continue - } - res = append(res, c) - } - } - } - } - return res -} - -func adminPerms() chronograf.Permissions { - return []chronograf.Permission{ - { - Scope: chronograf.AllScope, - Allowed: AllowAllAdmin, - }, - } -} - -// ToInfluxQL converts the permission into InfluxQL -func ToInfluxQL(action, preposition, username string, perm chronograf.Permission) string { - if perm.Scope == chronograf.AllScope { - return fmt.Sprintf(`%s ALL PRIVILEGES %s "%s"`, action, preposition, username) - } else if len(perm.Allowed) == 0 { - // All privileges are to be removed for this user on this database - return fmt.Sprintf(`%s ALL PRIVILEGES ON "%s" %s "%s"`, action, perm.Name, preposition, username) - } - priv := ToPriv(perm.Allowed) - if priv == NoPrivileges { - return "" - } - return fmt.Sprintf(`%s %s ON "%s" %s "%s"`, action, priv, perm.Name, preposition, username) -} - -// ToRevoke converts the permission into InfluxQL revokes -func ToRevoke(username string, perm chronograf.Permission) string { - return ToInfluxQL("REVOKE", "FROM", username, perm) -} - -// ToGrant converts the permission into InfluxQL grants -func ToGrant(username string, perm chronograf.Permission) string { - if len(perm.Allowed) == 0 { - return "" - } - return ToInfluxQL("GRANT", "TO", username, perm) -} - -// ToPriv converts chronograf allowances to InfluxQL -func ToPriv(a chronograf.Allowances) string { - if len(a) == 0 { - return NoPrivileges - } - hasWrite := false - hasRead := false - for _, aa := range a { - if aa == Read { - hasRead = true - } else if aa == Write { - hasWrite = true - } else if aa == All { - hasRead, hasWrite = true, true - } - } - - if hasWrite && hasRead { - return All - } else if hasWrite { - return Write - } else if hasRead { - return Read - } - return NoPrivileges -} - -// Difference compares two permission sets and returns a set to be revoked and a set to be added -func Difference(wants chronograf.Permissions, haves chronograf.Permissions) (revoke chronograf.Permissions, add chronograf.Permissions) { - for _, want := range wants { - found := false - for _, got := range haves { - if want.Scope != got.Scope || want.Name != got.Name { - continue - } - found = true - if len(want.Allowed) 
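Usage of the builders above, with inputs and outputs taken verbatim from the deleted tests:

    ToGrant("biff", chronograf.Permission{
        Scope:   chronograf.AllScope,
        Allowed: chronograf.Allowances{"ALL"},
    })
    // => GRANT ALL PRIVILEGES TO "biff"

    ToGrant("biff", chronograf.Permission{
        Scope:   chronograf.DBScope,
        Name:    "gray_sports_almanac",
        Allowed: chronograf.Allowances{"ALL"},
    })
    // => GRANT ALL ON "gray_sports_almanac" TO "biff"

    ToRevoke("biff", chronograf.Permission{
        Scope:   chronograf.DBScope,
        Name:    "pleasure_paradice",
        Allowed: chronograf.Allowances{},
    })
    // => REVOKE ALL PRIVILEGES ON "pleasure_paradice" FROM "biff"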
== 0 { - revoke = append(revoke, want) - } else { - add = append(add, want) - } - break - } - if !found { - add = append(add, want) - } - } - - for _, got := range haves { - found := false - for _, want := range wants { - if want.Scope != got.Scope || want.Name != got.Name { - continue - } - found = true - break - } - if !found { - revoke = append(revoke, got) - } - } - return -} diff --git a/chronograf/influx/permissions_test.go b/chronograf/influx/permissions_test.go deleted file mode 100644 index 0050cf0252a..00000000000 --- a/chronograf/influx/permissions_test.go +++ /dev/null @@ -1,422 +0,0 @@ -package influx - -import ( - "encoding/json" - "reflect" - "testing" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -func TestDifference(t *testing.T) { - t.Parallel() - type args struct { - wants chronograf.Permissions - haves chronograf.Permissions - } - tests := []struct { - name string - args args - wantRevoke chronograf.Permissions - wantAdd chronograf.Permissions - }{ - { - name: "add write to permissions", - args: args{ - wants: chronograf.Permissions{ - chronograf.Permission{ - Scope: "database", - Name: "tensorflowdb", - Allowed: []string{"READ", "WRITE"}, - }, - }, - haves: chronograf.Permissions{ - chronograf.Permission{ - Scope: "database", - Name: "tensorflowdb", - Allowed: []string{"READ"}, - }, - }, - }, - wantRevoke: nil, - wantAdd: chronograf.Permissions{ - chronograf.Permission{ - Scope: "database", - Name: "tensorflowdb", - Allowed: []string{"READ", "WRITE"}, - }, - }, - }, - { - name: "revoke write to permissions", - args: args{ - wants: chronograf.Permissions{ - chronograf.Permission{ - Scope: "database", - Name: "tensorflowdb", - Allowed: []string{"READ"}, - }, - }, - haves: chronograf.Permissions{ - chronograf.Permission{ - Scope: "database", - Name: "tensorflowdb", - Allowed: []string{"READ", "WRITE"}, - }, - }, - }, - wantRevoke: nil, - wantAdd: chronograf.Permissions{ - chronograf.Permission{ - Scope: "database", - Name: "tensorflowdb", - Allowed: []string{"READ"}, - }, - }, - }, - { - name: "revoke all permissions", - args: args{ - wants: chronograf.Permissions{ - chronograf.Permission{ - Scope: "database", - Name: "tensorflowdb", - Allowed: []string{}, - }, - }, - haves: chronograf.Permissions{ - chronograf.Permission{ - Scope: "database", - Name: "tensorflowdb", - Allowed: []string{"READ", "WRITE"}, - }, - }, - }, - wantRevoke: chronograf.Permissions{ - chronograf.Permission{ - Scope: "database", - Name: "tensorflowdb", - Allowed: []string{}, - }, - }, - wantAdd: nil, - }, - { - name: "add permissions different db", - args: args{ - wants: chronograf.Permissions{ - chronograf.Permission{ - Scope: "database", - Name: "new", - Allowed: []string{"READ"}, - }, - }, - haves: chronograf.Permissions{ - chronograf.Permission{ - Scope: "database", - Name: "old", - Allowed: []string{"READ", "WRITE"}, - }, - }, - }, - wantRevoke: chronograf.Permissions{ - chronograf.Permission{ - Scope: "database", - Name: "old", - Allowed: []string{"READ", "WRITE"}, - }, - }, - wantAdd: chronograf.Permissions{ - chronograf.Permission{ - Scope: "database", - Name: "new", - Allowed: []string{"READ"}, - }, - }, - }, - } - for _, tt := range tests { - gotRevoke, gotAdd := Difference(tt.args.wants, tt.args.haves) - if !reflect.DeepEqual(gotRevoke, tt.wantRevoke) { - t.Errorf("%q. Difference() gotRevoke = %v, want %v", tt.name, gotRevoke, tt.wantRevoke) - } - if !reflect.DeepEqual(gotAdd, tt.wantAdd) { - t.Errorf("%q. 
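One subtlety in Difference, confirmed by the tests that follow: a wanted permission with a non-empty Allowed list is always placed in the add set, never partially revoked, so narrowing a user from READ+WRITE to READ yields revoke = nil and add = [READ]. Only an explicitly empty Allowed list lands in the revoke set.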
Difference() gotAdd = %v, want %v", tt.name, gotAdd, tt.wantAdd) - } - } -} - -func TestToPriv(t *testing.T) { - t.Parallel() - type args struct { - a chronograf.Allowances - } - tests := []struct { - name string - args args - want string - }{ - { - name: "no privs", - args: args{ - a: chronograf.Allowances{}, - }, - want: NoPrivileges, - }, - { - name: "read and write privs", - args: args{ - a: chronograf.Allowances{"READ", "WRITE"}, - }, - want: All, - }, - { - name: "write privs", - args: args{ - a: chronograf.Allowances{"WRITE"}, - }, - want: Write, - }, - { - name: "read privs", - args: args{ - a: chronograf.Allowances{"READ"}, - }, - want: Read, - }, - { - name: "all privs", - args: args{ - a: chronograf.Allowances{"ALL"}, - }, - want: All, - }, - { - name: "bad privs", - args: args{ - a: chronograf.Allowances{"BAD"}, - }, - want: NoPrivileges, - }, - } - for _, tt := range tests { - if got := ToPriv(tt.args.a); got != tt.want { - t.Errorf("%q. ToPriv() = %v, want %v", tt.name, got, tt.want) - } - } -} - -func TestToGrant(t *testing.T) { - t.Parallel() - type args struct { - username string - perm chronograf.Permission - } - tests := []struct { - name string - args args - want string - }{ - { - name: "grant all for all dbs", - args: args{ - username: "biff", - perm: chronograf.Permission{ - Scope: chronograf.AllScope, - Allowed: chronograf.Allowances{"ALL"}, - }, - }, - want: `GRANT ALL PRIVILEGES TO "biff"`, - }, - { - name: "grant all for one db", - args: args{ - username: "biff", - perm: chronograf.Permission{ - Scope: chronograf.DBScope, - Name: "gray_sports_almanac", - Allowed: chronograf.Allowances{"ALL"}, - }, - }, - want: `GRANT ALL ON "gray_sports_almanac" TO "biff"`, - }, - { - name: "bad allowance", - args: args{ - username: "biff", - perm: chronograf.Permission{ - Scope: chronograf.DBScope, - Name: "gray_sports_almanac", - Allowed: chronograf.Allowances{"bad"}, - }, - }, - want: "", - }, - } - for _, tt := range tests { - if got := ToGrant(tt.args.username, tt.args.perm); got != tt.want { - t.Errorf("%q. ToGrant() = %v, want %v", tt.name, got, tt.want) - } - } -} - -func TestToRevoke(t *testing.T) { - t.Parallel() - type args struct { - username string - perm chronograf.Permission - } - tests := []struct { - name string - args args - want string - }{ - { - name: "revoke all for all dbs", - args: args{ - username: "biff", - perm: chronograf.Permission{ - Scope: chronograf.AllScope, - Allowed: chronograf.Allowances{"ALL"}, - }, - }, - want: `REVOKE ALL PRIVILEGES FROM "biff"`, - }, - { - name: "revoke all for one db", - args: args{ - username: "biff", - perm: chronograf.Permission{ - Scope: chronograf.DBScope, - Name: "pleasure_paradice", - Allowed: chronograf.Allowances{}, - }, - }, - want: `REVOKE ALL PRIVILEGES ON "pleasure_paradice" FROM "biff"`, - }, - } - for _, tt := range tests { - if got := ToRevoke(tt.args.username, tt.args.perm); got != tt.want { - t.Errorf("%q. 
ToRevoke() = %v, want %v", tt.name, got, tt.want) - } - } -} - -func Test_showResults_Users(t *testing.T) { - t.Parallel() - tests := []struct { - name string - octets []byte - want []chronograf.User - }{ - { - name: "admin and non-admin", - octets: []byte(`[{"series":[{"columns":["user","admin"],"values":[["admin",true],["reader",false]]}]}]`), - want: []chronograf.User{ - { - Name: "admin", - Permissions: chronograf.Permissions{ - { - Scope: chronograf.AllScope, - Allowed: chronograf.Allowances{"ALL"}, - }, - }, - }, - { - Name: "reader", - Permissions: chronograf.Permissions{}, - }, - }, - }, - { - name: "bad JSON", - octets: []byte(`[{"series":[{"columns":["user","admin"],"values":[[1,true],["reader","false"]]}]}]`), - want: []chronograf.User{}, - }, - } - - for _, tt := range tests { - r := &showResults{} - json.Unmarshal(tt.octets, r) - if got := r.Users(); !reflect.DeepEqual(got, tt.want) { - t.Errorf("%q. showResults.Users() = %v, want %v", tt.name, got, tt.want) - } - } -} - -func Test_showResults_Permissions(t *testing.T) { - t.Parallel() - tests := []struct { - name string - octets []byte - want chronograf.Permissions - }{ - { - name: "write for one db", - octets: []byte(`[{"series":[{"columns":["database","privilege"],"values":[["tensorflowdb","WRITE"]]}]}]`), - want: chronograf.Permissions{ - chronograf.Permission{ - Scope: "database", - Name: "tensorflowdb", - Allowed: []string{"WRITE"}, - }, - }, - }, - { - name: "all for one db", - octets: []byte(`[{"series":[{"columns":["database","privilege"],"values":[["tensorflowdb","ALL PRIVILEGES"]]}]}]`), - want: chronograf.Permissions{ - chronograf.Permission{ - Scope: "database", - Name: "tensorflowdb", - Allowed: []string{"WRITE", "READ"}, - }, - }, - }, - { - name: "read for one db", - octets: []byte(`[{"series":[{"columns":["database","privilege"],"values":[["tensorflowdb","READ"]]}]}]`), - want: chronograf.Permissions{ - chronograf.Permission{ - Scope: "database", - Name: "tensorflowdb", - Allowed: []string{"READ"}, - }, - }, - }, - { - name: "other all for one db", - octets: []byte(`[{"series":[{"columns":["database","privilege"],"values":[["tensorflowdb","ALL"]]}]}]`), - want: chronograf.Permissions{ - chronograf.Permission{ - Scope: "database", - Name: "tensorflowdb", - Allowed: []string{"WRITE", "READ"}, - }, - }, - }, - { - name: "other all for one db", - octets: []byte(`[{"series":[{"columns":["database","privilege"],"values":[["tensorflowdb","NO PRIVILEGES"]]}]}]`), - want: chronograf.Permissions{}, - }, - { - name: "bad JSON", - octets: []byte(`[{"series":[{"columns":["database","privilege"],"values":[[1,"WRITE"]]}]}]`), - want: chronograf.Permissions{}, - }, - { - name: "bad JSON", - octets: []byte(`[{"series":[{"columns":["database","privilege"],"values":[["tensorflowdb",1]]}]}]`), - want: chronograf.Permissions{}, - }, - } - - for _, tt := range tests { - r := &showResults{} - json.Unmarshal(tt.octets, r) - if got := r.Permissions(); !reflect.DeepEqual(got, tt.want) { - t.Errorf("%q. 
showResults.Users() = %v, want %v", tt.name, got, tt.want) - } - } -} diff --git a/chronograf/influx/queries/select.go b/chronograf/influx/queries/select.go deleted file mode 100644 index 705edc9825d..00000000000 --- a/chronograf/influx/queries/select.go +++ /dev/null @@ -1,491 +0,0 @@ -package queries - -import ( - "encoding/json" - "errors" - "fmt" - "log" - "reflect" - "regexp" - "strings" - "time" - - "github.com/influxdata/influxql" -) - -type literalJSON struct { - Expr string `json:"expr"` - Val string `json:"val"` - Type string `json:"type"` -} - -func ParseSelect(q string) (*SelectStatement, error) { - stmt, err := influxql.NewParser(strings.NewReader(q)).ParseStatement() - if err != nil { - return nil, err - } - s, ok := stmt.(*influxql.SelectStatement) - if !ok { - return nil, fmt.Errorf("error parsing query: not a SELECT statement") - } - return &SelectStatement{s}, nil -} - -type BinaryExpr struct { - *influxql.BinaryExpr -} - -func (b *BinaryExpr) MarshalJSON() ([]byte, error) { - octets, err := MarshalJSON(b.BinaryExpr.LHS) - if err != nil { - log.Fatalln(err) - return nil, err - } - lhs := json.RawMessage(octets) - - octets, err = MarshalJSON(b.BinaryExpr.RHS) - if err != nil { - log.Fatalln(err) - return nil, err - } - rhs := json.RawMessage(octets) - - return json.Marshal(struct { - Expr string `json:"expr"` - Op string `json:"op"` - LHS *json.RawMessage `json:"lhs"` - RHS *json.RawMessage `json:"rhs"` - }{"binary", b.Op.String(), &lhs, &rhs}) -} - -type Call struct { - *influxql.Call -} - -func (c *Call) MarshalJSON() ([]byte, error) { - args := make([]json.RawMessage, len(c.Args)) - for i, arg := range c.Args { - b, err := MarshalJSON(arg) - if err != nil { - return nil, err - } - args[i] = b - } - return json.Marshal(struct { - Expr string `json:"expr"` - Name string `json:"name"` - Args []json.RawMessage `json:"args,omitempty"` - }{"call", c.Name, args}) -} - -type Distinct struct { - *influxql.Distinct -} - -func (d *Distinct) MarshalJSON() ([]byte, error) { - return []byte(fmt.Sprintf(`{"expr": "distinct", "val": "%s"}`, d.Val)), nil -} - -type Fill struct { - Option influxql.FillOption - Value interface{} -} - -func (f *Fill) MarshalJSON() ([]byte, error) { - var fill string - switch f.Option { - case influxql.NullFill: - fill = "null" - case influxql.NoFill: - fill = "none" - case influxql.PreviousFill: - fill = "previous" - case influxql.LinearFill: - fill = "linear" - case influxql.NumberFill: - fill = fmt.Sprintf("%v", f.Value) - } - return json.Marshal(fill) -} - -type ParenExpr struct { - *influxql.ParenExpr -} - -func (p *ParenExpr) MarshalJSON() ([]byte, error) { - expr, err := MarshalJSON(p.Expr) - if err != nil { - log.Fatalln(err) - return nil, err - } - return []byte(fmt.Sprintf(`{"expr": "paren", "val": %s}`, expr)), nil -} - -func LiteralJSON(lit string, litType string) ([]byte, error) { - result := literalJSON{ - Expr: "literal", - Val: lit, - Type: litType, - } - return json.Marshal(result) -} - -type BooleanLiteral struct { - *influxql.BooleanLiteral -} - -func (b *BooleanLiteral) MarshalJSON() ([]byte, error) { - return LiteralJSON(b.String(), "boolean") -} - -type DurationLiteral struct { - *influxql.DurationLiteral -} - -func (d *DurationLiteral) MarshalJSON() ([]byte, error) { - return LiteralJSON(d.String(), "duration") -} - -type IntegerLiteral struct { - *influxql.IntegerLiteral -} - -func (i *IntegerLiteral) MarshalJSON() ([]byte, error) { - return LiteralJSON(i.String(), "integer") -} - -type NumberLiteral struct { - 
*influxql.NumberLiteral -} - -func (n *NumberLiteral) MarshalJSON() ([]byte, error) { - return LiteralJSON(n.String(), "number") -} - -type RegexLiteral struct { - *influxql.RegexLiteral -} - -func (r *RegexLiteral) MarshalJSON() ([]byte, error) { - return LiteralJSON(r.String(), "regex") -} - -// TODO: I don't think list is right -type ListLiteral struct { - *influxql.ListLiteral -} - -func (l *ListLiteral) MarshalJSON() ([]byte, error) { - vals := make([]string, len(l.Vals)) - for i, v := range l.Vals { - vals[i] = fmt.Sprintf(`"%s"`, v) - } - list := "[" + strings.Join(vals, ",") + "]" - return []byte(list), nil -} - -type StringLiteral struct { - *influxql.StringLiteral -} - -func (s *StringLiteral) MarshalJSON() ([]byte, error) { - return LiteralJSON(s.Val, "string") -} - -type TimeLiteral struct { - *influxql.TimeLiteral -} - -func (t *TimeLiteral) MarshalJSON() ([]byte, error) { - return LiteralJSON(t.Val.UTC().Format(time.RFC3339Nano), "time") -} - -type VarRef struct { - *influxql.VarRef -} - -func (v *VarRef) MarshalJSON() ([]byte, error) { - if v.Type != influxql.Unknown { - return []byte(fmt.Sprintf(`{"expr": "reference", "val": "%s", "type": "%s"}`, v.Val, v.Type.String())), nil - } else { - return []byte(fmt.Sprintf(`{"expr": "reference", "val": "%s"}`, v.Val)), nil - } -} - -type Wildcard struct { - *influxql.Wildcard -} - -func (w *Wildcard) MarshalJSON() ([]byte, error) { - return []byte(fmt.Sprintf(`{"expr": "wildcard", "val": "%s"}`, w.String())), nil -} - -func MarshalJSON(v interface{}) ([]byte, error) { - switch v := v.(type) { - case *influxql.BinaryExpr: - return json.Marshal(&BinaryExpr{v}) - case *influxql.BooleanLiteral: - return json.Marshal(&BooleanLiteral{v}) - case *influxql.Call: - return json.Marshal(&Call{v}) - case *influxql.Distinct: - return json.Marshal(&Distinct{v}) - case *influxql.DurationLiteral: - return json.Marshal(&DurationLiteral{v}) - case *influxql.IntegerLiteral: - return json.Marshal(&IntegerLiteral{v}) - case *influxql.NumberLiteral: - return json.Marshal(&NumberLiteral{v}) - case *influxql.ParenExpr: - return json.Marshal(&ParenExpr{v}) - case *influxql.RegexLiteral: - return json.Marshal(&RegexLiteral{v}) - case *influxql.ListLiteral: - return json.Marshal(&ListLiteral{v}) - case *influxql.StringLiteral: - return json.Marshal(&StringLiteral{v}) - case *influxql.TimeLiteral: - return json.Marshal(&TimeLiteral{v}) - case *influxql.VarRef: - return json.Marshal(&VarRef{v}) - case *influxql.Wildcard: - return json.Marshal(&Wildcard{v}) - default: - t := reflect.TypeOf(v) - return nil, fmt.Errorf("error marshaling query: unknown type %s", t) - } -} - -type Measurement struct { - Database string `json:"database"` - RetentionPolicy string `json:"retentionPolicy"` - Name string `json:"name,omitempty"` - Regex *regexp.Regexp `json:"regex,omitempty"` - Type string `json:"type"` -} - -type Source struct { - influxql.Source -} - -func (s *Source) MarshalJSON() ([]byte, error) { - switch src := s.Source.(type) { - case *influxql.Measurement: - m := Measurement{ - Database: src.Database, - RetentionPolicy: src.RetentionPolicy, - Name: src.Name, - Type: "measurement", - } - if src.Regex != nil { - m.Regex = src.Regex.Val - } - return json.Marshal(m) - default: - return nil, fmt.Errorf("error marshaling source. 
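The deleted queries package adds JSON encodings to influxql AST nodes it does not own by wrapping each node in a local struct that implements json.Marshaler, then fanning out in one type switch. The pattern, reduced to a self-contained sketch with placeholder names:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type External struct{ Val string } // stands in for a third-party AST node

    // externalJSON wraps External to attach a JSON encoding we control.
    type externalJSON struct{ *External }

    func (e externalJSON) MarshalJSON() ([]byte, error) {
        return json.Marshal(struct {
            Expr string `json:"expr"`
            Val  string `json:"val"`
        }{"literal", e.Val})
    }

    // marshalNode dispatches each node type to its wrapper.
    func marshalNode(v interface{}) ([]byte, error) {
        switch v := v.(type) {
        case *External:
            return json.Marshal(externalJSON{v})
        default:
            return nil, fmt.Errorf("unknown node type %T", v)
        }
    }

    func main() {
        b, _ := marshalNode(&External{Val: "now()"})
        fmt.Println(string(b)) // {"expr":"literal","val":"now()"}
    }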
Subqueries not supported yet") - } -} - -type Sources struct { - influxql.Sources -} - -// TODO: Handle subqueries -func (s *Sources) MarshalJSON() ([]byte, error) { - srcs := make([]Source, len(s.Sources)) - for i, src := range s.Sources { - srcs[i] = Source{src} - } - return json.Marshal(srcs) -} - -type Field struct { - *influxql.Field -} - -func (f *Field) MarshalJSON() ([]byte, error) { - b, err := MarshalJSON(f.Expr) - if err != nil { - return nil, err - } - column := json.RawMessage(b) - return json.Marshal(struct { - Alias string `json:"alias,omitempty"` - Column *json.RawMessage `json:"column"` - }{f.Alias, &column}) -} - -type Fields struct { - influxql.Fields -} - -func (f *Fields) MarshalJSON() ([]byte, error) { - fields := make([]Field, len(f.Fields)) - for i, field := range f.Fields { - fields[i] = Field{field} - } - - return json.Marshal(fields) -} - -type Condition struct { - influxql.Expr -} - -func (c *Condition) MarshalJSON() ([]byte, error) { - return MarshalJSON(c.Expr) -} - -type SortField struct { - *influxql.SortField -} - -func (s *SortField) MarshalJSON() ([]byte, error) { - var order string - if s.Ascending { - order = "ascending" - } else { - order = "descending" - } - - return json.Marshal(struct { - Name string `json:"name,omitempty"` - Order string `json:"order,omitempty"` - }{s.Name, order}) -} - -type SortFields struct { - influxql.SortFields -} - -func (f *SortFields) MarshalJSON() ([]byte, error) { - fields := make([]SortField, len(f.SortFields)) - for i, field := range f.SortFields { - fields[i] = SortField{field} - } - - return json.Marshal(fields) -} - -type Limits struct { - Limit int `json:"limit,omitempty"` - Offset int `json:"offset,omitempty"` - SLimit int `json:"slimit,omitempty"` - SOffset int `json:"soffset,omitempty"` -} - -type SelectStatement struct { - *influxql.SelectStatement -} - -func (s *SelectStatement) MarshalJSON() ([]byte, error) { - stmt := map[string]interface{}{ - "fields": &Fields{s.Fields}, - "sources": &Sources{s.Sources}, - } - if len(s.Dimensions) > 0 { - stmt["groupBy"] = &Dimensions{s.Dimensions, s.Fill, s.FillValue} - } - if s.Condition != nil { - stmt["condition"] = &Condition{s.Condition} - } - if s.Limit != 0 || s.Offset != 0 || s.SLimit != 0 || s.SOffset != 0 { - stmt["limits"] = &Limits{s.Limit, s.Offset, s.SLimit, s.SOffset} - } - if len(s.SortFields) > 0 { - stmt["orderbys"] = &SortFields{s.SortFields} - } - return json.Marshal(stmt) -} - -type Dimension struct { - *influxql.Dimension -} - -func (d *Dimension) MarshalJSON() ([]byte, error) { - switch v := d.Expr.(type) { - case *influxql.Call: - if v.Name != "time" { - return nil, errors.New("time dimension offset function must be now()") - } - // Make sure there is exactly one argument. - if got := len(v.Args); got < 1 || got > 2 { - return nil, errors.New("time dimension expected 1 or 2 arguments") - } - // Ensure the argument is a duration. 
- lit, ok := v.Args[0].(*influxql.DurationLiteral) - if !ok { - return nil, errors.New("time dimension must have duration argument") - } - var offset string - if len(v.Args) == 2 { - switch o := v.Args[1].(type) { - case *influxql.DurationLiteral: - offset = o.String() - case *influxql.Call: - if o.Name != "now" { - return nil, errors.New("time dimension offset function must be now()") - } else if len(o.Args) != 0 { - return nil, errors.New("time dimension offset now() function requires no arguments") - } - offset = "now()" - default: - return nil, errors.New("time dimension offset must be duration or now()") - } - } - return json.Marshal(struct { - Interval string `json:"interval"` - Offset string `json:"offset,omitempty"` - }{lit.String(), offset}) - case *influxql.VarRef: - return json.Marshal(v.Val) - case *influxql.Wildcard: - return json.Marshal(v.String()) - case *influxql.RegexLiteral: - return json.Marshal(v.String()) - } - return MarshalJSON(d.Expr) -} - -type Dimensions struct { - influxql.Dimensions - FillOption influxql.FillOption - FillValue interface{} -} - -func (d *Dimensions) MarshalJSON() ([]byte, error) { - groupBys := struct { - Time *json.RawMessage `json:"time,omitempty"` - Tags []*json.RawMessage `json:"tags,omitempty"` - Fill *json.RawMessage `json:"fill,omitempty"` - }{} - - for _, dim := range d.Dimensions { - switch dim.Expr.(type) { - case *influxql.Call: - octets, err := json.Marshal(&Dimension{dim}) - - if err != nil { - return nil, err - } - time := json.RawMessage(octets) - groupBys.Time = &time - default: - octets, err := json.Marshal(&Dimension{dim}) - if err != nil { - return nil, err - } - tag := json.RawMessage(octets) - groupBys.Tags = append(groupBys.Tags, &tag) - } - } - if d.FillOption != influxql.NullFill { - octets, err := json.Marshal(&Fill{d.FillOption, d.FillValue}) - if err != nil { - return nil, err - } - fill := json.RawMessage(octets) - groupBys.Fill = &fill - } - return json.Marshal(groupBys) -} diff --git a/chronograf/influx/queries/select_test.go b/chronograf/influx/queries/select_test.go deleted file mode 100644 index 0dfd2359862..00000000000 --- a/chronograf/influx/queries/select_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package queries - -import ( - "encoding/json" - "fmt" - "testing" - "time" -) - -func TestSelect(t *testing.T) { - tests := []struct { - q string - }{ - {q: fmt.Sprintf(`SELECT mean(field1), sum(field2) ,count(field3::field) AS field_x FROM myseries WHERE host = 'hosta.influxdb.org' and time > '%s' GROUP BY time(10h) ORDER BY DESC LIMIT 20 OFFSET 10;`, time.Now().UTC().Format(time.RFC3339Nano))}, - {q: fmt.Sprintf(`SELECT difference(max(field1)) FROM myseries WHERE time > '%s' GROUP BY time(1m)`, time.Now().UTC().Format(time.RFC3339Nano))}, - {q: `SELECT derivative(field1, 1h) / derivative(field2, 1h) FROM myseries`}, - {q: `SELECT mean("load1") FROM "system" WHERE "cluster_id" =~ /^$ClusterID$/ AND time > now() - 1h GROUP BY time(10m), "host" fill(null)`}, - {q: "SELECT max(\"n_cpus\") AS \"max_cpus\", non_negative_derivative(median(\"n_users\"), 5m) FROM \"system\" WHERE \"cluster_id\" =~ /^23/ AND \"host\" = 'prod-2ccccc04-us-east-1-data-3' AND time > now() - 15m GROUP BY time(15m, 10s),host,tag_x fill(10)"}, - {q: "SELECT mean(\"usage_user\") AS \"mean_usage_user\" FROM \"telegraf\".\"default\".\"cpu\" WHERE host =~ /\\./ AND time > now() - 1h"}, - {q: `SELECT 1 + "A" FROM howdy`}, - } - - for i, tt := range tests { - stmt, err := ParseSelect(tt.q) - if err != nil { - t.Errorf("Test %d query %s invalid statement: 
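Per the Dimension marshaling above, a time() dimension takes one or two arguments: an interval duration, optionally followed by an offset that must itself be a duration literal or a bare now() call. Everything else is rejected. Illustrative inputs and their JSON:

    GROUP BY time(10m)         -> {"interval":"10m"}
    GROUP BY time(15m, 10s)    -> {"interval":"15m","offset":"10s"}
    GROUP BY time(1m, now())   -> {"interval":"1m","offset":"now()"}
    GROUP BY time(1m, 5)       -> error: "time dimension offset must be duration or now()"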
%v", i, tt.q, err) - } - _, err = json.MarshalIndent(stmt, "", " ") - if err != nil { - t.Errorf("Test %d query %s Unable to marshal statement: %v", i, tt.q, err) - } - } -} diff --git a/chronograf/influx/query.go b/chronograf/influx/query.go deleted file mode 100644 index fc8fe8038c7..00000000000 --- a/chronograf/influx/query.go +++ /dev/null @@ -1,537 +0,0 @@ -package influx - -import ( - "fmt" - "strconv" - "strings" - "time" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxql" -) - -// TimeRangeAsEpochNano extracts the min and max epoch times from the expression -func TimeRangeAsEpochNano(expr influxql.Expr, now time.Time) (min, max int64, err error) { - // TODO(desa): is this OK? - _, trange, err := influxql.ConditionExpr(expr, nil) - if err != nil { - return 0, 0, err - } - tmin, tmax := trange.Min, trange.Max - if tmin.IsZero() { - min = time.Unix(0, influxql.MinTime).UnixNano() - } else { - min = tmin.UnixNano() - } - if tmax.IsZero() { - max = now.UnixNano() - } else { - max = tmax.UnixNano() - } - return -} - -// WhereToken is used to parse the time expression from an influxql query -const WhereToken = "WHERE" - -// ParseTime extracts the duration of the time range of the query -func ParseTime(influxQL string, now time.Time) (time.Duration, error) { - start := strings.Index(strings.ToUpper(influxQL), WhereToken) - if start == -1 { - return 0, fmt.Errorf("not a relative duration") - } - start += len(WhereToken) - where := influxQL[start:] - cond, err := influxql.ParseExpr(where) - if err != nil { - return 0, err - } - nowVal := &influxql.NowValuer{ - Now: now, - } - cond = influxql.Reduce(cond, nowVal) - min, max, err := TimeRangeAsEpochNano(cond, now) - if err != nil { - return 0, err - } - dur := time.Duration(max - min) - if dur < 0 { - dur = 0 - } - return dur, nil -} - -// Convert changes an InfluxQL query to a QueryConfig -func Convert(influxQL string) (chronograf.QueryConfig, error) { - itsDashboardTime := false - intervalTime := false - - if strings.Contains(influxQL, ":interval:") { - influxQL = strings.Replace(influxQL, ":interval:", "8675309ns", -1) - intervalTime = true - } - - if strings.Contains(influxQL, ":dashboardTime:") { - influxQL = strings.Replace(influxQL, ":dashboardTime:", "now() - 15m", 1) - itsDashboardTime = true - } - - query, err := influxql.ParseQuery(influxQL) - if err != nil { - return chronograf.QueryConfig{}, err - } - - if itsDashboardTime { - influxQL = strings.Replace(influxQL, "now() - 15m", ":dashboardTime:", 1) - } - - if intervalTime { - influxQL = strings.Replace(influxQL, "8675309ns", ":interval:", -1) - } - - raw := chronograf.QueryConfig{ - RawText: &influxQL, - Fields: []chronograf.Field{}, - GroupBy: chronograf.GroupBy{ - Tags: []string{}, - }, - Tags: make(map[string][]string), - } - qc := chronograf.QueryConfig{ - GroupBy: chronograf.GroupBy{ - Tags: []string{}, - }, - Tags: make(map[string][]string), - } - - if len(query.Statements) != 1 { - return raw, nil - } - - stmt, ok := query.Statements[0].(*influxql.SelectStatement) - if !ok { - return raw, nil - } - - // Query config doesn't support limits - if stmt.Limit != 0 || stmt.Offset != 0 || stmt.SLimit != 0 || stmt.SOffset != 0 { - return raw, nil - } - - // Query config doesn't support sorting - if len(stmt.SortFields) > 0 { - return raw, nil - } - - // Query config doesn't allow SELECT INTO - if stmt.Target != nil { - return raw, nil - } - - // Query config only allows selecting from one source at a time. 
- if len(stmt.Sources) != 1 { - return raw, nil - } - - src := stmt.Sources[0] - measurement, ok := src.(*influxql.Measurement) - if !ok { - return raw, nil - } - - if measurement.Regex != nil { - return raw, nil - } - qc.Database = measurement.Database - qc.RetentionPolicy = measurement.RetentionPolicy - qc.Measurement = measurement.Name - - for _, dim := range stmt.Dimensions { - switch v := dim.Expr.(type) { - default: - return raw, nil - case *influxql.Call: - if v.Name != "time" { - return raw, nil - } - // Make sure there is exactly one argument. - if len(v.Args) != 1 { - return raw, nil - } - // Ensure the argument is a duration. - lit, ok := v.Args[0].(*influxql.DurationLiteral) - if !ok { - return raw, nil - } - if intervalTime { - qc.GroupBy.Time = "auto" - } else { - qc.GroupBy.Time = lit.String() - } - // Add fill to queryConfig only if there's a `GROUP BY time` - switch stmt.Fill { - case influxql.NullFill: - qc.Fill = "null" - case influxql.NoFill: - qc.Fill = "none" - case influxql.NumberFill: - qc.Fill = fmt.Sprint(stmt.FillValue) - case influxql.PreviousFill: - qc.Fill = "previous" - case influxql.LinearFill: - qc.Fill = "linear" - default: - return raw, nil - } - case *influxql.VarRef: - qc.GroupBy.Tags = append(qc.GroupBy.Tags, v.Val) - } - } - - qc.Fields = []chronograf.Field{} - for _, fld := range stmt.Fields { - switch f := fld.Expr.(type) { - default: - return raw, nil - case *influxql.Call: - // only support certain query config functions - if _, ok = supportedFuncs[f.Name]; !ok { - return raw, nil - } - - fldArgs := []chronograf.Field{} - for _, arg := range f.Args { - switch ref := arg.(type) { - case *influxql.VarRef: - fldArgs = append(fldArgs, chronograf.Field{ - Value: ref.Val, - Type: "field", - }) - case *influxql.IntegerLiteral: - fldArgs = append(fldArgs, chronograf.Field{ - Value: strconv.FormatInt(ref.Val, 10), - Type: "integer", - }) - case *influxql.NumberLiteral: - fldArgs = append(fldArgs, chronograf.Field{ - Value: strconv.FormatFloat(ref.Val, 'f', -1, 64), - Type: "number", - }) - case *influxql.RegexLiteral: - fldArgs = append(fldArgs, chronograf.Field{ - Value: ref.Val.String(), - Type: "regex", - }) - case *influxql.Wildcard: - fldArgs = append(fldArgs, chronograf.Field{ - Value: "*", - Type: "wildcard", - }) - default: - return raw, nil - } - } - - qc.Fields = append(qc.Fields, chronograf.Field{ - Value: f.Name, - Type: "func", - Alias: fld.Alias, - Args: fldArgs, - }) - case *influxql.VarRef: - if f.Type != influxql.Unknown { - return raw, nil - } - qc.Fields = append(qc.Fields, chronograf.Field{ - Value: f.Val, - Type: "field", - Alias: fld.Alias, - }) - } - } - - if stmt.Condition == nil { - return qc, nil - } - - reduced := influxql.Reduce(stmt.Condition, nil) - logic, ok := isTagLogic(reduced) - if !ok { - return raw, nil - } - - ops := map[string]bool{} - for _, l := range logic { - values, ok := qc.Tags[l.Tag] - if !ok { - values = []string{} - } - ops[l.Op] = true - values = append(values, l.Value) - qc.Tags[l.Tag] = values - } - - if len(logic) > 0 { - if len(ops) != 1 { - return raw, nil - } - if _, ok := ops["=="]; ok { - qc.AreTagsAccepted = true - } - } - - // If the condition has a time range we report back its duration - if dur, ok := hasTimeRange(stmt.Condition); ok { - if !itsDashboardTime { - qc.Range = &chronograf.DurationRange{ - Lower: "now() - " + shortDur(dur), - } - } - } - - return qc, nil -} - -// tagFilter represents a single tag that is filtered by some condition -type tagFilter struct { - Op string - Tag string - 
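A reading aid for the fallbacks above: whenever Convert meets a construct the query config cannot represent (LIMIT/OFFSET, ORDER BY, SELECT INTO, multiple or regex sources, field math such as count("event_id")/3, functions outside supportedFuncs, or tag filters mixing = and !=), it returns the raw config, whose only populated field is RawText; the tests further below assert exactly this split.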
Value string -} - -func isTime(exp influxql.Expr) bool { - if p, ok := exp.(*influxql.ParenExpr); ok { - return isTime(p.Expr) - } else if ref, ok := exp.(*influxql.VarRef); ok && strings.ToLower(ref.Val) == "time" { - return true - } - return false -} - -func isNow(exp influxql.Expr) bool { - if p, ok := exp.(*influxql.ParenExpr); ok { - return isNow(p.Expr) - } else if call, ok := exp.(*influxql.Call); ok && strings.ToLower(call.Name) == "now" && len(call.Args) == 0 { - return true - } - return false -} - -func isDuration(exp influxql.Expr) (time.Duration, bool) { - switch e := exp.(type) { - case *influxql.ParenExpr: - return isDuration(e.Expr) - case *influxql.DurationLiteral: - return e.Val, true - case *influxql.NumberLiteral, *influxql.IntegerLiteral, *influxql.TimeLiteral: - return 0, false - } - return 0, false -} - -func isPreviousTime(exp influxql.Expr) (time.Duration, bool) { - if p, ok := exp.(*influxql.ParenExpr); ok { - return isPreviousTime(p.Expr) - } else if bin, ok := exp.(*influxql.BinaryExpr); ok { - now := isNow(bin.LHS) || isNow(bin.RHS) // either side can be now - op := bin.Op == influxql.SUB - dur, hasDur := isDuration(bin.LHS) - if !hasDur { - dur, hasDur = isDuration(bin.RHS) - } - return dur, now && op && hasDur - } else if isNow(exp) { // just comparing to now - return 0, true - } - return 0, false -} - -func isTimeRange(exp influxql.Expr) (time.Duration, bool) { - if p, ok := exp.(*influxql.ParenExpr); ok { - return isTimeRange(p.Expr) - } else if bin, ok := exp.(*influxql.BinaryExpr); ok { - tm := isTime(bin.LHS) || isTime(bin.RHS) // Either side could be time - op := false - switch bin.Op { - case influxql.LT, influxql.LTE, influxql.GT, influxql.GTE: - op = true - } - dur, prev := isPreviousTime(bin.LHS) - if !prev { - dur, prev = isPreviousTime(bin.RHS) - } - return dur, tm && op && prev - } - return 0, false -} - -func hasTimeRange(exp influxql.Expr) (time.Duration, bool) { - v := &timeRangeVisitor{} - influxql.Walk(v, exp) - return v.Duration, v.Ok -} - -// timeRangeVisitor implements influxql.Visitor to search for time ranges -type timeRangeVisitor struct { - Duration time.Duration - Ok bool -} - -func (v *timeRangeVisitor) Visit(n influxql.Node) influxql.Visitor { - if exp, ok := n.(influxql.Expr); !ok { - return nil - } else if dur, ok := isTimeRange(exp); ok { - v.Duration = dur - v.Ok = ok - return nil - } - return v -} - -func isTagLogic(exp influxql.Expr) ([]tagFilter, bool) { - if p, ok := exp.(*influxql.ParenExpr); ok { - return isTagLogic(p.Expr) - } - - if _, ok := isTimeRange(exp); ok { - return nil, true - } else if tf, ok := isTagFilter(exp); ok { - return []tagFilter{tf}, true - } - - bin, ok := exp.(*influxql.BinaryExpr) - if !ok { - return nil, false - } - - lhs, lhsOK := isTagFilter(bin.LHS) - rhs, rhsOK := isTagFilter(bin.RHS) - - if lhsOK && rhsOK && lhs.Tag == rhs.Tag && lhs.Op == rhs.Op && bin.Op == influxql.OR { - return []tagFilter{lhs, rhs}, true - } - - if bin.Op != influxql.AND && bin.Op != influxql.OR { - return nil, false - } - - _, tm := isTimeRange(bin.LHS) - if !tm { - _, tm = isTimeRange(bin.RHS) - } - tf := lhsOK || rhsOK - if tm && tf { - if lhsOK { - return []tagFilter{lhs}, true - } - return []tagFilter{rhs}, true - } - - tlLHS, lhsOK := isTagLogic(bin.LHS) - tlRHS, rhsOK := isTagLogic(bin.RHS) - if lhsOK && rhsOK { - ops := map[string]bool{} // there must only be one kind of ops - for _, tf := range tlLHS { - ops[tf.Op] = true - } - for _, tf := range tlRHS { - ops[tf.Op] = true - } - if len(ops) > 1 { - return nil, 
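timeRangeVisitor above relies on the influxql.Visitor contract: Walk keeps descending only while Visit returns a non-nil visitor, so returning nil stops traversal beneath the matched node. A self-contained illustration against the same library:

    package main

    import (
        "fmt"

        "github.com/influxdata/influxql"
    )

    // firstCall records the first function call found in an expression.
    type firstCall struct{ name string }

    func (v *firstCall) Visit(n influxql.Node) influxql.Visitor {
        if c, ok := n.(*influxql.Call); ok {
            v.name = c.Name
            return nil // stop descending below the match
        }
        return v
    }

    func main() {
        expr, _ := influxql.ParseExpr(`time > now() - 15m`)
        v := &firstCall{}
        influxql.Walk(v, expr)
        fmt.Println(v.name) // now
    }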
false - } - return append(tlLHS, tlRHS...), true - } - return nil, false -} - -func isVarRef(exp influxql.Expr) bool { - if p, ok := exp.(*influxql.ParenExpr); ok { - return isVarRef(p.Expr) - } else if _, ok := exp.(*influxql.VarRef); ok { - return true - } - return false -} - -func isString(exp influxql.Expr) bool { - if p, ok := exp.(*influxql.ParenExpr); ok { - return isString(p.Expr) - } else if _, ok := exp.(*influxql.StringLiteral); ok { - return true - } - return false -} - -func isTagFilter(exp influxql.Expr) (tagFilter, bool) { - switch expr := exp.(type) { - default: - return tagFilter{}, false - case *influxql.ParenExpr: - return isTagFilter(expr.Expr) - case *influxql.BinaryExpr: - var Op string - if expr.Op == influxql.EQ { - Op = "==" - } else if expr.Op == influxql.NEQ { - Op = "!=" - } else { - return tagFilter{}, false - } - - hasValue := isString(expr.LHS) || isString(expr.RHS) - hasTag := isVarRef(expr.LHS) || isVarRef(expr.RHS) - if !(hasValue && hasTag) { - return tagFilter{}, false - } - - value := "" - tag := "" - // Either tag op value or value op tag - if isVarRef(expr.LHS) { - t, _ := expr.LHS.(*influxql.VarRef) - tag = t.Val - v, _ := expr.RHS.(*influxql.StringLiteral) - value = v.Val - } else { - t, _ := expr.RHS.(*influxql.VarRef) - tag = t.Val - v, _ := expr.LHS.(*influxql.StringLiteral) - value = v.Val - } - - return tagFilter{ - Op: Op, - Tag: tag, - Value: value, - }, true - } -} - -var supportedFuncs = map[string]bool{ - "mean": true, - "median": true, - "count": true, - "min": true, - "max": true, - "sum": true, - "first": true, - "last": true, - "spread": true, - "stddev": true, - "percentile": true, - "top": true, - "bottom": true, -} - -// shortDur converts duration into the queryConfig duration format -func shortDur(d time.Duration) string { - s := d.String() - if strings.HasSuffix(s, "m0s") { - s = s[:len(s)-2] - } - if strings.HasSuffix(s, "h0m") { - s = s[:len(s)-2] - } - return s -} diff --git a/chronograf/influx/query_test.go b/chronograf/influx/query_test.go deleted file mode 100644 index 0927343814c..00000000000 --- a/chronograf/influx/query_test.go +++ /dev/null @@ -1,810 +0,0 @@ -package influx - -import ( - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/chronograf" -) - -func TestConvert(t *testing.T) { - tests := []struct { - name string - influxQL string - RawText string - want chronograf.QueryConfig - wantErr bool - }{ - { - name: "Test field order", - influxQL: `SELECT "usage_idle", "usage_guest_nice", "usage_system", "usage_guest" FROM "telegraf"."autogen"."cpu" WHERE time > :dashboardTime:`, - want: chronograf.QueryConfig{ - Database: "telegraf", - Measurement: "cpu", - RetentionPolicy: "autogen", - Fields: []chronograf.Field{ - chronograf.Field{ - Value: "usage_idle", - Type: "field", - }, - chronograf.Field{ - Value: "usage_guest_nice", - Type: "field", - }, - chronograf.Field{ - Value: "usage_system", - Type: "field", - }, - chronograf.Field{ - Value: "usage_guest", - Type: "field", - }, - }, - Tags: map[string][]string{}, - GroupBy: chronograf.GroupBy{ - Tags: []string{}, - }, - }, - }, - { - name: "Test field function order", - influxQL: `SELECT mean("usage_idle"), median("usage_idle"), count("usage_guest_nice"), mean("usage_guest_nice") FROM "telegraf"."autogen"."cpu" WHERE time > :dashboardTime:`, - want: chronograf.QueryConfig{ - Database: "telegraf", - Measurement: "cpu", - RetentionPolicy: "autogen", - Fields: []chronograf.Field{ - chronograf.Field{ - Value: "mean", 
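shortDur, just above, exists because time.Duration.String() renders trailing zero units ("15m0s", "2h0m0s") that the query-config duration format omits. A runnable check of the trimming:

    package main

    import (
        "fmt"
        "strings"
        "time"
    )

    // Same trimming as the deleted shortDur.
    func shortDur(d time.Duration) string {
        s := d.String()
        if strings.HasSuffix(s, "m0s") {
            s = s[:len(s)-2]
        }
        if strings.HasSuffix(s, "h0m") {
            s = s[:len(s)-2]
        }
        return s
    }

    func main() {
        fmt.Println(shortDur(15 * time.Minute)) // 15m   (String() gives 15m0s)
        fmt.Println(shortDur(2 * time.Hour))    // 2h    (String() gives 2h0m0s)
        fmt.Println(shortDur(90 * time.Minute)) // 1h30m (String() gives 1h30m0s)
    }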
- Type: "func", - Args: []chronograf.Field{ - { - Value: "usage_idle", - Type: "field", - }, - }, - }, - chronograf.Field{ - Value: "median", - Type: "func", - Args: []chronograf.Field{ - { - Value: "usage_idle", - Type: "field", - }, - }, - }, - chronograf.Field{ - Value: "count", - Type: "func", - Args: []chronograf.Field{ - { - Value: "usage_guest_nice", - Type: "field", - }, - }, - }, - chronograf.Field{ - Value: "mean", - Type: "func", - Args: []chronograf.Field{ - { - Value: "usage_guest_nice", - Type: "field", - }, - }, - }, - }, - Tags: map[string][]string{}, - GroupBy: chronograf.GroupBy{ - Tags: []string{}, - }, - }, - }, - { - name: "Test named count field", - influxQL: `SELECT moving_average(mean("count"),14) FROM "usage_computed"."autogen".unique_clusters_by_day WHERE time > now() - 90d AND product = 'influxdb' group by time(1d)`, - RawText: `SELECT moving_average(mean("count"),14) FROM "usage_computed"."autogen".unique_clusters_by_day WHERE time > now() - 90d AND product = 'influxdb' group by time(1d)`, - want: chronograf.QueryConfig{ - Fields: []chronograf.Field{}, - Tags: map[string][]string{}, - GroupBy: chronograf.GroupBy{ - Tags: []string{}, - }, - }, - }, - { - name: "Test math", - influxQL: `SELECT count("event_id")/3 as "event_count_id" from discource.autogen.discourse_events where time > now() - 7d group by time(1d), "event_type"`, - RawText: `SELECT count("event_id")/3 as "event_count_id" from discource.autogen.discourse_events where time > now() - 7d group by time(1d), "event_type"`, - want: chronograf.QueryConfig{ - Fields: []chronograf.Field{}, - Tags: map[string][]string{}, - GroupBy: chronograf.GroupBy{ - Tags: []string{}, - }, - }, - }, - { - name: "Test range", - influxQL: `SELECT usage_user from telegraf.autogen.cpu where "host" != 'myhost' and time > now() - 15m`, - want: chronograf.QueryConfig{ - Database: "telegraf", - Measurement: "cpu", - RetentionPolicy: "autogen", - Fields: []chronograf.Field{ - chronograf.Field{ - Value: "usage_user", - Type: "field", - }, - }, - Tags: map[string][]string{"host": []string{"myhost"}}, - GroupBy: chronograf.GroupBy{ - Time: "", - Tags: []string{}, - }, - AreTagsAccepted: false, - Range: &chronograf.DurationRange{ - Lower: "now() - 15m", - }, - }, - }, - { - name: "Test invalid range", - influxQL: `SELECT usage_user from telegraf.autogen.cpu where "host" != 'myhost' and time > now() - 15`, - RawText: `SELECT usage_user from telegraf.autogen.cpu where "host" != 'myhost' and time > now() - 15`, - want: chronograf.QueryConfig{ - Fields: []chronograf.Field{}, - Tags: map[string][]string{}, - GroupBy: chronograf.GroupBy{ - Tags: []string{}, - }, - }, - }, - { - name: "Test range with no duration", - influxQL: `SELECT usage_user from telegraf.autogen.cpu where "host" != 'myhost' and time > now()`, - want: chronograf.QueryConfig{ - Database: "telegraf", - Measurement: "cpu", - RetentionPolicy: "autogen", - Fields: []chronograf.Field{ - chronograf.Field{ - Value: "usage_user", - Type: "field", - }, - }, - Tags: map[string][]string{"host": []string{"myhost"}}, - GroupBy: chronograf.GroupBy{ - Time: "", - Tags: []string{}, - }, - AreTagsAccepted: false, - Range: &chronograf.DurationRange{ - Lower: "now() - 0s", - }, - }, - }, - { - name: "Test range with no tags", - influxQL: `SELECT usage_user from telegraf.autogen.cpu where time > now() - 15m`, - want: chronograf.QueryConfig{ - Database: "telegraf", - Measurement: "cpu", - RetentionPolicy: "autogen", - Tags: map[string][]string{}, - Fields: []chronograf.Field{ - 
chronograf.Field{ - Value: "usage_user", - Type: "field", - }, - }, - GroupBy: chronograf.GroupBy{ - Time: "", - Tags: []string{}, - }, - AreTagsAccepted: false, - Range: &chronograf.DurationRange{ - Lower: "now() - 15m", - }, - }, - }, - { - name: "Test range with no tags nor duration", - influxQL: `SELECT usage_user from telegraf.autogen.cpu where time`, - RawText: `SELECT usage_user from telegraf.autogen.cpu where time`, - want: chronograf.QueryConfig{ - Fields: []chronograf.Field{}, - Tags: map[string][]string{}, - GroupBy: chronograf.GroupBy{ - Tags: []string{}, - }, - }, - }, - { - name: "Test with no time range", - influxQL: `SELECT usage_user from telegraf.autogen.cpu where "host" != 'myhost' and time`, - RawText: `SELECT usage_user from telegraf.autogen.cpu where "host" != 'myhost' and time`, - want: chronograf.QueryConfig{ - Fields: []chronograf.Field{}, - Tags: map[string][]string{}, - GroupBy: chronograf.GroupBy{ - Tags: []string{}, - }, - }, - }, - { - name: "Test with no where clauses", - influxQL: `SELECT usage_user from telegraf.autogen.cpu`, - want: chronograf.QueryConfig{ - Database: "telegraf", - Measurement: "cpu", - RetentionPolicy: "autogen", - Fields: []chronograf.Field{ - chronograf.Field{ - Value: "usage_user", - Type: "field", - }, - }, - Tags: map[string][]string{}, - GroupBy: chronograf.GroupBy{ - Time: "", - Tags: []string{}, - }, - }, - }, - { - name: "Test tags accepted", - influxQL: `SELECT usage_user from telegraf.autogen.cpu where "host" = 'myhost' and time > now() - 15m`, - want: chronograf.QueryConfig{ - Database: "telegraf", - Measurement: "cpu", - RetentionPolicy: "autogen", - Fields: []chronograf.Field{ - chronograf.Field{ - Value: "usage_user", - Type: "field", - }, - }, - Tags: map[string][]string{"host": []string{"myhost"}}, - GroupBy: chronograf.GroupBy{ - Time: "", - Tags: []string{}, - }, - AreTagsAccepted: true, - Range: &chronograf.DurationRange{ - Lower: "now() - 15m", - Upper: "", - }, - }, - }, - { - name: "Test multible tags not accepted", - influxQL: `SELECT usage_user from telegraf.autogen.cpu where time > now() - 15m and "host" != 'myhost' and "cpu" != 'cpu-total'`, - want: chronograf.QueryConfig{ - Database: "telegraf", - Measurement: "cpu", - RetentionPolicy: "autogen", - Fields: []chronograf.Field{ - chronograf.Field{ - Value: "usage_user", - Type: "field", - }, - }, - Tags: map[string][]string{ - "host": []string{ - "myhost", - }, - "cpu": []string{ - "cpu-total", - }, - }, - GroupBy: chronograf.GroupBy{ - Time: "", - Tags: []string{}, - }, - AreTagsAccepted: false, - Range: &chronograf.DurationRange{ - Lower: "now() - 15m", - Upper: "", - }, - }, - }, - { - name: "Test mixed tag logic", - influxQL: `SELECT usage_user from telegraf.autogen.cpu where ("host" = 'myhost' or "this" = 'those') and ("howdy" != 'doody') and time > now() - 15m`, - RawText: `SELECT usage_user from telegraf.autogen.cpu where ("host" = 'myhost' or "this" = 'those') and ("howdy" != 'doody') and time > now() - 15m`, - want: chronograf.QueryConfig{ - Fields: []chronograf.Field{}, - Tags: map[string][]string{}, - GroupBy: chronograf.GroupBy{ - Tags: []string{}, - }, - }, - }, - { - name: "Test tags accepted", - influxQL: `SELECT usage_user from telegraf.autogen.cpu where ("host" = 'myhost' OR "host" = 'yourhost') and ("these" = 'those') and time > now() - 15m`, - want: chronograf.QueryConfig{ - Database: "telegraf", - Measurement: "cpu", - RetentionPolicy: "autogen", - Fields: []chronograf.Field{ - chronograf.Field{ - Value: "usage_user", - Type: "field", - }, - 
}, - Tags: map[string][]string{ - "host": []string{"myhost", "yourhost"}, - "these": []string{"those"}, - }, - GroupBy: chronograf.GroupBy{ - Time: "", - Tags: []string{}, - }, - AreTagsAccepted: true, - Range: &chronograf.DurationRange{ - Lower: "now() - 15m", - }, - }, - }, - { - name: "Complex Logic with tags not accepted", - influxQL: `SELECT "usage_idle", "usage_guest_nice", "usage_system", "usage_guest" FROM "telegraf"."autogen"."cpu" WHERE time > now() - 15m AND ("cpu"!='cpu-total' OR "cpu"!='cpu0') AND ("host"!='dev-052978d6-us-east-2-meta-0' OR "host"!='dev-052978d6-us-east-2-data-5' OR "host"!='dev-052978d6-us-east-2-data-4' OR "host"!='dev-052978d6-us-east-2-data-3')`, - want: chronograf.QueryConfig{ - Database: "telegraf", - Measurement: "cpu", - RetentionPolicy: "autogen", - Fields: []chronograf.Field{ - chronograf.Field{ - Value: "usage_idle", - Type: "field", - }, - chronograf.Field{ - Value: "usage_guest_nice", - Type: "field", - }, - chronograf.Field{ - Value: "usage_system", - Type: "field", - }, - chronograf.Field{ - Value: "usage_guest", - Type: "field", - }, - }, - Tags: map[string][]string{ - "host": []string{ - "dev-052978d6-us-east-2-meta-0", - "dev-052978d6-us-east-2-data-5", - "dev-052978d6-us-east-2-data-4", - "dev-052978d6-us-east-2-data-3", - }, - "cpu": []string{ - "cpu-total", - "cpu0", - }, - }, - GroupBy: chronograf.GroupBy{ - Time: "", - Tags: []string{}, - }, - AreTagsAccepted: false, - Range: &chronograf.DurationRange{ - Lower: "now() - 15m", - }, - }, - }, - { - name: "Complex Logic with tags accepted", - influxQL: `SELECT "usage_idle", "usage_guest_nice", "usage_system", "usage_guest" FROM "telegraf"."autogen"."cpu" WHERE time > now() - 15m AND ("cpu" = 'cpu-total' OR "cpu" = 'cpu0') AND ("host" = 'dev-052978d6-us-east-2-meta-0' OR "host" = 'dev-052978d6-us-east-2-data-5' OR "host" = 'dev-052978d6-us-east-2-data-4' OR "host" = 'dev-052978d6-us-east-2-data-3')`, - want: chronograf.QueryConfig{ - Database: "telegraf", - Measurement: "cpu", - RetentionPolicy: "autogen", - Fields: []chronograf.Field{ - chronograf.Field{ - Value: "usage_idle", - Type: "field", - }, - chronograf.Field{ - Value: "usage_guest_nice", - Type: "field", - }, - chronograf.Field{ - Value: "usage_system", - Type: "field", - }, - chronograf.Field{ - Value: "usage_guest", - Type: "field", - }, - }, - Tags: map[string][]string{ - "host": []string{ - "dev-052978d6-us-east-2-meta-0", - "dev-052978d6-us-east-2-data-5", - "dev-052978d6-us-east-2-data-4", - "dev-052978d6-us-east-2-data-3", - }, - "cpu": []string{ - "cpu-total", - "cpu0", - }, - }, - GroupBy: chronograf.GroupBy{ - Time: "", - Tags: []string{}, - }, - AreTagsAccepted: true, - Range: &chronograf.DurationRange{ - Lower: "now() - 15m", - }, - }, - }, - { - name: "Test explicit non-null fill accepted", - influxQL: `SELECT mean("usage_idle") FROM "telegraf"."autogen"."cpu" WHERE time > now() - 15m GROUP BY time(1m) FILL(linear)`, - want: chronograf.QueryConfig{ - Database: "telegraf", - Measurement: "cpu", - RetentionPolicy: "autogen", - Fields: []chronograf.Field{ - chronograf.Field{ - Value: "mean", - Type: "func", - Args: []chronograf.Field{ - { - Value: "usage_idle", - Type: "field", - }, - }, - }, - }, - GroupBy: chronograf.GroupBy{ - Time: "1m", - Tags: []string{}, - }, - Tags: map[string][]string{}, - AreTagsAccepted: false, - Fill: "linear", - Range: &chronograf.DurationRange{ - Lower: "now() - 15m", - }, - }, - }, - { - name: "Test explicit null fill accepted", - influxQL: `SELECT mean("usage_idle") FROM 
"telegraf"."autogen"."cpu" WHERE time > now() - 15m GROUP BY time(1m) FILL(null)`, - want: chronograf.QueryConfig{ - Database: "telegraf", - Measurement: "cpu", - RetentionPolicy: "autogen", - Fields: []chronograf.Field{ - chronograf.Field{ - Value: "mean", - Type: "func", - Args: []chronograf.Field{ - { - Value: "usage_idle", - Type: "field", - }, - }, - }, - }, - GroupBy: chronograf.GroupBy{ - Time: "1m", - Tags: []string{}, - }, - Tags: map[string][]string{}, - AreTagsAccepted: false, - Fill: "null", - Range: &chronograf.DurationRange{ - Lower: "now() - 15m", - }, - }, - }, - { - name: "Test implicit null fill accepted and made explicit", - influxQL: `SELECT mean("usage_idle") as "mean_usage_idle" FROM "telegraf"."autogen"."cpu" WHERE time > now() - 15m GROUP BY time(1m)`, - want: chronograf.QueryConfig{ - Database: "telegraf", - Measurement: "cpu", - RetentionPolicy: "autogen", - Fields: []chronograf.Field{ - chronograf.Field{ - Value: "mean", - Type: "func", - Alias: "mean_usage_idle", - Args: []chronograf.Field{ - { - Value: "usage_idle", - Type: "field", - }, - }, - }, - }, - GroupBy: chronograf.GroupBy{ - Time: "1m", - Tags: []string{}, - }, - Tags: map[string][]string{}, - AreTagsAccepted: false, - Fill: "null", - Range: &chronograf.DurationRange{ - Lower: "now() - 15m", - }, - }, - }, - { - name: "Test percentile with a number parameter", - influxQL: `SELECT percentile("usage_idle", 3.14) as "mean_usage_idle" FROM "telegraf"."autogen"."cpu" WHERE time > now() - 15m GROUP BY time(1m)`, - want: chronograf.QueryConfig{ - Database: "telegraf", - Measurement: "cpu", - RetentionPolicy: "autogen", - Fields: []chronograf.Field{ - chronograf.Field{ - Value: "percentile", - Type: "func", - Alias: "mean_usage_idle", - Args: []chronograf.Field{ - { - Value: "usage_idle", - Type: "field", - }, - chronograf.Field{ - Value: "3.14", - Type: "number", - }, - }, - }, - }, - GroupBy: chronograf.GroupBy{ - Time: "1m", - Tags: []string{}, - }, - Tags: map[string][]string{}, - AreTagsAccepted: false, - Fill: "null", - Range: &chronograf.DurationRange{ - Lower: "now() - 15m", - }, - }, - }, - { - name: "Test top with 2 arguments", - influxQL: `SELECT TOP("water_level","location",2) FROM "h2o_feet"`, - want: chronograf.QueryConfig{ - Measurement: "h2o_feet", - Fields: []chronograf.Field{ - chronograf.Field{ - Value: "top", - Type: "func", - Args: []chronograf.Field{ - { - Value: "water_level", - Type: "field", - }, - chronograf.Field{ - Value: "location", - Type: "field", - }, - chronograf.Field{ - Value: "2", - Type: "integer", - }, - }, - }, - }, - GroupBy: chronograf.GroupBy{ - Tags: []string{}, - }, - Tags: map[string][]string{}, - AreTagsAccepted: false, - }, - }, - { - name: "count of a regex", - influxQL: ` SELECT COUNT(/water/) FROM "h2o_feet"`, - want: chronograf.QueryConfig{ - Measurement: "h2o_feet", - Fields: []chronograf.Field{ - chronograf.Field{ - Value: "count", - Type: "func", - Args: []chronograf.Field{ - { - Value: "water", - Type: "regex", - }, - }, - }, - }, - GroupBy: chronograf.GroupBy{ - Tags: []string{}, - }, - Tags: map[string][]string{}, - AreTagsAccepted: false, - }, - }, - { - name: "count with aggregate", - influxQL: `SELECT COUNT(water) as "count_water" FROM "h2o_feet"`, - want: chronograf.QueryConfig{ - Measurement: "h2o_feet", - Fields: []chronograf.Field{ - chronograf.Field{ - Value: "count", - Type: "func", - Alias: "count_water", - Args: []chronograf.Field{ - { - Value: "water", - Type: "field", - }, - }, - }, - }, - GroupBy: chronograf.GroupBy{ - Tags: []string{}, - 
}, - Tags: map[string][]string{}, - AreTagsAccepted: false, - }, - }, - { - name: "count of a wildcard", - influxQL: ` SELECT COUNT(*) FROM "h2o_feet"`, - want: chronograf.QueryConfig{ - Measurement: "h2o_feet", - Fields: []chronograf.Field{ - chronograf.Field{ - Value: "count", - Type: "func", - Args: []chronograf.Field{ - { - Value: "*", - Type: "wildcard", - }, - }, - }, - }, - GroupBy: chronograf.GroupBy{ - Tags: []string{}, - }, - Tags: map[string][]string{}, - AreTagsAccepted: false, - }, - }, - { - name: "Test fill number (int) accepted", - influxQL: `SELECT mean("usage_idle") FROM "telegraf"."autogen"."cpu" WHERE time > now() - 15m GROUP BY time(1m) FILL(1337)`, - want: chronograf.QueryConfig{ - Database: "telegraf", - Measurement: "cpu", - RetentionPolicy: "autogen", - Fields: []chronograf.Field{ - chronograf.Field{ - Value: "mean", - Type: "func", - Args: []chronograf.Field{ - { - Value: "usage_idle", - Type: "field", - }, - }, - }, - }, - GroupBy: chronograf.GroupBy{ - Time: "1m", - Tags: []string{}, - }, - Tags: map[string][]string{}, - AreTagsAccepted: false, - Fill: "1337", - Range: &chronograf.DurationRange{ - Lower: "now() - 15m", - }, - }, - }, - { - name: "Test fill number (float) accepted", - influxQL: `SELECT mean("usage_idle") FROM "telegraf"."autogen"."cpu" WHERE time > now() - 15m GROUP BY time(1m) FILL(1.337)`, - want: chronograf.QueryConfig{ - Database: "telegraf", - Measurement: "cpu", - RetentionPolicy: "autogen", - Fields: []chronograf.Field{ - chronograf.Field{ - Value: "mean", - Type: "func", - Args: []chronograf.Field{ - { - Value: "usage_idle", - Type: "field", - }, - }, - }, - }, - GroupBy: chronograf.GroupBy{ - Time: "1m", - Tags: []string{}, - }, - Tags: map[string][]string{}, - AreTagsAccepted: false, - Fill: "1.337", - Range: &chronograf.DurationRange{ - Lower: "now() - 15m", - }, - }, - }, - { - name: "Test invalid fill rejected", - influxQL: `SELECT mean("usage_idle") FROM "telegraf"."autogen"."cpu" WHERE time > now() - 15m GROUP BY time(1m) FILL(LINEAR)`, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := Convert(tt.influxQL) - if (err != nil) != tt.wantErr { - t.Errorf("Convert() error = %v, wantErr %v", err, tt.wantErr) - return - } - if tt.RawText != "" { - tt.want.RawText = &tt.RawText - if got.RawText == nil { - t.Errorf("Convert() = nil, want %s", tt.RawText) - } else if *got.RawText != tt.RawText { - t.Errorf("Convert() = %s, want %s", *got.RawText, tt.RawText) - } - } - if !cmp.Equal(got, tt.want) { - t.Errorf("Convert() = %s", cmp.Diff(got, tt.want)) - } - }) - } -} - -func TestParseTime(t *testing.T) { - tests := []struct { - name string - influxQL string - now string - want time.Duration - wantErr bool - }{ - { - name: "time equal", - now: "2000-01-01T00:00:00Z", - influxQL: `SELECT mean("numSeries") AS "mean_numSeries" FROM "_internal"."monitor"."database" WHERE time > now() - 1h and time < now() - 1h GROUP BY :interval: FILL(null);`, - want: 0, - }, - { - name: "time shifted by one hour", - now: "2000-01-01T00:00:00Z", - influxQL: `SELECT mean("numSeries") AS "mean_numSeries" FROM "_internal"."monitor"."database" WHERE time > now() - 1h - 1h and time < now() - 1h GROUP BY :interval: FILL(null);`, - want: 3599999999998, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - now, err := time.Parse(time.RFC3339, tt.now) - if err != nil { - t.Fatalf("%v", err) - } - got, err := ParseTime(tt.influxQL, now) - if (err != nil) != tt.wantErr { - t.Errorf("ParseTime() 
error = %v, wantErr %v", err, tt.wantErr) - return - } - if got != tt.want { - t.Logf("%d", got) - t.Errorf("ParseTime() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/chronograf/influx/users.go b/chronograf/influx/users.go deleted file mode 100644 index a41a35e78ba..00000000000 --- a/chronograf/influx/users.go +++ /dev/null @@ -1,230 +0,0 @@ -package influx - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -// Add a new User in InfluxDB -func (c *Client) Add(ctx context.Context, u *chronograf.User) (*chronograf.User, error) { - _, err := c.Query(ctx, chronograf.Query{ - Command: fmt.Sprintf(`CREATE USER "%s" WITH PASSWORD '%s'`, u.Name, u.Passwd), - }) - if err != nil { - return nil, err - } - for _, p := range u.Permissions { - if err := c.grantPermission(ctx, u.Name, p); err != nil { - return nil, err - } - } - return c.Get(ctx, chronograf.UserQuery{Name: &u.Name}) -} - -// Delete the User from InfluxDB -func (c *Client) Delete(ctx context.Context, u *chronograf.User) error { - res, err := c.Query(ctx, chronograf.Query{ - Command: fmt.Sprintf(`DROP USER "%s"`, u.Name), - }) - if err != nil { - return err - } - // The DROP USER statement puts the error within the results itself - // So, we have to crack open the results to see what happens - octets, err := res.MarshalJSON() - if err != nil { - return err - } - - results := make([]struct{ Error string }, 0) - if err := json.Unmarshal(octets, &results); err != nil { - return err - } - - // At last, we can check if there are any error strings - for _, r := range results { - if r.Error != "" { - return fmt.Errorf(r.Error) - } - } - return nil -} - -// Get retrieves a user if name exists. -func (c *Client) Get(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil { - return nil, fmt.Errorf("query must specify name") - } - - users, err := c.showUsers(ctx) - if err != nil { - return nil, err - } - - for _, user := range users { - if user.Name == *q.Name { - perms, err := c.userPermissions(ctx, user.Name) - if err != nil { - return nil, err - } - user.Permissions = append(user.Permissions, perms...) - return &user, nil - } - } - - return nil, fmt.Errorf("user not found") -} - -// Update the user's permissions or roles -func (c *Client) Update(ctx context.Context, u *chronograf.User) error { - // Only allow one type of change at a time. If it is a password - // change then do it and return without any changes to permissions - if u.Passwd != "" { - return c.updatePassword(ctx, u.Name, u.Passwd) - } - - user, err := c.Get(ctx, chronograf.UserQuery{Name: &u.Name}) - if err != nil { - return err - } - - revoke, add := Difference(u.Permissions, user.Permissions) - for _, a := range add { - if err := c.grantPermission(ctx, u.Name, a); err != nil { - return err - } - } - - for _, r := range revoke { - if err := c.revokePermission(ctx, u.Name, r); err != nil { - return err - } - } - return nil -} - -// All users in influx -func (c *Client) All(ctx context.Context) ([]chronograf.User, error) { - users, err := c.showUsers(ctx) - if err != nil { - return nil, err - } - - // For all users we need to look up permissions to add to the user. - for i, user := range users { - perms, err := c.userPermissions(ctx, user.Name) - if err != nil { - return nil, err - } - - user.Permissions = append(user.Permissions, perms...) 
- users[i] = user - } - return users, nil -} - -// Num returns the number of users in the DB -func (c *Client) Num(ctx context.Context) (int, error) { - all, err := c.All(ctx) - if err != nil { - return 0, err - } - - return len(all), nil -} - -// showUsers runs the SHOW USERS InfluxQL command and returns chronograf users. -func (c *Client) showUsers(ctx context.Context) ([]chronograf.User, error) { - res, err := c.Query(ctx, chronograf.Query{ - Command: `SHOW USERS`, - }) - if err != nil { - return nil, err - } - octets, err := res.MarshalJSON() - if err != nil { - return nil, err - } - - results := showResults{} - if err := json.Unmarshal(octets, &results); err != nil { - return nil, err - } - - return results.Users(), nil -} - -func (c *Client) grantPermission(ctx context.Context, username string, perm chronograf.Permission) error { - query := ToGrant(username, perm) - if query == "" { - return nil - } - - _, err := c.Query(ctx, chronograf.Query{ - Command: query, - }) - return err -} - -func (c *Client) revokePermission(ctx context.Context, username string, perm chronograf.Permission) error { - query := ToRevoke(username, perm) - if query == "" { - return nil - } - - _, err := c.Query(ctx, chronograf.Query{ - Command: query, - }) - return err -} - -func (c *Client) userPermissions(ctx context.Context, name string) (chronograf.Permissions, error) { - res, err := c.Query(ctx, chronograf.Query{ - Command: fmt.Sprintf(`SHOW GRANTS FOR "%s"`, name), - }) - if err != nil { - return nil, err - } - - octets, err := res.MarshalJSON() - if err != nil { - return nil, err - } - - results := showResults{} - if err := json.Unmarshal(octets, &results); err != nil { - return nil, err - } - return results.Permissions(), nil -} - -func (c *Client) updatePassword(ctx context.Context, name, passwd string) error { - res, err := c.Query(ctx, chronograf.Query{ - Command: fmt.Sprintf(`SET PASSWORD for "%s" = '%s'`, name, passwd), - }) - if err != nil { - return err - } - // The SET PASSWORD statement puts the error within the results itself - // So, we have to crack open the results to see what happens - octets, err := res.MarshalJSON() - if err != nil { - return err - } - - results := make([]struct{ Error string }, 0) - if err := json.Unmarshal(octets, &results); err != nil { - return err - } - - // At last, we can check if there are any error strings - for _, r := range results { - if r.Error != "" { - return fmt.Errorf(r.Error) - } - } - return nil -} diff --git a/chronograf/influx/users_test.go b/chronograf/influx/users_test.go deleted file mode 100644 index 1936e9992fa..00000000000 --- a/chronograf/influx/users_test.go +++ /dev/null @@ -1,1126 +0,0 @@ -package influx - -import ( - "context" - "net/http" - "net/http/httptest" - "net/url" - "reflect" - "strings" - "testing" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -func TestClient_userPermissions(t *testing.T) { - t.Parallel() - type args struct { - ctx context.Context - name string - } - tests := []struct { - name string - showGrants []byte - status int - args args - want chronograf.Permissions - wantErr bool - }{ - { - name: "Check all grants", - showGrants: []byte(`{"results":[{"series":[{"columns":["database","privilege"],"values":[["mydb","ALL PRIVILEGES"]]}]}]}`), - status: http.StatusOK, - args: args{ - ctx: context.Background(), - name: "docbrown", - }, - want: chronograf.Permissions{ - chronograf.Permission{ - Scope: "database", - Name: "mydb", - Allowed: []string{"WRITE", "READ"}, - }, - }, - }, - { - name: "Permission Denied", - 
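// This case sets a 401 status with no showGrants payload; the non-200
// response alone is expected to surface as an error (wantErr: true).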
status: http.StatusUnauthorized, - args: args{ - ctx: context.Background(), - name: "docbrown", - }, - wantErr: true, - }, - { - name: "bad JSON", - showGrants: []byte(`{"results":[{"series":"adffdadf"}]}`), - status: http.StatusOK, - args: args{ - ctx: context.Background(), - name: "docbrown", - }, - wantErr: true, - }, - } - - for _, tt := range tests { - ts := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if path := r.URL.Path; path != "/query" { - t.Error("Expected the path to contain `/query` but was", path) - } - rw.WriteHeader(tt.status) - rw.Write(tt.showGrants) - })) - u, _ := url.Parse(ts.URL) - c := &Client{ - URL: u, - Logger: &chronograf.NoopLogger{}, - } - defer ts.Close() - - got, err := c.userPermissions(tt.args.ctx, tt.args.name) - if (err != nil) != tt.wantErr { - t.Errorf("%q. Client.userPermissions() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("%q. Client.userPermissions() = %v, want %v", tt.name, got, tt.want) - } - } -} - -func TestClient_Add(t *testing.T) { - t.Parallel() - type args struct { - ctx context.Context - u *chronograf.User - } - tests := []struct { - name string - args args - status int - want *chronograf.User - wantQueries []string - wantErr bool - }{ - { - name: "Create User", - status: http.StatusOK, - args: args{ - ctx: context.Background(), - u: &chronograf.User{ - Name: "docbrown", - Passwd: "Dont Need Roads", - }, - }, - wantQueries: []string{ - `CREATE USER "docbrown" WITH PASSWORD 'Dont Need Roads'`, - `SHOW USERS`, - `SHOW GRANTS FOR "docbrown"`, - }, - want: &chronograf.User{ - Name: "docbrown", - Permissions: chronograf.Permissions{ - chronograf.Permission{ - Scope: chronograf.AllScope, - Allowed: chronograf.Allowances{ - "ALL", - }, - }, - }, - }, - }, - { - name: "Create User with permissions", - status: http.StatusOK, - args: args{ - ctx: context.Background(), - u: &chronograf.User{ - Name: "docbrown", - Passwd: "Dont Need Roads", - Permissions: chronograf.Permissions{ - chronograf.Permission{ - Scope: chronograf.AllScope, - Allowed: chronograf.Allowances{ - "ALL", - }, - }, - }, - }, - }, - wantQueries: []string{ - `CREATE USER "docbrown" WITH PASSWORD 'Dont Need Roads'`, - `GRANT ALL PRIVILEGES TO "docbrown"`, - `SHOW USERS`, - `SHOW GRANTS FOR "docbrown"`, - }, - want: &chronograf.User{ - Name: "docbrown", - Permissions: chronograf.Permissions{ - chronograf.Permission{ - Scope: chronograf.AllScope, - Allowed: chronograf.Allowances{ - "ALL", - }, - }, - }, - }, - }, - { - name: "Permission Denied", - status: http.StatusUnauthorized, - args: args{ - ctx: context.Background(), - u: &chronograf.User{ - Name: "docbrown", - Passwd: "Dont Need Roads", - }, - }, - wantQueries: []string{`CREATE USER "docbrown" WITH PASSWORD 'Dont Need Roads'`}, - wantErr: true, - }, - } - for _, tt := range tests { - queries := []string{} - ts := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if path := r.URL.Path; path != "/query" { - t.Error("Expected the path to contain `/query` but was", path) - } - queries = append(queries, r.URL.Query().Get("q")) - rw.WriteHeader(tt.status) - rw.Write([]byte(`{"results":[{"series":[{"columns":["user","admin"],"values":[["admin",true],["docbrown",true],["reader",false]]}]}]}`)) - })) - u, _ := url.Parse(ts.URL) - c := &Client{ - URL: u, - Logger: &chronograf.NoopLogger{}, - } - defer ts.Close() - got, err := c.Add(tt.args.ctx, tt.args.u) - if (err != nil) != tt.wantErr { - 
t.Errorf("%q. Client.Add() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - if len(tt.wantQueries) != len(queries) { - t.Errorf("%q. Client.Add() queries = %v, want %v", tt.name, queries, tt.wantQueries) - continue - } - for i := range tt.wantQueries { - if tt.wantQueries[i] != queries[i] { - t.Errorf("%q. Client.Add() query = %v, want %v", tt.name, queries[i], tt.wantQueries[i]) - } - } - - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("%q. Client.Add() = %v, want %v", tt.name, got, tt.want) - } - } -} - -func TestClient_Delete(t *testing.T) { - type args struct { - ctx context.Context - u *chronograf.User - } - tests := []struct { - name string - status int - dropUser []byte - args args - wantErr bool - }{ - { - name: "Drop User", - dropUser: []byte(`{"results":[{"series":[{"columns":["database","privilege"],"values":[["mydb","ALL PRIVILEGES"]]}]}]}`), - status: http.StatusOK, - args: args{ - ctx: context.Background(), - u: &chronograf.User{ - Name: "docbrown", - }, - }, - }, - { - name: "No such user", - dropUser: []byte(`{"results":[{"error":"user not found"}]}`), - status: http.StatusOK, - args: args{ - ctx: context.Background(), - u: &chronograf.User{ - Name: "docbrown", - }, - }, - wantErr: true, - }, - { - name: "Bad InfluxQL", - dropUser: []byte(`{"error":"error parsing query: found doody, expected ; at line 1, char 17"}`), - status: http.StatusBadRequest, - args: args{ - ctx: context.Background(), - u: &chronograf.User{ - Name: "docbrown", - }, - }, - wantErr: true, - }, - { - name: "Bad JSON", - dropUser: []byte(`{"results":[{"error":breakhere}]}`), - status: http.StatusOK, - args: args{ - ctx: context.Background(), - u: &chronograf.User{ - Name: "docbrown", - }, - }, - wantErr: true, - }, - } - for _, tt := range tests { - ts := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if path := r.URL.Path; path != "/query" { - t.Error("Expected the path to contain `/query` but was", path) - } - rw.WriteHeader(tt.status) - rw.Write(tt.dropUser) - })) - u, _ := url.Parse(ts.URL) - c := &Client{ - URL: u, - Logger: &chronograf.NoopLogger{}, - } - defer ts.Close() - - if err := c.Delete(tt.args.ctx, tt.args.u); (err != nil) != tt.wantErr { - t.Errorf("%q. 
Client.Delete() error = %v, wantErr %v", tt.name, err, tt.wantErr) - } - } -} - -func TestClient_Get(t *testing.T) { - type args struct { - ctx context.Context - name string - } - tests := []struct { - name string - args args - statusUsers int - showUsers []byte - statusGrants int - showGrants []byte - want *chronograf.User - wantErr bool - }{ - { - name: "Get User", - statusUsers: http.StatusOK, - showUsers: []byte(`{"results":[{"series":[{"columns":["user","admin"],"values":[["admin",true],["docbrown",true],["reader",false]]}]}]}`), - statusGrants: http.StatusOK, - showGrants: []byte(`{"results":[{"series":[{"columns":["database","privilege"],"values":[["mydb","ALL PRIVILEGES"]]}]}]}`), - args: args{ - ctx: context.Background(), - name: "docbrown", - }, - want: &chronograf.User{ - Name: "docbrown", - Permissions: chronograf.Permissions{ - chronograf.Permission{ - Scope: "all", - Allowed: []string{"ALL"}, - }, - chronograf.Permission{ - Scope: "database", - Name: "mydb", - Allowed: []string{"WRITE", "READ"}, - }, - }, - }, - }, - { - name: "Fail show users", - statusUsers: http.StatusBadRequest, - showUsers: []byte(`{"results":[{"series":[{"columns":["user","admin"],"values":[["admin",true],["docbrown",true],["reader",false]]}]}]}`), - args: args{ - ctx: context.Background(), - name: "docbrown", - }, - wantErr: true, - }, - { - name: "Fail show grants", - statusUsers: http.StatusOK, - showUsers: []byte(`{"results":[{"series":[{"columns":["user","admin"],"values":[["admin",true],["docbrown",true],["reader",false]]}]}]}`), - statusGrants: http.StatusBadRequest, - showGrants: []byte(`{"results":[{"series":[{"columns":["database","privilege"],"values":[["mydb","ALL PRIVILEGES"]]}]}]}`), - args: args{ - ctx: context.Background(), - name: "docbrown", - }, - wantErr: true, - }, - { - name: "Fail no such user", - statusUsers: http.StatusOK, - showUsers: []byte(`{"results":[{"series":[{"columns":["user","admin"],"values":[["admin",true]]}]}]}`), - args: args{ - ctx: context.Background(), - name: "docbrown", - }, - wantErr: true, - }, - } - for _, tt := range tests { - ts := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if path := r.URL.Path; path != "/query" { - t.Error("Expected the path to contain `/query` but was", path) - } - query := r.URL.Query().Get("q") - if strings.Contains(query, "GRANTS") { - rw.WriteHeader(tt.statusGrants) - rw.Write(tt.showGrants) - } else if strings.Contains(query, "USERS") { - rw.WriteHeader(tt.statusUsers) - rw.Write(tt.showUsers) - } - })) - u, _ := url.Parse(ts.URL) - c := &Client{ - URL: u, - Logger: &chronograf.NoopLogger{}, - } - defer ts.Close() - got, err := c.Get(tt.args.ctx, chronograf.UserQuery{Name: &tt.args.name}) - if (err != nil) != tt.wantErr { - t.Errorf("%q. Client.Get() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("%q. 
Client.Get() = %v, want %v", tt.name, got, tt.want) - } - } -} - -func TestClient_grantPermission(t *testing.T) { - type args struct { - ctx context.Context - username string - perm chronograf.Permission - } - tests := []struct { - name string - args args - status int - results []byte - wantQuery string - wantErr bool - }{ - { - name: "simple grants", - status: http.StatusOK, - results: []byte(`{"results":[]}`), - args: args{ - ctx: context.Background(), - username: "docbrown", - perm: chronograf.Permission{ - Scope: "database", - Name: "mydb", - Allowed: []string{"WRITE", "READ"}, - }, - }, - wantQuery: `GRANT ALL ON "mydb" TO "docbrown"`, - }, - { - name: "bad grants", - status: http.StatusOK, - results: []byte(`{"results":[]}`), - args: args{ - ctx: context.Background(), - username: "docbrown", - perm: chronograf.Permission{ - Scope: "database", - Name: "mydb", - Allowed: []string{"howdy"}, - }, - }, - wantQuery: ``, - }, - { - name: "no grants", - status: http.StatusOK, - results: []byte(`{"results":[]}`), - args: args{ - ctx: context.Background(), - username: "docbrown", - perm: chronograf.Permission{ - Scope: "database", - Name: "mydb", - Allowed: []string{}, - }, - }, - wantQuery: ``, - }, - } - for _, tt := range tests { - query := "" - ts := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if path := r.URL.Path; path != "/query" { - t.Error("Expected the path to contain `/query` but was", path) - } - query = r.URL.Query().Get("q") - rw.WriteHeader(tt.status) - rw.Write(tt.results) - })) - u, _ := url.Parse(ts.URL) - c := &Client{ - URL: u, - Logger: &chronograf.NoopLogger{}, - } - defer ts.Close() - if err := c.grantPermission(tt.args.ctx, tt.args.username, tt.args.perm); (err != nil) != tt.wantErr { - t.Errorf("%q. Client.grantPermission() error = %v, wantErr %v", tt.name, err, tt.wantErr) - } - if query != tt.wantQuery { - t.Errorf("%q. 
Client.grantPermission() = %v, want %v", tt.name, query, tt.wantQuery) - } - } -} - -func TestClient_revokePermission(t *testing.T) { - type args struct { - ctx context.Context - username string - perm chronograf.Permission - } - tests := []struct { - name string - args args - status int - results []byte - wantQuery string - wantErr bool - }{ - { - name: "simple revoke", - status: http.StatusOK, - results: []byte(`{"results":[]}`), - args: args{ - ctx: context.Background(), - username: "docbrown", - perm: chronograf.Permission{ - Scope: "database", - Name: "mydb", - Allowed: []string{"WRITE", "READ"}, - }, - }, - wantQuery: `REVOKE ALL ON "mydb" FROM "docbrown"`, - }, - { - name: "bad revoke", - status: http.StatusOK, - results: []byte(`{"results":[]}`), - args: args{ - ctx: context.Background(), - username: "docbrown", - perm: chronograf.Permission{ - Scope: "database", - Name: "mydb", - Allowed: []string{"howdy"}, - }, - }, - wantQuery: ``, - }, - { - name: "no permissions", - status: http.StatusOK, - results: []byte(`{"results":[]}`), - args: args{ - ctx: context.Background(), - username: "docbrown", - perm: chronograf.Permission{ - Scope: "database", - Name: "mydb", - Allowed: []string{}, - }, - }, - wantQuery: `REVOKE ALL PRIVILEGES ON "mydb" FROM "docbrown"`, - }, - } - for _, tt := range tests { - query := "" - ts := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if path := r.URL.Path; path != "/query" { - t.Error("Expected the path to contain `/query` but was", path) - } - query = r.URL.Query().Get("q") - rw.WriteHeader(tt.status) - rw.Write(tt.results) - })) - u, _ := url.Parse(ts.URL) - c := &Client{ - URL: u, - Logger: &chronograf.NoopLogger{}, - } - defer ts.Close() - if err := c.revokePermission(tt.args.ctx, tt.args.username, tt.args.perm); (err != nil) != tt.wantErr { - t.Errorf("%q. Client.revokePermission() error = %v, wantErr %v", tt.name, err, tt.wantErr) - } - if query != tt.wantQuery { - t.Errorf("%q. 
Client.revokePermission() = %v, want %v", tt.name, query, tt.wantQuery) - } - } -} - -func TestClient_Num(t *testing.T) { - type args struct { - ctx context.Context - } - tests := []struct { - name string - args args - statusUsers int - showUsers []byte - statusGrants int - showGrants []byte - want []chronograf.User - wantErr bool - }{ - { - name: "All Users", - statusUsers: http.StatusOK, - showUsers: []byte(`{"results":[{"series":[{"columns":["user","admin"],"values":[["admin",true],["docbrown",true],["reader",false]]}]}]}`), - statusGrants: http.StatusOK, - showGrants: []byte(`{"results":[{"series":[{"columns":["database","privilege"],"values":[["mydb","ALL PRIVILEGES"]]}]}]}`), - args: args{ - ctx: context.Background(), - }, - want: []chronograf.User{ - { - Name: "admin", - Permissions: chronograf.Permissions{ - chronograf.Permission{ - Scope: "all", - Allowed: []string{"ALL"}, - }, - chronograf.Permission{ - Scope: "database", - Name: "mydb", - Allowed: []string{"WRITE", "READ"}, - }, - }, - }, - { - Name: "docbrown", - Permissions: chronograf.Permissions{ - chronograf.Permission{ - Scope: "all", - Allowed: []string{"ALL"}, - }, - chronograf.Permission{ - Scope: "database", - Name: "mydb", - Allowed: []string{"WRITE", "READ"}, - }, - }, - }, - { - Name: "reader", - Permissions: chronograf.Permissions{ - chronograf.Permission{ - Scope: "database", - Name: "mydb", - Allowed: []string{"WRITE", "READ"}, - }, - }, - }, - }, - }, - } - for _, tt := range tests { - ts := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if path := r.URL.Path; path != "/query" { - t.Error("Expected the path to contain `/query` but was", path) - } - query := r.URL.Query().Get("q") - if strings.Contains(query, "GRANTS") { - rw.WriteHeader(tt.statusGrants) - rw.Write(tt.showGrants) - } else if strings.Contains(query, "USERS") { - rw.WriteHeader(tt.statusUsers) - rw.Write(tt.showUsers) - } - })) - u, _ := url.Parse(ts.URL) - c := &Client{ - URL: u, - Logger: &chronograf.NoopLogger{}, - } - defer ts.Close() - got, err := c.Num(tt.args.ctx) - if (err != nil) != tt.wantErr { - t.Errorf("%q. Client.Num() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - if got != len(tt.want) { - t.Errorf("%q. 
Client.Num() = %v, want %v", tt.name, got, len(tt.want)) - } - } -} - -func TestClient_All(t *testing.T) { - type args struct { - ctx context.Context - } - tests := []struct { - name string - args args - statusUsers int - showUsers []byte - statusGrants int - showGrants []byte - want []chronograf.User - wantErr bool - }{ - { - name: "All Users", - statusUsers: http.StatusOK, - showUsers: []byte(`{"results":[{"series":[{"columns":["user","admin"],"values":[["admin",true],["docbrown",true],["reader",false]]}]}]}`), - statusGrants: http.StatusOK, - showGrants: []byte(`{"results":[{"series":[{"columns":["database","privilege"],"values":[["mydb","ALL PRIVILEGES"]]}]}]}`), - args: args{ - ctx: context.Background(), - }, - want: []chronograf.User{ - { - Name: "admin", - Permissions: chronograf.Permissions{ - chronograf.Permission{ - Scope: "all", - Allowed: []string{"ALL"}, - }, - chronograf.Permission{ - Scope: "database", - Name: "mydb", - Allowed: []string{"WRITE", "READ"}, - }, - }, - }, - { - Name: "docbrown", - Permissions: chronograf.Permissions{ - chronograf.Permission{ - Scope: "all", - Allowed: []string{"ALL"}, - }, - chronograf.Permission{ - Scope: "database", - Name: "mydb", - Allowed: []string{"WRITE", "READ"}, - }, - }, - }, - { - Name: "reader", - Permissions: chronograf.Permissions{ - chronograf.Permission{ - Scope: "database", - Name: "mydb", - Allowed: []string{"WRITE", "READ"}, - }, - }, - }, - }, - }, - { - name: "Unauthorized", - statusUsers: http.StatusUnauthorized, - showUsers: []byte(`{}`), - args: args{ - ctx: context.Background(), - }, - wantErr: true, - }, - { - name: "Permission error", - statusUsers: http.StatusOK, - showUsers: []byte(`{"results":[{"series":[{"columns":["user","admin"],"values":[["admin",true],["docbrown",true],["reader",false]]}]}]}`), - statusGrants: http.StatusBadRequest, - showGrants: []byte(`{}`), - args: args{ - ctx: context.Background(), - }, - wantErr: true, - }, - } - for _, tt := range tests { - ts := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if path := r.URL.Path; path != "/query" { - t.Error("Expected the path to contain `/query` but was", path) - } - query := r.URL.Query().Get("q") - if strings.Contains(query, "GRANTS") { - rw.WriteHeader(tt.statusGrants) - rw.Write(tt.showGrants) - } else if strings.Contains(query, "USERS") { - rw.WriteHeader(tt.statusUsers) - rw.Write(tt.showUsers) - } - })) - u, _ := url.Parse(ts.URL) - c := &Client{ - URL: u, - Logger: &chronograf.NoopLogger{}, - } - defer ts.Close() - got, err := c.All(tt.args.ctx) - if (err != nil) != tt.wantErr { - t.Errorf("%q. Client.All() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("%q. 
Client.All() = %v, want %v", tt.name, got, tt.want) - } - } -} - -func TestClient_Update(t *testing.T) { - type args struct { - ctx context.Context - u *chronograf.User - } - tests := []struct { - name string - statusUsers int - showUsers []byte - statusGrants int - showGrants []byte - statusRevoke int - revoke []byte - statusGrant int - grant []byte - statusPassword int - password []byte - args args - want []string - wantErr bool - }{ - { - name: "Change Password", - statusPassword: http.StatusOK, - password: []byte(`{"results":[]}`), - args: args{ - ctx: context.Background(), - u: &chronograf.User{ - Name: "docbrown", - Passwd: "hunter2", - }, - }, - want: []string{ - `SET PASSWORD for "docbrown" = 'hunter2'`, - }, - }, - { - name: "Grant all permissions", - statusUsers: http.StatusOK, - showUsers: []byte(`{"results":[{"series":[{"columns":["user","admin"],"values":[["admin",true],["docbrown",true],["reader",false]]}]}]}`), - statusGrants: http.StatusOK, - showGrants: []byte(`{"results":[{"series":[{"columns":["database","privilege"],"values":[["mydb","ALL PRIVILEGES"]]}]}]}`), - statusRevoke: http.StatusOK, - revoke: []byte(`{"results":[]}`), - statusGrant: http.StatusOK, - grant: []byte(`{"results":[]}`), - args: args{ - ctx: context.Background(), - u: &chronograf.User{ - Name: "docbrown", - Permissions: chronograf.Permissions{ - { - Scope: "all", - Allowed: []string{"all"}, - }, - { - Scope: "database", - Name: "mydb", - Allowed: []string{"WRITE", "READ"}, - }, - }, - }, - }, - want: []string{ - `SHOW USERS`, - `SHOW GRANTS FOR "docbrown"`, - `GRANT ALL PRIVILEGES TO "docbrown"`, - `GRANT ALL ON "mydb" TO "docbrown"`, - }, - }, - { - name: "Revoke all permissions", - statusUsers: http.StatusOK, - showUsers: []byte(`{"results":[{"series":[{"columns":["user","admin"],"values":[["admin",true],["docbrown",true],["reader",false]]}]}]}`), - statusGrants: http.StatusOK, - showGrants: []byte(`{"results":[{"series":[{"columns":["database","privilege"],"values":[["mydb","ALL PRIVILEGES"]]}]}]}`), - statusRevoke: http.StatusOK, - revoke: []byte(`{"results":[]}`), - args: args{ - ctx: context.Background(), - u: &chronograf.User{ - Name: "docbrown", - }, - }, - want: []string{ - `SHOW USERS`, - `SHOW GRANTS FOR "docbrown"`, - `REVOKE ALL PRIVILEGES FROM "docbrown"`, - `REVOKE ALL ON "mydb" FROM "docbrown"`, - }, - }, - { - name: "Grant all permissions", - statusUsers: http.StatusOK, - showUsers: []byte(`{"results":[{"series":[{"columns":["user","admin"],"values":[["admin",true],["docbrown",true],["reader",false]]}]}]}`), - statusGrants: http.StatusOK, - showGrants: []byte(`{"results":[{"series":[{"columns":["database","privilege"],"values":[["mydb","ALL PRIVILEGES"]]}]}]}`), - statusRevoke: http.StatusOK, - revoke: []byte(`{"results":[]}`), - statusGrant: http.StatusOK, - grant: []byte(`{"results":[]}`), - args: args{ - ctx: context.Background(), - u: &chronograf.User{ - Name: "docbrown", - Permissions: chronograf.Permissions{ - { - Scope: "all", - Allowed: []string{"all"}, - }, - { - Scope: "database", - Name: "mydb", - Allowed: []string{"WRITE", "READ"}, - }, - }, - }, - }, - want: []string{ - `SHOW USERS`, - `SHOW GRANTS FOR "docbrown"`, - `GRANT ALL PRIVILEGES TO "docbrown"`, - `GRANT ALL ON "mydb" TO "docbrown"`, - }, - }, - { - name: "Revoke some add some", - statusUsers: http.StatusOK, - showUsers: []byte(`{"results":[{"series":[{"columns":["user","admin"],"values":[["admin",true],["docbrown",true],["reader",false]]}]}]}`), - statusGrants: http.StatusOK, - showGrants: 
[]byte(`{"results":[{"series":[{"columns":["database","privilege"],"values":[["mydb","ALL PRIVILEGES"]]}]}]}`), - statusRevoke: http.StatusOK, - revoke: []byte(`{"results":[]}`), - statusGrant: http.StatusOK, - grant: []byte(`{"results":[]}`), - args: args{ - ctx: context.Background(), - u: &chronograf.User{ - Name: "docbrown", - Permissions: chronograf.Permissions{ - { - Scope: "all", - Allowed: []string{}, - }, - { - Scope: "database", - Name: "mydb", - Allowed: []string{"WRITE"}, - }, - { - Scope: "database", - Name: "newdb", - Allowed: []string{"WRITE", "READ"}, - }, - }, - }, - }, - want: []string{ - `SHOW USERS`, - `SHOW GRANTS FOR "docbrown"`, - `GRANT WRITE ON "mydb" TO "docbrown"`, - `GRANT ALL ON "newdb" TO "docbrown"`, - `REVOKE ALL PRIVILEGES FROM "docbrown"`, - }, - }, - { - name: "Revoke some", - statusUsers: http.StatusOK, - showUsers: []byte(`{"results":[{"series":[{"columns":["user","admin"],"values":[["admin",true],["docbrown",false],["reader",false]]}]}]}`), - statusGrants: http.StatusOK, - showGrants: []byte(`{"results":[]}`), - statusRevoke: http.StatusOK, - revoke: []byte(`{"results":[]}`), - statusGrant: http.StatusOK, - grant: []byte(`{"results":[]}`), - args: args{ - ctx: context.Background(), - u: &chronograf.User{ - Name: "docbrown", - Permissions: chronograf.Permissions{ - { - Scope: "all", - Allowed: []string{"ALL"}, - }, - }, - }, - }, - want: []string{ - `SHOW USERS`, - `SHOW GRANTS FOR "docbrown"`, - `GRANT ALL PRIVILEGES TO "docbrown"`, - }, - }, - { - name: "Fail users", - statusUsers: http.StatusBadRequest, - showUsers: []byte(`{"results":[{"series":[{"columns":["user","admin"],"values":[["admin",true],["docbrown",true],["reader",false]]}]}]}`), - statusGrants: http.StatusOK, - showGrants: []byte(`{"results":[{"series":[{"columns":["database","privilege"],"values":[["mydb","ALL PRIVILEGES"]]}]}]}`), - statusRevoke: http.StatusOK, - revoke: []byte(`{"results":[]}`), - statusGrant: http.StatusOK, - grant: []byte(`{"results":[]}`), - args: args{ - ctx: context.Background(), - u: &chronograf.User{ - Name: "docbrown", - }, - }, - wantErr: true, - want: []string{ - `SHOW USERS`, - }, - }, - { - name: "fail grants", - statusUsers: http.StatusOK, - showUsers: []byte(`{"results":[{"series":[{"columns":["user","admin"],"values":[["admin",true],["docbrown",true],["reader",false]]}]}]}`), - statusGrants: http.StatusOK, - showGrants: []byte(`{"results":[{"series":[{"columns":["database","privilege"],"values":[["mydb","ALL PRIVILEGES"]]}]}]}`), - statusRevoke: http.StatusOK, - revoke: []byte(`{"results":[]}`), - statusGrant: http.StatusBadRequest, - grant: []byte(`{"results":[]}`), - wantErr: true, - args: args{ - ctx: context.Background(), - u: &chronograf.User{ - Name: "docbrown", - Permissions: chronograf.Permissions{ - { - Scope: "all", - Allowed: []string{}, - }, - { - Scope: "database", - Name: "mydb", - Allowed: []string{"WRITE"}, - }, - { - Scope: "database", - Name: "newdb", - Allowed: []string{"WRITE", "READ"}, - }, - }, - }, - }, - want: []string{ - `SHOW USERS`, - `SHOW GRANTS FOR "docbrown"`, - `GRANT WRITE ON "mydb" TO "docbrown"`, - }, - }, - { - name: "fail revoke", - statusUsers: http.StatusOK, - showUsers: []byte(`{"results":[{"series":[{"columns":["user","admin"],"values":[["admin",true],["docbrown",true],["reader",false]]}]}]}`), - statusGrants: http.StatusOK, - showGrants: []byte(`{"results":[{"series":[{"columns":["database","privilege"],"values":[["mydb","ALL PRIVILEGES"]]}]}]}`), - statusRevoke: http.StatusBadRequest, - revoke: 
[]byte(`{"results":[]}`), - statusGrant: http.StatusOK, - grant: []byte(`{"results":[]}`), - wantErr: true, - args: args{ - ctx: context.Background(), - u: &chronograf.User{ - Name: "docbrown", - Permissions: chronograf.Permissions{ - { - Scope: "all", - Allowed: []string{}, - }, - { - Scope: "database", - Name: "mydb", - Allowed: []string{"WRITE"}, - }, - { - Scope: "database", - Name: "newdb", - Allowed: []string{"WRITE", "READ"}, - }, - }, - }, - }, - want: []string{ - `SHOW USERS`, - `SHOW GRANTS FOR "docbrown"`, - `GRANT WRITE ON "mydb" TO "docbrown"`, - `GRANT ALL ON "newdb" TO "docbrown"`, - `REVOKE ALL PRIVILEGES FROM "docbrown"`, - }, - }, - } - for _, tt := range tests { - queries := []string{} - ts := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if path := r.URL.Path; path != "/query" { - t.Error("Expected the path to contain `/query` but was", path) - } - query := r.URL.Query().Get("q") - if strings.Contains(query, "GRANTS") { - rw.WriteHeader(tt.statusGrants) - rw.Write(tt.showGrants) - } else if strings.Contains(query, "USERS") { - rw.WriteHeader(tt.statusUsers) - rw.Write(tt.showUsers) - } else if strings.Contains(query, "REVOKE") { - rw.WriteHeader(tt.statusRevoke) - rw.Write(tt.revoke) - } else if strings.Contains(query, "GRANT") { - rw.WriteHeader(tt.statusGrant) - rw.Write(tt.grant) - } else if strings.Contains(query, "PASSWORD") { - rw.WriteHeader(tt.statusPassword) - rw.Write(tt.password) - } - queries = append(queries, query) - })) - u, _ := url.Parse(ts.URL) - c := &Client{ - URL: u, - Logger: &chronograf.NoopLogger{}, - } - defer ts.Close() - if err := c.Update(tt.args.ctx, tt.args.u); (err != nil) != tt.wantErr { - t.Errorf("%q. Client.Update() error = %v, wantErr %v", tt.name, err, tt.wantErr) - } - if !reflect.DeepEqual(queries, tt.want) { - t.Errorf("%q. Client.Update() = %v, want %v", tt.name, queries, tt.want) - } - } -} - -/* - - - - */ diff --git a/chronograf/integrations/server_test.go b/chronograf/integrations/server_test.go deleted file mode 100644 index 0f6462cb746..00000000000 --- a/chronograf/integrations/server_test.go +++ /dev/null @@ -1,3730 +0,0 @@ -package integrations - -// This was intentionally added under the integrations package and not the integrations test package -// so that changes in other parts of the code base that may have an effect on these test will not -// compile until they are fixed. 
- -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - - "net/http" - "testing" - "time" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/bolt" - "github.com/influxdata/influxdb/v2/chronograf/oauth2" - "github.com/influxdata/influxdb/v2/chronograf/server" -) - -func TestServer(t *testing.T) { - type fields struct { - Organizations []chronograf.Organization - Mappings []chronograf.Mapping - Users []chronograf.User - Sources []chronograf.Source - Servers []chronograf.Server - Layouts []chronograf.Layout - Dashboards []chronograf.Dashboard - Config *chronograf.Config - } - type args struct { - server *server.Server - method string - path string - payload interface{} // Expects this to be a json serializable struct - principal oauth2.Principal - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - subName string - fields fields - args args - wants wants - }{ - // { - // name: "GET /sources/5000", - // subName: "Get specific source; including Canned source", - // fields: fields{ - // Users: []chronograf.User{ - // { - // ID: 1, // This is artificial, but should be reflective of the users actual ID - // Name: "billibob", - // Provider: "github", - // Scheme: "oauth2", - // SuperAdmin: true, - // Roles: []chronograf.Role{ - // { - // Name: "admin", - // Organization: "default", - // }, - // { - // Name: "viewer", - // Organization: "howdy", // from canned testdata - // }, - // }, - // }, - // }, - // }, - // args: args{ - // server: &server.Server{ - // GithubClientID: "not empty", - // GithubClientSecret: "not empty", - // }, - // method: "GET", - // path: "/chronograf/v1/sources/5000", - // principal: oauth2.Principal{ - // Organization: "howdy", - // Subject: "billibob", - // Issuer: "github", - // }, - // }, - // wants: wants{ - // statusCode: 200, - // body: ` - //{ - // "id": "5000", - // "name": "Influx 1", - // "type": "influx-enterprise", - // "username": "user1", - // "url": "http://localhost:8086", - // "metaUrl": "http://metaurl.com", - // "default": true, - // "telegraf": "telegraf", - // "organization": "howdy", - // "defaultRP": "", - // "authentication": "basic", - // "links": { - // "self": "/chronograf/v1/sources/5000", - // "kapacitors": "/chronograf/v1/sources/5000/kapacitors", - // "services": "/chronograf/v1/sources/5000/services", - // "proxy": "/chronograf/v1/sources/5000/proxy", - // "queries": "/chronograf/v1/sources/5000/queries", - // "write": "/chronograf/v1/sources/5000/write", - // "permissions": "/chronograf/v1/sources/5000/permissions", - // "users": "/chronograf/v1/sources/5000/users", - // "roles": "/chronograf/v1/sources/5000/roles", - // "databases": "/chronograf/v1/sources/5000/dbs", - // "annotations": "/chronograf/v1/sources/5000/annotations", - // "health": "/chronograf/v1/sources/5000/health" - // } - //} - //`, - // }, - // }, - // { - // name: "GET /sources/5000/kapacitors/5000", - // subName: "Get specific kapacitors; including Canned kapacitors", - // fields: fields{ - // Users: []chronograf.User{ - // { - // ID: 1, // This is artificial, but should be reflective of the users actual ID - // Name: "billibob", - // Provider: "github", - // Scheme: "oauth2", - // SuperAdmin: true, - // Roles: []chronograf.Role{ - // { - // Name: "admin", - // Organization: "default", - // }, - // { - // Name: "viewer", - // Organization: "howdy", // from canned testdata - // }, - // }, - // }, - // }, - 
// }, - // args: args{ - // server: &server.Server{ - // GithubClientID: "not empty", - // GithubClientSecret: "not empty", - // }, - // method: "GET", - // path: "/chronograf/v1/sources/5000/kapacitors/5000", - // principal: oauth2.Principal{ - // Organization: "howdy", - // Subject: "billibob", - // Issuer: "github", - // }, - // }, - // wants: wants{ - // statusCode: 200, - // body: ` - //{ - // "id": "5000", - // "name": "Kapa 1", - // "url": "http://localhost:9092", - // "active": true, - // "insecureSkipVerify": false, - // "links": { - // "proxy": "/chronograf/v1/sources/5000/kapacitors/5000/proxy", - // "self": "/chronograf/v1/sources/5000/kapacitors/5000", - // "rules": "/chronograf/v1/sources/5000/kapacitors/5000/rules", - // "tasks": "/chronograf/v1/sources/5000/kapacitors/5000/proxy?path=/kapacitor/v1/tasks", - // "ping": "/chronograf/v1/sources/5000/kapacitors/5000/proxy?path=/kapacitor/v1/ping" - // } - //} - //`, - // }, - // }, - // { - // name: "GET /sources/5000/kapacitors", - // subName: "Get all kapacitors; including Canned kapacitors", - // fields: fields{ - // Users: []chronograf.User{ - // { - // ID: 1, // This is artificial, but should be reflective of the users actual ID - // Name: "billibob", - // Provider: "github", - // Scheme: "oauth2", - // SuperAdmin: true, - // Roles: []chronograf.Role{ - // { - // Name: "admin", - // Organization: "default", - // }, - // { - // Name: "viewer", - // Organization: "howdy", // from canned testdata - // }, - // }, - // }, - // }, - // }, - // args: args{ - // server: &server.Server{ - // GithubClientID: "not empty", - // GithubClientSecret: "not empty", - // }, - // method: "GET", - // path: "/chronograf/v1/sources/5000/kapacitors", - // principal: oauth2.Principal{ - // Organization: "howdy", - // Subject: "billibob", - // Issuer: "github", - // }, - // }, - // wants: wants{ - // statusCode: 200, - // body: ` - //{ - // "kapacitors": [ - // { - // "id": "5000", - // "name": "Kapa 1", - // "url": "http://localhost:9092", - // "active": true, - // "insecureSkipVerify": false, - // "links": { - // "proxy": "/chronograf/v1/sources/5000/kapacitors/5000/proxy", - // "self": "/chronograf/v1/sources/5000/kapacitors/5000", - // "rules": "/chronograf/v1/sources/5000/kapacitors/5000/rules", - // "tasks": "/chronograf/v1/sources/5000/kapacitors/5000/proxy?path=/kapacitor/v1/tasks", - // "ping": "/chronograf/v1/sources/5000/kapacitors/5000/proxy?path=/kapacitor/v1/ping" - // } - // } - // ] - //} - //`, - // }, - // }, - // { - // name: "GET /sources", - // subName: "Get all sources; including Canned sources", - // fields: fields{ - // Users: []chronograf.User{ - // { - // ID: 1, // This is artificial, but should be reflective of the users actual ID - // Name: "billibob", - // Provider: "github", - // Scheme: "oauth2", - // SuperAdmin: true, - // Roles: []chronograf.Role{ - // { - // Name: "admin", - // Organization: "default", - // }, - // { - // Name: "viewer", - // Organization: "howdy", // from canned testdata - // }, - // }, - // }, - // }, - // }, - // args: args{ - // server: &server.Server{ - // GithubClientID: "not empty", - // GithubClientSecret: "not empty", - // }, - // method: "GET", - // path: "/chronograf/v1/sources", - // principal: oauth2.Principal{ - // Organization: "howdy", - // Subject: "billibob", - // Issuer: "github", - // }, - // }, - // wants: wants{ - // statusCode: 200, - // body: ` - //{ - // "sources": [ - // { - // "id": "5000", - // "name": "Influx 1", - // "type": "influx-enterprise", - // "username": 
"user1", - // "url": "http://localhost:8086", - // "metaUrl": "http://metaurl.com", - // "default": true, - // "telegraf": "telegraf", - // "organization": "howdy", - // "defaultRP": "", - // "authentication": "basic", - // "links": { - // "self": "/chronograf/v1/sources/5000", - // "kapacitors": "/chronograf/v1/sources/5000/kapacitors", - // "services": "/chronograf/v1/sources/5000/services", - // "proxy": "/chronograf/v1/sources/5000/proxy", - // "queries": "/chronograf/v1/sources/5000/queries", - // "write": "/chronograf/v1/sources/5000/write", - // "permissions": "/chronograf/v1/sources/5000/permissions", - // "users": "/chronograf/v1/sources/5000/users", - // "roles": "/chronograf/v1/sources/5000/roles", - // "databases": "/chronograf/v1/sources/5000/dbs", - // "annotations": "/chronograf/v1/sources/5000/annotations", - // "health": "/chronograf/v1/sources/5000/health" - // } - // } - // ] - //} - //`, - // }, - // }, - // { - // name: "GET /organizations", - // subName: "Get all organizations; including Canned organization", - // fields: fields{ - // Users: []chronograf.User{ - // { - // ID: 1, // This is artificial, but should be reflective of the users actual ID - // Name: "billibob", - // Provider: "github", - // Scheme: "oauth2", - // SuperAdmin: true, - // Roles: []chronograf.Role{ - // { - // Name: "admin", - // Organization: "default", - // }, - // }, - // }, - // }, - // }, - // args: args{ - // server: &server.Server{ - // GithubClientID: "not empty", - // GithubClientSecret: "not empty", - // }, - // method: "GET", - // path: "/chronograf/v1/organizations", - // principal: oauth2.Principal{ - // Organization: "default", - // Subject: "billibob", - // Issuer: "github", - // }, - // }, - // wants: wants{ - // statusCode: 200, - // body: ` - //{ - // "links": { - // "self": "/chronograf/v1/organizations" - // }, - // "organizations": [ - // { - // "links": { - // "self": "/chronograf/v1/organizations/default" - // }, - // "id": "default", - // "name": "Default", - // "defaultRole": "member" - // }, - // { - // "links": { - // "self": "/chronograf/v1/organizations/howdy" - // }, - // "id": "howdy", - // "name": "An Organization", - // "defaultRole": "viewer" - // } - // ] - //}`, - // }, - // }, - // { - // name: "GET /organizations/howdy", - // subName: "Get specific organizations; Canned organization", - // fields: fields{ - // Users: []chronograf.User{ - // { - // ID: 1, // This is artificial, but should be reflective of the users actual ID - // Name: "billibob", - // Provider: "github", - // Scheme: "oauth2", - // SuperAdmin: true, - // Roles: []chronograf.Role{ - // { - // Name: "admin", - // Organization: "default", - // }, - // }, - // }, - // }, - // }, - // args: args{ - // server: &server.Server{ - // GithubClientID: "not empty", - // GithubClientSecret: "not empty", - // }, - // method: "GET", - // path: "/chronograf/v1/organizations/howdy", - // principal: oauth2.Principal{ - // Organization: "default", - // Subject: "billibob", - // Issuer: "github", - // }, - // }, - // wants: wants{ - // statusCode: 200, - // body: ` - //{ - // "links": { - // "self": "/chronograf/v1/organizations/howdy" - // }, - // "id": "howdy", - // "name": "An Organization", - // "defaultRole": "viewer" - //}`, - // }, - // }, - // { - // name: "GET /dashboards/1000", - // subName: "Get specific in the howdy organization; Using Canned testdata", - // fields: fields{ - // Users: []chronograf.User{ - // { - // ID: 1, // This is artificial, but should be reflective of the users actual ID - // 
Name: "billibob", - // Provider: "github", - // Scheme: "oauth2", - // SuperAdmin: true, - // Roles: []chronograf.Role{ - // { - // Name: "admin", - // Organization: "howdy", - // }, - // }, - // }, - // }, - // }, - // args: args{ - // server: &server.Server{ - // GithubClientID: "not empty", - // GithubClientSecret: "not empty", - // }, - // method: "GET", - // path: "/chronograf/v1/dashboards/1000", - // principal: oauth2.Principal{ - // Organization: "howdy", - // Subject: "billibob", - // Issuer: "github", - // }, - // }, - // wants: wants{ - // statusCode: 200, - // body: ` - //{ - // "id": 1000, - // "cells": [ - // { - // "i": "8f61c619-dd9b-4761-8aa8-577f27247093", - // "x": 0, - // "y": 0, - // "w": 11, - // "h": 5, - // "name": "Untitled Cell", - // "queries": [ - // { - // "query": "SELECT mean(\"value\") AS \"mean_value\" FROM \"telegraf\".\"autogen\".\"cpg\" WHERE time > :dashboardTime: GROUP BY time(:interval:) FILL(null)", - // "queryConfig": { - // "database": "telegraf", - // "measurement": "cpg", - // "retentionPolicy": "autogen", - // "fields": [ - // { - // "value": "mean", - // "type": "func", - // "alias": "mean_value", - // "args": [ - // { - // "value": "value", - // "type": "field", - // "alias": "" - // } - // ] - // } - // ], - // "tags": {}, - // "groupBy": { - // "time": "auto", - // "tags": [] - // }, - // "areTagsAccepted": false, - // "fill": "null", - // "rawText": null, - // "range": null, - // "shifts": null - // }, - // "source": "/chronograf/v1/sources/2" - // } - // ], - // "axes": { - // "x": { - // "bounds": [], - // "label": "", - // "prefix": "", - // "suffix": "", - // "base": "10", - // "scale": "linear" - // }, - // "y": { - // "bounds": [], - // "label": "", - // "prefix": "", - // "suffix": "", - // "base": "10", - // "scale": "linear" - // }, - // "y2": { - // "bounds": [], - // "label": "", - // "prefix": "", - // "suffix": "", - // "base": "10", - // "scale": "linear" - // } - // }, - // "type": "line", - // "colors": [ - // { - // "id": "0", - // "type": "min", - // "hex": "#00C9FF", - // "name": "laser", - // "value": "0" - // }, - // { - // "id": "1", - // "type": "max", - // "hex": "#9394FF", - // "name": "comet", - // "value": "100" - // } - // ], - // "legend":{ - // "type": "static", - // "orientation": "bottom" - // }, - // "tableOptions":{ - // "verticalTimeAxis": false, - // "sortBy":{ - // "internalName": "", - // "displayName": "", - // "visible": false - // }, - // "wrapping": "", - // "fixFirstColumn": false - // }, - // "fieldOptions": null, - // "timeFormat": "", - // "decimalPlaces":{ - // "isEnforced": false, - // "digits": 0 - // }, - // "links": { - // "self": "/chronograf/v1/dashboards/1000/cells/8f61c619-dd9b-4761-8aa8-577f27247093" - // } - // } - // ], - // "templates": [ - // { - // "tempVar": ":dbs:", - // "values": [ - // { - // "value": "_internal", - // "type": "database", - // "selected": true - // }, - // { - // "value": "telegraf", - // "type": "database", - // "selected": false - // }, - // { - // "value": "tensorflowdb", - // "type": "database", - // "selected": false - // }, - // { - // "value": "pushgateway", - // "type": "database", - // "selected": false - // }, - // { - // "value": "node_exporter", - // "type": "database", - // "selected": false - // }, - // { - // "value": "mydb", - // "type": "database", - // "selected": false - // }, - // { - // "value": "tiny", - // "type": "database", - // "selected": false - // }, - // { - // "value": "blah", - // "type": "database", - // "selected": false - // 
}, - // { - // "value": "test", - // "type": "database", - // "selected": false - // }, - // { - // "value": "chronograf", - // "type": "database", - // "selected": false - // }, - // { - // "value": "db_name", - // "type": "database", - // "selected": false - // }, - // { - // "value": "demo", - // "type": "database", - // "selected": false - // }, - // { - // "value": "eeg", - // "type": "database", - // "selected": false - // }, - // { - // "value": "solaredge", - // "type": "database", - // "selected": false - // }, - // { - // "value": "zipkin", - // "type": "database", - // "selected": false - // } - // ], - // "id": "e7e498bf-5869-4874-9071-24628a2cda63", - // "type": "databases", - // "label": "", - // "query": { - // "influxql": "SHOW DATABASES", - // "measurement": "", - // "tagKey": "", - // "fieldKey": "" - // }, - // "links": { - // "self": "/chronograf/v1/dashboards/1000/templates/e7e498bf-5869-4874-9071-24628a2cda63" - // } - // } - // ], - // "name": "Name This Dashboard", - // "organization": "howdy", - // "links": { - // "self": "/chronograf/v1/dashboards/1000", - // "cells": "/chronograf/v1/dashboards/1000/cells", - // "templates": "/chronograf/v1/dashboards/1000/templates" - // } - //}`, - // }, - // }, - // { - // name: "GET /dashboards", - // subName: "Get all dashboards in the howdy organization; Using Canned testdata", - // fields: fields{ - // Users: []chronograf.User{ - // { - // ID: 1, // This is artificial, but should be reflective of the users actual ID - // Name: "billibob", - // Provider: "github", - // Scheme: "oauth2", - // SuperAdmin: true, - // Roles: []chronograf.Role{ - // { - // Name: "admin", - // Organization: "default", - // }, - // { - // Name: "admin", - // Organization: "howdy", - // }, - // }, - // }, - // }, - // }, - // args: args{ - // server: &server.Server{ - // GithubClientID: "not empty", - // GithubClientSecret: "not empty", - // }, - // method: "GET", - // path: "/chronograf/v1/dashboards", - // principal: oauth2.Principal{ - // Organization: "howdy", - // Subject: "billibob", - // Issuer: "github", - // }, - // }, - // wants: wants{ - // statusCode: 200, - // body: ` - //{ - // "dashboards": [ - // { - // "id": 1000, - // "cells": [ - // { - // "i": "8f61c619-dd9b-4761-8aa8-577f27247093", - // "x": 0, - // "y": 0, - // "w": 11, - // "h": 5, - // "name": "Untitled Cell", - // "queries": [ - // { - // "query": "SELECT mean(\"value\") AS \"mean_value\" FROM \"telegraf\".\"autogen\".\"cpg\" WHERE time > :dashboardTime: GROUP BY time(:interval:) FILL(null)", - // "queryConfig": { - // "database": "telegraf", - // "measurement": "cpg", - // "retentionPolicy": "autogen", - // "fields": [ - // { - // "value": "mean", - // "type": "func", - // "alias": "mean_value", - // "args": [ - // { - // "value": "value", - // "type": "field", - // "alias": "" - // } - // ] - // } - // ], - // "tags": {}, - // "groupBy": { - // "time": "auto", - // "tags": [] - // }, - // "areTagsAccepted": false, - // "fill": "null", - // "rawText": null, - // "range": null, - // "shifts": null - // }, - // "source": "/chronograf/v1/sources/2" - // } - // ], - // "axes": { - // "x": { - // "bounds": [], - // "label": "", - // "prefix": "", - // "suffix": "", - // "base": "10", - // "scale": "linear" - // }, - // "y": { - // "bounds": [], - // "label": "", - // "prefix": "", - // "suffix": "", - // "base": "10", - // "scale": "linear" - // }, - // "y2": { - // "bounds": [], - // "label": "", - // "prefix": "", - // "suffix": "", - // "base": "10", - // "scale": "linear" - 
// } - // }, - // "type": "line", - // "colors": [ - // { - // "id": "0", - // "type": "min", - // "hex": "#00C9FF", - // "name": "laser", - // "value": "0" - // }, - // { - // "id": "1", - // "type": "max", - // "hex": "#9394FF", - // "name": "comet", - // "value": "100" - // } - // ], - // "legend": { - // "type": "static", - // "orientation": "bottom" - // }, - // "tableOptions":{ - // "verticalTimeAxis": false, - // "sortBy":{ - // "internalName": "", - // "displayName": "", - // "visible": false - // }, - // "wrapping": "", - // "fixFirstColumn": false - // }, - // "fieldOptions": null, - // "timeFormat": "", - // "decimalPlaces":{ - // "isEnforced": false, - // "digits": 0 - // }, - // "links": { - // "self": "/chronograf/v1/dashboards/1000/cells/8f61c619-dd9b-4761-8aa8-577f27247093" - // } - // } - // ], - // "templates": [ - // { - // "tempVar": ":dbs:", - // "values": [ - // { - // "value": "_internal", - // "type": "database", - // "selected": true - // }, - // { - // "value": "telegraf", - // "type": "database", - // "selected": false - // }, - // { - // "value": "tensorflowdb", - // "type": "database", - // "selected": false - // }, - // { - // "value": "pushgateway", - // "type": "database", - // "selected": false - // }, - // { - // "value": "node_exporter", - // "type": "database", - // "selected": false - // }, - // { - // "value": "mydb", - // "type": "database", - // "selected": false - // }, - // { - // "value": "tiny", - // "type": "database", - // "selected": false - // }, - // { - // "value": "blah", - // "type": "database", - // "selected": false - // }, - // { - // "value": "test", - // "type": "database", - // "selected": false - // }, - // { - // "value": "chronograf", - // "type": "database", - // "selected": false - // }, - // { - // "value": "db_name", - // "type": "database", - // "selected": false - // }, - // { - // "value": "demo", - // "type": "database", - // "selected": false - // }, - // { - // "value": "eeg", - // "type": "database", - // "selected": false - // }, - // { - // "value": "solaredge", - // "type": "database", - // "selected": false - // }, - // { - // "value": "zipkin", - // "type": "database", - // "selected": false - // } - // ], - // "id": "e7e498bf-5869-4874-9071-24628a2cda63", - // "type": "databases", - // "label": "", - // "query": { - // "influxql": "SHOW DATABASES", - // "measurement": "", - // "tagKey": "", - // "fieldKey": "" - // }, - // "links": { - // "self": "/chronograf/v1/dashboards/1000/templates/e7e498bf-5869-4874-9071-24628a2cda63" - // } - // } - // ], - // "name": "Name This Dashboard", - // "organization": "howdy", - // "links": { - // "self": "/chronograf/v1/dashboards/1000", - // "cells": "/chronograf/v1/dashboards/1000/cells", - // "templates": "/chronograf/v1/dashboards/1000/templates" - // } - // } - // ] - //}`, - // }, - // }, - // { - // name: "GET /users", - // subName: "User Not Found in the Default Organization", - // fields: fields{ - // Users: []chronograf.User{}, - // }, - // args: args{ - // server: &server.Server{ - // GithubClientID: "not empty", - // GithubClientSecret: "not empty", - // }, - // method: "GET", - // path: "/chronograf/v1/organizations/default/users", - // principal: oauth2.Principal{ - // Organization: "default", - // Subject: "billibob", - // Issuer: "github", - // }, - // }, - // wants: wants{ - // statusCode: 403, - // body: `{"code":403,"message":"User is not authorized"}`, - // }, - // }, - // { - // name: "GET /users", - // subName: "Single User in the Default Organization as 
SuperAdmin", - // fields: fields{ - // Users: []chronograf.User{ - // { - // ID: 1, // This is artificial, but should be reflective of the users actual ID - // Name: "billibob", - // Provider: "github", - // Scheme: "oauth2", - // SuperAdmin: true, - // Roles: []chronograf.Role{ - // { - // Name: "admin", - // Organization: "default", - // }, - // }, - // }, - // }, - // }, - // args: args{ - // server: &server.Server{ - // GithubClientID: "not empty", - // GithubClientSecret: "not empty", - // }, - // method: "GET", - // path: "/chronograf/v1/organizations/default/users", - // principal: oauth2.Principal{ - // Organization: "default", - // Subject: "billibob", - // Issuer: "github", - // }, - // }, - // wants: wants{ - // statusCode: 200, - // body: ` - //{ - // "links": { - // "self": "/chronograf/v1/organizations/default/users" - // }, - // "users": [ - // { - // "links": { - // "self": "/chronograf/v1/organizations/default/users/1" - // }, - // "id": "1", - // "name": "billibob", - // "provider": "github", - // "scheme": "oauth2", - // "superAdmin": true, - // "roles": [ - // { - // "name": "admin", - // "organization": "default" - // } - // ] - // } - // ] - //}`, - // }, - // }, - // { - // name: "GET /users", - // subName: "Two users in two organizations; user making request is as SuperAdmin with out raw query param", - // fields: fields{ - // Users: []chronograf.User{ - // { - // ID: 1, // This is artificial, but should be reflective of the users actual ID - // Name: "billibob", - // Provider: "github", - // Scheme: "oauth2", - // SuperAdmin: true, - // Roles: []chronograf.Role{ - // { - // Name: "admin", - // Organization: "default", - // }, - // }, - // }, - // { - // ID: 2, // This is artificial, but should be reflective of the users actual ID - // Name: "billietta", - // Provider: "github", - // Scheme: "oauth2", - // SuperAdmin: true, - // Roles: []chronograf.Role{ - // { - // Name: "admin", - // Organization: "cool", - // }, - // }, - // }, - // }, - // }, - // args: args{ - // server: &server.Server{ - // GithubClientID: "not empty", - // GithubClientSecret: "not empty", - // }, - // method: "GET", - // path: "/chronograf/v1/organizations/default/users", - // principal: oauth2.Principal{ - // Organization: "default", - // Subject: "billibob", - // Issuer: "github", - // }, - // }, - // wants: wants{ - // statusCode: 200, - // body: ` - //{ - // "links": { - // "self": "/chronograf/v1/organizations/default/users" - // }, - // "users": [ - // { - // "links": { - // "self": "/chronograf/v1/organizations/default/users/1" - // }, - // "id": "1", - // "name": "billibob", - // "provider": "github", - // "scheme": "oauth2", - // "superAdmin": true, - // "roles": [ - // { - // "name": "admin", - // "organization": "default" - // } - // ] - // } - // ] - //} - //`, - // }, - // }, - // { - // name: "POST /users", - // subName: "User making request is as SuperAdmin with raw query param; being created has wildcard role", - // fields: fields{ - // Users: []chronograf.User{ - // { - // ID: 1, // This is artificial, but should be reflective of the users actual ID - // Name: "billibob", - // Provider: "github", - // Scheme: "oauth2", - // SuperAdmin: true, - // Roles: []chronograf.Role{ - // { - // Name: "admin", - // Organization: "default", - // }, - // }, - // }, - // }, - // }, - // args: args{ - // server: &server.Server{ - // GithubClientID: "not empty", - // GithubClientSecret: "not empty", - // }, - // payload: &chronograf.User{ - // Name: "user", - // Provider: "provider", - // 
Scheme: "oauth2", - // Roles: []chronograf.Role{ - // { - // Name: "*", - // Organization: "default", - // }, - // }, - // }, - // method: "POST", - // path: "/chronograf/v1/users", - // principal: oauth2.Principal{ - // Organization: "default", - // Subject: "billibob", - // Issuer: "github", - // }, - // }, - // wants: wants{ - // statusCode: 201, - // body: ` - //{ - // "links": { - // "self": "/chronograf/v1/users/2" - // }, - // "id": "2", - // "name": "user", - // "provider": "provider", - // "scheme": "oauth2", - // "superAdmin": false, - // "roles": [ - // { - // "name": "member", - // "organization": "default" - // } - // ] - //} - //`, - // }, - // }, - // { - // name: "POST /users", - // subName: "User making request is as SuperAdmin with raw query param; being created has no roles", - // fields: fields{ - // Users: []chronograf.User{ - // { - // ID: 1, // This is artificial, but should be reflective of the users actual ID - // Name: "billibob", - // Provider: "github", - // Scheme: "oauth2", - // SuperAdmin: true, - // Roles: []chronograf.Role{ - // { - // Name: "admin", - // Organization: "default", - // }, - // }, - // }, - // }, - // }, - // args: args{ - // server: &server.Server{ - // GithubClientID: "not empty", - // GithubClientSecret: "not empty", - // }, - // payload: &chronograf.User{ - // Name: "user", - // Provider: "provider", - // Scheme: "oauth2", - // Roles: []chronograf.Role{}, - // }, - // method: "POST", - // path: "/chronograf/v1/users", - // principal: oauth2.Principal{ - // Organization: "default", - // Subject: "billibob", - // Issuer: "github", - // }, - // }, - // wants: wants{ - // statusCode: 201, - // body: ` - //{ - // "links": { - // "self": "/chronograf/v1/users/2" - // }, - // "id": "2", - // "name": "user", - // "provider": "provider", - // "scheme": "oauth2", - // "superAdmin": false, - // "roles": [] - //} - //`, - // }, - // }, - // { - // name: "GET /users", - // subName: "Two users in two organizations; user making request is as SuperAdmin with raw query param", - // fields: fields{ - // Organizations: []chronograf.Organization{ - // { - // ID: "1", - // Name: "cool", - // DefaultRole: roles.ViewerRoleName, - // }, - // }, - // Users: []chronograf.User{ - // { - // ID: 1, // This is artificial, but should be reflective of the users actual ID - // Name: "billibob", - // Provider: "github", - // Scheme: "oauth2", - // SuperAdmin: true, - // Roles: []chronograf.Role{ - // { - // Name: "admin", - // Organization: "default", - // }, - // }, - // }, - // { - // ID: 2, // This is artificial, but should be reflective of the users actual ID - // Name: "billietta", - // Provider: "github", - // Scheme: "oauth2", - // SuperAdmin: true, - // Roles: []chronograf.Role{ - // { - // Name: "admin", - // Organization: "1", - // }, - // }, - // }, - // }, - // }, - // args: args{ - // server: &server.Server{ - // GithubClientID: "not empty", - // GithubClientSecret: "not empty", - // }, - // method: "GET", - // path: "/chronograf/v1/users", - // principal: oauth2.Principal{ - // Organization: "default", - // Subject: "billibob", - // Issuer: "github", - // }, - // }, - // wants: wants{ - // statusCode: 200, - // body: ` - //{ - // "links": { - // "self": "/chronograf/v1/users" - // }, - // "users": [ - // { - // "links": { - // "self": "/chronograf/v1/users/1" - // }, - // "id": "1", - // "name": "billibob", - // "provider": "github", - // "scheme": "oauth2", - // "superAdmin": true, - // "roles": [ - // { - // "name": "admin", - // "organization": "default" 
- // } - // ] - // }, - // { - // "links": { - // "self": "/chronograf/v1/users/2" - // }, - // "id": "2", - // "name": "billietta", - // "provider": "github", - // "scheme": "oauth2", - // "superAdmin": true, - // "roles": [ - // { - // "name": "admin", - // "organization": "1" - // } - // ] - // } - // ] - //} - //`, - // }, - // }, - // { - // name: "GET /users", - // subName: "Two users in two organizations; user making request is as not SuperAdmin with raw query param", - // fields: fields{ - // Organizations: []chronograf.Organization{ - // { - // ID: "1", - // Name: "cool", - // DefaultRole: roles.ViewerRoleName, - // }, - // }, - // Users: []chronograf.User{ - // { - // ID: 1, // This is artificial, but should be reflective of the users actual ID - // Name: "billibob", - // Provider: "github", - // Scheme: "oauth2", - // SuperAdmin: true, - // Roles: []chronograf.Role{ - // { - // Name: "admin", - // Organization: "default", - // }, - // }, - // }, - // { - // ID: 2, // This is artificial, but should be reflective of the users actual ID - // Name: "billietta", - // Provider: "github", - // Scheme: "oauth2", - // SuperAdmin: false, - // Roles: []chronograf.Role{ - // { - // Name: "admin", - // Organization: "default", - // }, - // { - // Name: "admin", - // Organization: "1", - // }, - // }, - // }, - // }, - // }, - // args: args{ - // server: &server.Server{ - // GithubClientID: "not empty", - // GithubClientSecret: "not empty", - // }, - // method: "GET", - // path: "/chronograf/v1/users", - // principal: oauth2.Principal{ - // Organization: "default", - // Subject: "billieta", - // Issuer: "github", - // }, - // }, - // wants: wants{ - // statusCode: 403, - // body: ` - //{ - // "code": 403, - // "message": "User is not authorized" - //} - //`, - // }, - // }, - // { - // name: "POST /users", - // subName: "Create a New User with SuperAdmin status; SuperAdminNewUsers is true (the default case); User on Principal is a SuperAdmin", - // fields: fields{ - // Config: &chronograf.Config{ - // Auth: chronograf.AuthConfig{ - // SuperAdminNewUsers: true, - // }, - // }, - // Users: []chronograf.User{ - // { - // ID: 1, // This is artificial, but should be reflective of the users actual ID - // Name: "billibob", - // Provider: "github", - // Scheme: "oauth2", - // SuperAdmin: true, - // Roles: []chronograf.Role{ - // { - // Name: "admin", - // Organization: "default", - // }, - // }, - // }, - // }, - // }, - // args: args{ - // server: &server.Server{ - // GithubClientID: "not empty", - // GithubClientSecret: "not empty", - // }, - // method: "POST", - // path: "/chronograf/v1/organizations/default/users", - // payload: &chronograf.User{ - // Name: "user", - // Provider: "provider", - // Scheme: "oauth2", - // Roles: []chronograf.Role{ - // { - // Name: roles.EditorRoleName, - // Organization: "default", - // }, - // }, - // }, - // principal: oauth2.Principal{ - // Organization: "default", - // Subject: "billibob", - // Issuer: "github", - // }, - // }, - // wants: wants{ - // statusCode: 201, - // body: ` - //{ - // "links": { - // "self": "/chronograf/v1/organizations/default/users/2" - // }, - // "id": "2", - // "name": "user", - // "provider": "provider", - // "scheme": "oauth2", - // "superAdmin": true, - // "roles": [ - // { - // "name": "editor", - // "organization": "default" - // } - // ] - //}`, - // }, - // }, - // { - // name: "POST /users", - // subName: "Create a New User with SuperAdmin status; SuperAdminNewUsers is false; User on Principal is a SuperAdmin", - // fields: 
fields{ - // Config: &chronograf.Config{ - // Auth: chronograf.AuthConfig{ - // SuperAdminNewUsers: false, - // }, - // }, - // Users: []chronograf.User{ - // { - // ID: 1, // This is artificial, but should be reflective of the users actual ID - // Name: "billibob", - // Provider: "github", - // Scheme: "oauth2", - // SuperAdmin: true, - // Roles: []chronograf.Role{ - // { - // Name: "admin", - // Organization: "default", - // }, - // }, - // }, - // }, - // }, - // args: args{ - // server: &server.Server{ - // GithubClientID: "not empty", - // GithubClientSecret: "not empty", - // }, - // method: "POST", - // path: "/chronograf/v1/organizations/default/users", - // payload: &chronograf.User{ - // Name: "user", - // Provider: "provider", - // Scheme: "oauth2", - // Roles: []chronograf.Role{ - // { - // Name: roles.EditorRoleName, - // Organization: "default", - // }, - // }, - // }, - // principal: oauth2.Principal{ - // Organization: "default", - // Subject: "billibob", - // Issuer: "github", - // }, - // }, - // wants: wants{ - // statusCode: 201, - // body: ` - //{ - // "links": { - // "self": "/chronograf/v1/organizations/default/users/2" - // }, - // "id": "2", - // "name": "user", - // "provider": "provider", - // "scheme": "oauth2", - // "superAdmin": false, - // "roles": [ - // { - // "name": "editor", - // "organization": "default" - // } - // ] - //}`, - // }, - // }, - // { - // name: "POST /users", - // subName: "Create a New User with SuperAdmin status; SuperAdminNewUsers is false; User on Principal is Admin, but not a SuperAdmin", - // fields: fields{ - // Config: &chronograf.Config{ - // Auth: chronograf.AuthConfig{ - // SuperAdminNewUsers: false, - // }, - // }, - // Users: []chronograf.User{ - // { - // ID: 1, // This is artificial, but should be reflective of the users actual ID - // Name: "billibob", - // Provider: "github", - // Scheme: "oauth2", - // SuperAdmin: false, - // Roles: []chronograf.Role{ - // { - // Name: "admin", - // Organization: "default", - // }, - // }, - // }, - // }, - // }, - // args: args{ - // server: &server.Server{ - // GithubClientID: "not empty", - // GithubClientSecret: "not empty", - // }, - // method: "POST", - // path: "/chronograf/v1/organizations/default/users", - // payload: &chronograf.User{ - // Name: "user", - // Provider: "provider", - // Scheme: "oauth2", - // Roles: []chronograf.Role{ - // { - // Name: roles.EditorRoleName, - // Organization: "default", - // }, - // }, - // }, - // principal: oauth2.Principal{ - // Organization: "default", - // Subject: "billibob", - // Issuer: "github", - // }, - // }, - // wants: wants{ - // statusCode: 201, - // body: ` - //{ - // "links": { - // "self": "/chronograf/v1/organizations/default/users/2" - // }, - // "id": "2", - // "name": "user", - // "provider": "provider", - // "scheme": "oauth2", - // "superAdmin": false, - // "roles": [ - // { - // "name": "editor", - // "organization": "default" - // } - // ] - //}`, - // }, - // }, - // { - // name: "POST /users", - // subName: "Create a New User with SuperAdmin status; SuperAdminNewUsers is true; User on Principal is Admin, but not a SuperAdmin", - // fields: fields{ - // Config: &chronograf.Config{ - // Auth: chronograf.AuthConfig{ - // SuperAdminNewUsers: true, - // }, - // }, - // Users: []chronograf.User{ - // { - // ID: 1, // This is artificial, but should be reflective of the users actual ID - // Name: "billibob", - // Provider: "github", - // Scheme: "oauth2", - // SuperAdmin: false, - // Roles: []chronograf.Role{ - // { - // Name: 
"admin", - // Organization: "default", - // }, - // }, - // }, - // }, - // }, - // args: args{ - // server: &server.Server{ - // GithubClientID: "not empty", - // GithubClientSecret: "not empty", - // }, - // method: "POST", - // path: "/chronograf/v1/organizations/default/users", - // payload: &chronograf.User{ - // Name: "user", - // Provider: "provider", - // Scheme: "oauth2", - // SuperAdmin: true, - // Roles: []chronograf.Role{ - // { - // Name: roles.EditorRoleName, - // Organization: "default", - // }, - // }, - // }, - // principal: oauth2.Principal{ - // Organization: "default", - // Subject: "billibob", - // Issuer: "github", - // }, - // }, - // wants: wants{ - // statusCode: 401, - // body: ` - //{ - // "code": 401, - // "message": "user does not have authorization required to set SuperAdmin status. See https://github.com/influxdata/influxdb/chronograf/issues/2601 for more information." - //}`, - // }, - // }, - // { - // name: "POST /users", - // subName: "Create a New User with in multiple organizations; User on Principal is a SuperAdmin with raw query param", - // fields: fields{ - // Config: &chronograf.Config{ - // Auth: chronograf.AuthConfig{ - // SuperAdminNewUsers: true, - // }, - // }, - // Organizations: []chronograf.Organization{ - // { - // ID: "1", - // Name: "cool", - // DefaultRole: roles.ViewerRoleName, - // }, - // }, - // Users: []chronograf.User{ - // { - // ID: 1, // This is artificial, but should be reflective of the users actual ID - // Name: "billibob", - // Provider: "github", - // Scheme: "oauth2", - // SuperAdmin: true, - // Roles: []chronograf.Role{ - // { - // Name: "admin", - // Organization: "default", - // }, - // }, - // }, - // }, - // }, - // args: args{ - // server: &server.Server{ - // GithubClientID: "not empty", - // GithubClientSecret: "not empty", - // }, - // method: "POST", - // path: "/chronograf/v1/users", - // payload: &chronograf.User{ - // Name: "user", - // Provider: "provider", - // Scheme: "oauth2", - // Roles: []chronograf.Role{ - // { - // Name: roles.EditorRoleName, - // Organization: "default", - // }, - // { - // Name: roles.EditorRoleName, - // Organization: "1", - // }, - // }, - // }, - // principal: oauth2.Principal{ - // Organization: "default", - // Subject: "billibob", - // Issuer: "github", - // }, - // }, - // wants: wants{ - // statusCode: 201, - // body: ` - //{ - // "links": { - // "self": "/chronograf/v1/users/2" - // }, - // "id": "2", - // "name": "user", - // "provider": "provider", - // "scheme": "oauth2", - // "superAdmin": true, - // "roles": [ - // { - // "name": "editor", - // "organization": "default" - // }, - // { - // "name": "editor", - // "organization": "1" - // } - // ] - //}`, - // }, - // }, - // { - // name: "PATCH /users", - // subName: "Update user to have no roles", - // fields: fields{ - // Config: &chronograf.Config{ - // Auth: chronograf.AuthConfig{ - // SuperAdminNewUsers: true, - // }, - // }, - // Organizations: []chronograf.Organization{ - // { - // ID: "1", - // Name: "cool", - // DefaultRole: roles.ViewerRoleName, - // }, - // }, - // Users: []chronograf.User{ - // { - // ID: 1, // This is artificial, but should be reflective of the users actual ID - // Name: "billibob", - // Provider: "github", - // Scheme: "oauth2", - // SuperAdmin: true, - // Roles: []chronograf.Role{ - // { - // Name: "admin", - // Organization: "default", - // }, - // }, - // }, - // }, - // }, - // args: args{ - // server: &server.Server{ - // GithubClientID: "not empty", - // GithubClientSecret: 
"not empty", - // }, - // method: "PATCH", - // path: "/chronograf/v1/users/1", - // payload: map[string]interface{}{ - // "name": "billibob", - // "provider": "github", - // "scheme": "oauth2", - // "superAdmin": true, - // "roles": []chronograf.Role{}, - // }, - // principal: oauth2.Principal{ - // Organization: "default", - // Subject: "billibob", - // Issuer: "github", - // }, - // }, - // wants: wants{ - // statusCode: 200, - // body: ` - //{ - // "links": { - // "self": "/chronograf/v1/users/1" - // }, - // "id": "1", - // "name": "billibob", - // "provider": "github", - // "scheme": "oauth2", - // "superAdmin": true, - // "roles": [ - // ] - //}`, - // }, - // }, - // { - // name: "PATCH /users", - // subName: "Update user roles with wildcard", - // fields: fields{ - // Config: &chronograf.Config{ - // Auth: chronograf.AuthConfig{ - // SuperAdminNewUsers: true, - // }, - // }, - // Organizations: []chronograf.Organization{ - // { - // ID: "1", - // Name: "cool", - // DefaultRole: roles.ViewerRoleName, - // }, - // }, - // Users: []chronograf.User{ - // { - // ID: 1, // This is artificial, but should be reflective of the users actual ID - // Name: "billibob", - // Provider: "github", - // Scheme: "oauth2", - // SuperAdmin: true, - // Roles: []chronograf.Role{ - // { - // Name: "admin", - // Organization: "default", - // }, - // }, - // }, - // }, - // }, - // args: args{ - // server: &server.Server{ - // GithubClientID: "not empty", - // GithubClientSecret: "not empty", - // }, - // method: "PATCH", - // path: "/chronograf/v1/users/1", - // payload: &chronograf.User{ - // Name: "billibob", - // Provider: "github", - // Scheme: "oauth2", - // SuperAdmin: true, - // Roles: []chronograf.Role{ - // { - // Name: roles.AdminRoleName, - // Organization: "default", - // }, - // { - // Name: roles.WildcardRoleName, - // Organization: "1", - // }, - // }, - // }, - // principal: oauth2.Principal{ - // Organization: "default", - // Subject: "billibob", - // Issuer: "github", - // }, - // }, - // wants: wants{ - // statusCode: 200, - // body: ` - //{ - // "links": { - // "self": "/chronograf/v1/users/1" - // }, - // "id": "1", - // "name": "billibob", - // "provider": "github", - // "scheme": "oauth2", - // "superAdmin": true, - // "roles": [ - // { - // "name": "admin", - // "organization": "default" - // }, - // { - // "name": "viewer", - // "organization": "1" - // } - // ] - //}`, - // }, - // }, - // { - // name: "PATCH /users/1", - // subName: "SuperAdmin modifying their own status", - // fields: fields{ - // Users: []chronograf.User{ - // { - // ID: 1, // This is artificial, but should be reflective of the users actual ID - // Name: "billibob", - // Provider: "github", - // Scheme: "oauth2", - // SuperAdmin: true, - // Roles: []chronograf.Role{ - // { - // Name: "admin", - // Organization: "default", - // }, - // }, - // }, - // }, - // }, - // args: args{ - // server: &server.Server{ - // GithubClientID: "not empty", - // GithubClientSecret: "not empty", - // }, - // method: "PATCH", - // path: "/chronograf/v1/organizations/default/users/1", - // payload: map[string]interface{}{ - // "id": "1", - // "superAdmin": false, - // "roles": []interface{}{ - // map[string]interface{}{ - // "name": "admin", - // "organization": "default", - // }, - // }, - // }, - // principal: oauth2.Principal{ - // Organization: "default", - // Subject: "billibob", - // Issuer: "github", - // }, - // }, - // wants: wants{ - // statusCode: http.StatusUnauthorized, - // body: ` - //{ - // "code": 401, - // 
"message": "user cannot modify their own SuperAdmin status" - //} - //`, - // }, - // }, - // { - // name: "GET /organization/default/users", - // subName: "Organization not set explicitly on principal", - // fields: fields{ - // Config: &chronograf.Config{ - // Auth: chronograf.AuthConfig{ - // SuperAdminNewUsers: false, - // }, - // }, - // Organizations: []chronograf.Organization{}, - // Users: []chronograf.User{ - // { - // ID: 1, // This is artificial, but should be reflective of the users actual ID - // Name: "billibob", - // Provider: "github", - // Scheme: "oauth2", - // SuperAdmin: true, - // Roles: []chronograf.Role{ - // { - // Name: "admin", - // Organization: "default", - // }, - // }, - // }, - // }, - // }, - // args: args{ - // server: &server.Server{ - // GithubClientID: "not empty", - // GithubClientSecret: "not empty", - // }, - // method: "GET", - // path: "/chronograf/v1/organizations/default/users", - // principal: oauth2.Principal{ - // Organization: "", - // Subject: "billibob", - // Issuer: "github", - // }, - // }, - // wants: wants{ - // statusCode: 200, - // body: ` - //{ - // "links": { - // "self": "/chronograf/v1/organizations/default/users" - // }, - // "users": [ - // { - // "links": { - // "self": "/chronograf/v1/organizations/default/users/1" - // }, - // "id": "1", - // "name": "billibob", - // "provider": "github", - // "scheme": "oauth2", - // "superAdmin": true, - // "roles": [ - // { - // "name": "admin", - // "organization": "default" - // } - // ] - // } - // ] - //} - //`, - // }, - // }, - // { - // name: "PUT /me", - // subName: "Change SuperAdmins current organization to org they dont belong to", - // fields: fields{ - // Config: &chronograf.Config{ - // Auth: chronograf.AuthConfig{ - // SuperAdminNewUsers: false, - // }, - // }, - // Organizations: []chronograf.Organization{ - // { - // ID: "1", - // Name: "Sweet", - // DefaultRole: roles.ViewerRoleName, - // }, - // }, - // Users: []chronograf.User{ - // { - // ID: 1, // This is artificial, but should be reflective of the users actual ID - // Name: "billibob", - // Provider: "github", - // Scheme: "oauth2", - // SuperAdmin: true, - // Roles: []chronograf.Role{ - // { - // Name: "admin", - // Organization: "default", - // }, - // }, - // }, - // }, - // }, - // args: args{ - // server: &server.Server{ - // GithubClientID: "not empty", - // GithubClientSecret: "not empty", - // }, - // method: "PUT", - // path: "/chronograf/v1/me", - // payload: map[string]string{ - // "organization": "1", - // }, - // principal: oauth2.Principal{ - // Organization: "default", - // Subject: "billibob", - // Issuer: "github", - // }, - // }, - // wants: wants{ - // statusCode: 200, - // body: ` - //{ - // "id": "1", - // "name": "billibob", - // "roles": [ - // { - // "name": "admin", - // "organization": "default" - // }, - // { - // "name": "viewer", - // "organization": "1" - // } - // ], - // "provider": "github", - // "scheme": "oauth2", - // "superAdmin": true, - // "links": { - // "self": "/chronograf/v1/organizations/1/users/1" - // }, - // "organizations": [ - // { - // "id": "1", - // "name": "Sweet", - // "defaultRole": "viewer" - // }, - // { - // "id": "default", - // "name": "Default", - // "defaultRole": "member" - // } - // ], - // "currentOrganization": { - // "id": "1", - // "name": "Sweet", - // "defaultRole": "viewer" - // } - //}`, - // }, - // }, - // { - // name: "PUT /me", - // subName: "Change Admin current organization to org they dont belong to", - // fields: fields{ - // Config: 
&chronograf.Config{ - // Auth: chronograf.AuthConfig{ - // SuperAdminNewUsers: false, - // }, - // }, - // Organizations: []chronograf.Organization{ - // { - // ID: "1", - // Name: "Sweet", - // DefaultRole: roles.ViewerRoleName, - // }, - // }, - // Users: []chronograf.User{ - // { - // ID: 1, // This is artificial, but should be reflective of the users actual ID - // Name: "billibob", - // Provider: "github", - // Scheme: "oauth2", - // SuperAdmin: false, - // Roles: []chronograf.Role{ - // { - // Name: "admin", - // Organization: "default", - // }, - // }, - // }, - // }, - // }, - // args: args{ - // server: &server.Server{ - // GithubClientID: "not empty", - // GithubClientSecret: "not empty", - // }, - // method: "PUT", - // path: "/chronograf/v1/me", - // payload: map[string]string{ - // "organization": "1", - // }, - // principal: oauth2.Principal{ - // Organization: "default", - // Subject: "billibob", - // Issuer: "github", - // }, - // }, - // wants: wants{ - // statusCode: 403, - // body: ` - // { - // "code": 403, - // "message": "user not found" - //}`, - // }, - // }, - // { - // name: "GET /me", - // subName: "New user hits me for the first time", - // fields: fields{ - // Config: &chronograf.Config{ - // Auth: chronograf.AuthConfig{ - // SuperAdminNewUsers: false, - // }, - // }, - // Mappings: []chronograf.Mapping{ - // { - // ID: "1", - // Organization: "1", - // Provider: "*", - // Scheme: "*", - // ProviderOrganization: "influxdata", - // }, - // { - // ID: "1", - // Organization: "1", - // Provider: "*", - // Scheme: "*", - // ProviderOrganization: "*", - // }, - // { - // ID: "2", - // Organization: "2", - // Provider: "github", - // Scheme: "*", - // ProviderOrganization: "*", - // }, - // { - // ID: "3", - // Organization: "3", - // Provider: "auth0", - // Scheme: "ldap", - // ProviderOrganization: "*", - // }, - // }, - // Organizations: []chronograf.Organization{ - // { - // ID: "1", - // Name: "Sweet", - // DefaultRole: roles.ViewerRoleName, - // }, - // { - // ID: "2", - // Name: "What", - // DefaultRole: roles.EditorRoleName, - // }, - // { - // ID: "3", - // Name: "Okay", - // DefaultRole: roles.AdminRoleName, - // }, - // }, - // Users: []chronograf.User{ - // { - // ID: 1, // This is artificial, but should be reflective of the users actual ID - // Name: "billibob", - // Provider: "github", - // Scheme: "oauth2", - // SuperAdmin: true, - // Roles: []chronograf.Role{}, - // }, - // }, - // }, - // args: args{ - // server: &server.Server{ - // GithubClientID: "not empty", - // GithubClientSecret: "not empty", - // }, - // method: "GET", - // path: "/chronograf/v1/me", - // principal: oauth2.Principal{ - // Subject: "billietta", - // Issuer: "github", - // Group: "influxdata,idk,mimi", - // }, - // }, - // wants: wants{ - // statusCode: 200, - // body: ` - //{ - // "id": "2", - // "name": "billietta", - // "roles": [ - // { - // "name": "viewer", - // "organization": "1" - // }, - // { - // "name": "editor", - // "organization": "2" - // }, - // { - // "name": "member", - // "organization": "default" - // } - // ], - // "provider": "github", - // "scheme": "oauth2", - // "links": { - // "self": "/chronograf/v1/organizations/default/users/2" - // }, - // "organizations": [ - // { - // "id": "1", - // "name": "Sweet", - // "defaultRole": "viewer" - // }, - // { - // "id": "2", - // "name": "What", - // "defaultRole": "editor" - // }, - // { - // "id": "default", - // "name": "Default", - // "defaultRole": "member" - // } - // ], - // "currentOrganization": { - // 
"id": "default", - // "name": "Default", - // "defaultRole": "member" - // } - //} - //`, - // }, - // }, - // { - // name: "GET /mappings", - // subName: "get all mappings", - // fields: fields{ - // Config: &chronograf.Config{ - // Auth: chronograf.AuthConfig{ - // SuperAdminNewUsers: false, - // }, - // }, - // Mappings: []chronograf.Mapping{ - // { - // ID: "1", - // Organization: "1", - // Provider: "*", - // Scheme: "*", - // ProviderOrganization: "influxdata", - // }, - // { - // ID: "1", - // Organization: "1", - // Provider: "*", - // Scheme: "*", - // ProviderOrganization: "*", - // }, - // { - // ID: "2", - // Organization: "2", - // Provider: "github", - // Scheme: "*", - // ProviderOrganization: "*", - // }, - // { - // ID: "3", - // Organization: "3", - // Provider: "auth0", - // Scheme: "ldap", - // ProviderOrganization: "*", - // }, - // }, - // Organizations: []chronograf.Organization{ - // { - // ID: "1", - // Name: "Sweet", - // DefaultRole: roles.ViewerRoleName, - // }, - // { - // ID: "2", - // Name: "What", - // DefaultRole: roles.EditorRoleName, - // }, - // { - // ID: "3", - // Name: "Okay", - // DefaultRole: roles.AdminRoleName, - // }, - // }, - // Users: []chronograf.User{ - // { - // ID: 1, // This is artificial, but should be reflective of the users actual ID - // Name: "billibob", - // Provider: "github", - // Scheme: "oauth2", - // SuperAdmin: true, - // }, - // }, - // }, - // args: args{ - // server: &server.Server{ - // GithubClientID: "not empty", - // GithubClientSecret: "not empty", - // }, - // method: "GET", - // path: "/chronograf/v1/mappings", - // principal: oauth2.Principal{ - // Subject: "billibob", - // Issuer: "github", - // Group: "influxdata,idk,mimi", - // }, - // }, - // wants: wants{ - // statusCode: 200, - // body: ` - //{ - // "links": { - // "self": "/chronograf/v1/mappings" - // }, - // "mappings": [ - // { - // "links": { - // "self": "/chronograf/v1/mappings/1" - // }, - // "id": "1", - // "organizationId": "1", - // "provider": "*", - // "scheme": "*", - // "providerOrganization": "influxdata" - // }, - // { - // "links": { - // "self": "/chronograf/v1/mappings/2" - // }, - // "id": "2", - // "organizationId": "1", - // "provider": "*", - // "scheme": "*", - // "providerOrganization": "*" - // }, - // { - // "links": { - // "self": "/chronograf/v1/mappings/3" - // }, - // "id": "3", - // "organizationId": "2", - // "provider": "github", - // "scheme": "*", - // "providerOrganization": "*" - // }, - // { - // "links": { - // "self": "/chronograf/v1/mappings/4" - // }, - // "id": "4", - // "organizationId": "3", - // "provider": "auth0", - // "scheme": "ldap", - // "providerOrganization": "*" - // }, - // { - // "links": { - // "self": "/chronograf/v1/mappings/default" - // }, - // "id": "default", - // "organizationId": "default", - // "provider": "*", - // "scheme": "*", - // "providerOrganization": "*" - // } - // ] - //} - //`, - // }, - // }, - // { - // name: "GET /mappings", - // subName: "get all mappings - user is not super admin", - // fields: fields{ - // Config: &chronograf.Config{ - // Auth: chronograf.AuthConfig{ - // SuperAdminNewUsers: false, - // }, - // }, - // Mappings: []chronograf.Mapping{ - // { - // ID: "1", - // Organization: "1", - // Provider: "*", - // Scheme: "*", - // ProviderOrganization: "influxdata", - // }, - // { - // ID: "1", - // Organization: "1", - // Provider: "*", - // Scheme: "*", - // ProviderOrganization: "*", - // }, - // { - // ID: "2", - // Organization: "2", - // Provider: "github", - // 
Scheme: "*", - // ProviderOrganization: "*", - // }, - // { - // ID: "3", - // Organization: "3", - // Provider: "auth0", - // Scheme: "ldap", - // ProviderOrganization: "*", - // }, - // }, - // Organizations: []chronograf.Organization{ - // { - // ID: "1", - // Name: "Sweet", - // DefaultRole: roles.ViewerRoleName, - // }, - // { - // ID: "2", - // Name: "What", - // DefaultRole: roles.EditorRoleName, - // }, - // { - // ID: "3", - // Name: "Okay", - // DefaultRole: roles.AdminRoleName, - // }, - // }, - // Users: []chronograf.User{ - // { - // ID: 1, // This is artificial, but should be reflective of the users actual ID - // Name: "billibob", - // Provider: "github", - // Scheme: "oauth2", - // SuperAdmin: false, - // }, - // }, - // }, - // args: args{ - // server: &server.Server{ - // GithubClientID: "not empty", - // GithubClientSecret: "not empty", - // }, - // method: "GET", - // path: "/chronograf/v1/mappings", - // principal: oauth2.Principal{ - // Subject: "billibob", - // Issuer: "github", - // Group: "influxdata,idk,mimi", - // }, - // }, - // wants: wants{ - // statusCode: 403, - // body: ` - //{ - // "code": 403, - // "message": "User is not authorized" - //} - //`, - // }, - // }, - // { - // name: "POST /mappings", - // subName: "create new mapping", - // fields: fields{ - // Config: &chronograf.Config{ - // Auth: chronograf.AuthConfig{ - // SuperAdminNewUsers: false, - // }, - // }, - // Mappings: []chronograf.Mapping{}, - // Organizations: []chronograf.Organization{ - // { - // ID: "1", - // Name: "Sweet", - // DefaultRole: roles.ViewerRoleName, - // }, - // }, - // Users: []chronograf.User{ - // { - // ID: 1, // This is artificial, but should be reflective of the users actual ID - // Name: "billibob", - // Provider: "github", - // Scheme: "oauth2", - // SuperAdmin: true, - // }, - // }, - // }, - // args: args{ - // server: &server.Server{ - // GithubClientID: "not empty", - // GithubClientSecret: "not empty", - // }, - // method: "POST", - // path: "/chronograf/v1/mappings", - // payload: &chronograf.Mapping{ - // ID: "1", - // Organization: "1", - // Provider: "*", - // Scheme: "*", - // ProviderOrganization: "influxdata", - // }, - // principal: oauth2.Principal{ - // Subject: "billibob", - // Issuer: "github", - // Group: "influxdata,idk,mimi", - // }, - // }, - // wants: wants{ - // statusCode: 201, - // body: ` - //{ - // "links": { - // "self": "/chronograf/v1/mappings/1" - // }, - // "id": "1", - // "organizationId": "1", - // "provider": "*", - // "scheme": "*", - // "providerOrganization": "influxdata" - //} - //`, - // }, - // }, - // { - // name: "PUT /mappings", - // subName: "update new mapping", - // fields: fields{ - // Config: &chronograf.Config{ - // Auth: chronograf.AuthConfig{ - // SuperAdminNewUsers: false, - // }, - // }, - // Mappings: []chronograf.Mapping{ - // chronograf.Mapping{ - // ID: "1", - // Organization: "1", - // Provider: "*", - // Scheme: "*", - // ProviderOrganization: "influxdata", - // }, - // }, - // Organizations: []chronograf.Organization{ - // { - // ID: "1", - // Name: "Sweet", - // DefaultRole: roles.ViewerRoleName, - // }, - // }, - // Users: []chronograf.User{ - // { - // ID: 1, // This is artificial, but should be reflective of the users actual ID - // Name: "billibob", - // Provider: "github", - // Scheme: "oauth2", - // SuperAdmin: true, - // }, - // }, - // }, - // args: args{ - // server: &server.Server{ - // GithubClientID: "not empty", - // GithubClientSecret: "not empty", - // }, - // method: "PUT", - // path: 
"/chronograf/v1/mappings/1", - // payload: &chronograf.Mapping{ - // ID: "1", - // Organization: "1", - // Provider: "*", - // Scheme: "*", - // ProviderOrganization: "*", - // }, - // principal: oauth2.Principal{ - // Subject: "billibob", - // Issuer: "github", - // Group: "influxdata,idk,mimi", - // }, - // }, - // wants: wants{ - // statusCode: 200, - // body: ` - //{ - // "links": { - // "self": "/chronograf/v1/mappings/1" - // }, - // "id": "1", - // "organizationId": "1", - // "provider": "*", - // "scheme": "*", - // "providerOrganization": "*" - //} - //`, - // }, - // }, - // { - // name: "GET /org_config", - // subName: "default org", - // fields: fields{ - // Users: []chronograf.User{ - // { - // ID: 1, // This is artificial, but should be reflective of the users actual ID - // Name: "billibob", - // Provider: "github", - // Scheme: "oauth2", - // SuperAdmin: true, - // Roles: []chronograf.Role{ - // { - // Name: "admin", - // Organization: "default", - // }, - // }, - // }, - // }, - // }, - // args: args{ - // server: &server.Server{ - // GithubClientID: "not empty", - // GithubClientSecret: "not empty", - // }, - // method: "GET", - // path: "/chronograf/v1/org_config", - // principal: oauth2.Principal{ - // Organization: "default", - // Subject: "billibob", - // Issuer: "github", - // }, - // }, - // wants: wants{ - // statusCode: 200, - // body: ` - // { - // "links": { - // "self": "\/chronograf\/v1\/org_config", - // "logViewer": "\/chronograf\/v1\/org_config\/logviewer" - // }, - // "organization": "default", - // "logViewer": { - // "columns": [ - // { - // "name": "time", - // "position": 0, - // "encodings": [ - // { - // "type": "visibility", - // "value": "hidden" - // } - // ] - // }, - // { - // "name": "severity", - // "position": 1, - // "encodings": [ - // { - // "type": "visibility", - // "value": "visible" - // }, - // { - // "type": "label", - // "value": "icon" - // }, - // { - // "type": "label", - // "value": "text" - // }, - // { - // "type": "color", - // "value": "ruby", - // "name": "emerg" - // }, - // { - // "type": "color", - // "value": "fire", - // "name": "alert" - // }, - // { - // "type": "color", - // "value": "curacao", - // "name": "crit" - // }, - // { - // "type": "color", - // "value": "tiger", - // "name": "err" - // }, - // { - // "type": "color", - // "value": "pineapple", - // "name": "warning" - // }, - // { - // "type": "color", - // "value": "rainforest", - // "name": "notice" - // }, - // { - // "type": "color", - // "value": "star", - // "name": "info" - // }, - // { - // "type": "color", - // "value": "wolf", - // "name": "debug" - // } - // ] - // }, - // { - // "name": "timestamp", - // "position": 2, - // "encodings": [ - // { - // "type": "visibility", - // "value": "visible" - // } - // ] - // }, - // { - // "name": "message", - // "position": 3, - // "encodings": [ - // { - // "type": "visibility", - // "value": "visible" - // } - // ] - // }, - // { - // "name": "facility", - // "position": 4, - // "encodings": [ - // { - // "type": "visibility", - // "value": "visible" - // } - // ] - // }, - // { - // "name": "procid", - // "position": 5, - // "encodings": [ - // { - // "type": "visibility", - // "value": "visible" - // }, - // { - // "type": "displayName", - // "value": "Proc ID" - // } - // ] - // }, - // { - // "name": "appname", - // "position": 6, - // "encodings": [ - // { - // "type": "visibility", - // "value": "visible" - // }, - // { - // "type": "displayName", - // "value": "Application" - // } - // ] - // 
}, - // { - // "name": "host", - // "position": 7, - // "encodings": [ - // { - // "type": "visibility", - // "value": "visible" - // } - // ] - // } - // ] - // } - // } - // `, - // }, - // }, - // { - // name: "GET /org_config/logviewer", - // subName: "default org", - // fields: fields{ - // Users: []chronograf.User{ - // { - // ID: 1, // This is artificial, but should be reflective of the users actual ID - // Name: "billibob", - // Provider: "github", - // Scheme: "oauth2", - // SuperAdmin: true, - // Roles: []chronograf.Role{ - // { - // Name: "admin", - // Organization: "default", - // }, - // }, - // }, - // }, - // }, - // args: args{ - // server: &server.Server{ - // GithubClientID: "not empty", - // GithubClientSecret: "not empty", - // }, - // method: "GET", - // path: "/chronograf/v1/org_config/logviewer", - // principal: oauth2.Principal{ - // Organization: "default", - // Subject: "billibob", - // Issuer: "github", - // }, - // }, - // wants: wants{ - // statusCode: 200, - // body: ` - // { - // "links": { - // "self": "\/chronograf\/v1\/org_config/logviewer" - // }, - // "columns": [ - // { - // "name": "time", - // "position": 0, - // "encodings": [ - // { - // "type": "visibility", - // "value": "hidden" - // } - // ] - // }, - // { - // "name": "severity", - // "position": 1, - // "encodings": [ - // { - // "type": "visibility", - // "value": "visible" - // }, - // { - // "type": "label", - // "value": "icon" - // }, - // { - // "type": "label", - // "value": "text" - // }, - // { - // "type": "color", - // "value": "ruby", - // "name": "emerg" - // }, - // { - // "type": "color", - // "value": "fire", - // "name": "alert" - // }, - // { - // "type": "color", - // "value": "curacao", - // "name": "crit" - // }, - // { - // "type": "color", - // "value": "tiger", - // "name": "err" - // }, - // { - // "type": "color", - // "value": "pineapple", - // "name": "warning" - // }, - // { - // "type": "color", - // "value": "rainforest", - // "name": "notice" - // }, - // { - // "type": "color", - // "value": "star", - // "name": "info" - // }, - // { - // "type": "color", - // "value": "wolf", - // "name": "debug" - // } - // ] - // }, - // { - // "name": "timestamp", - // "position": 2, - // "encodings": [ - // { - // "type": "visibility", - // "value": "visible" - // } - // ] - // }, - // { - // "name": "message", - // "position": 3, - // "encodings": [ - // { - // "type": "visibility", - // "value": "visible" - // } - // ] - // }, - // { - // "name": "facility", - // "position": 4, - // "encodings": [ - // { - // "type": "visibility", - // "value": "visible" - // } - // ] - // }, - // { - // "name": "procid", - // "position": 5, - // "encodings": [ - // { - // "type": "visibility", - // "value": "visible" - // }, - // { - // "type": "displayName", - // "value": "Proc ID" - // } - // ] - // }, - // { - // "name": "appname", - // "position": 6, - // "encodings": [ - // { - // "type": "visibility", - // "value": "visible" - // }, - // { - // "type": "displayName", - // "value": "Application" - // } - // ] - // }, - // { - // "name": "host", - // "position": 7, - // "encodings": [ - // { - // "type": "visibility", - // "value": "visible" - // } - // ] - // } - // ] - // } - // `, - // }, - // }, - // { - // name: "PUT /org_config/logviewer", - // subName: "default org", - // fields: fields{ - // Config: &chronograf.Config{ - // Auth: chronograf.AuthConfig{ - // SuperAdminNewUsers: true, - // }, - // }, - // Organizations: []chronograf.Organization{ - // { - // ID: "1", - // 
Name: "cool", - // DefaultRole: roles.ViewerRoleName, - // }, - // }, - // Users: []chronograf.User{ - // { - // ID: 1, // This is artificial, but should be reflective of the users actual ID - // Name: "billibob", - // Provider: "github", - // Scheme: "oauth2", - // SuperAdmin: true, - // Roles: []chronograf.Role{ - // { - // Name: "admin", - // Organization: "default", - // }, - // }, - // }, - // }, - // }, - // args: args{ - // server: &server.Server{ - // GithubClientID: "not empty", - // GithubClientSecret: "not empty", - // }, - // method: "PUT", - // path: "/chronograf/v1/org_config/logviewer", - // payload: &chronograf.LogViewerConfig{ - // Columns: []chronograf.LogViewerColumn{ - // { - // Name: "time", - // Position: 0, - // Encodings: []chronograf.ColumnEncoding{ - // { - // Type: "visibility", - // Value: "hidden", - // }, - // }, - // }, - // { - // Name: "severity", - // Position: 1, - // Encodings: []chronograf.ColumnEncoding{ - // - // { - // Type: "visibility", - // Value: "visible", - // }, - // { - // Type: "label", - // Value: "icon", - // }, - // { - // Type: "color", - // Name: "emerg", - // Value: "ruby", - // }, - // { - // Type: "color", - // Name: "alert", - // Value: "fire", - // }, - // { - // Type: "color", - // Name: "crit", - // Value: "curacao", - // }, - // { - // Type: "color", - // Name: "err", - // Value: "tiger", - // }, - // { - // Type: "color", - // Name: "warning", - // Value: "pineapple", - // }, - // { - // Type: "color", - // Name: "notice", - // Value: "wolf", - // }, - // { - // Type: "color", - // Name: "info", - // Value: "wolf", - // }, - // { - // Type: "color", - // Name: "debug", - // Value: "wolf", - // }, - // }, - // }, - // { - // Name: "timestamp", - // Position: 3, - // Encodings: []chronograf.ColumnEncoding{ - // - // { - // Type: "visibility", - // Value: "visible", - // }, - // }, - // }, - // { - // Name: "message", - // Position: 2, - // Encodings: []chronograf.ColumnEncoding{ - // - // { - // Type: "visibility", - // Value: "visible", - // }, - // }, - // }, - // { - // Name: "facility", - // Position: 4, - // Encodings: []chronograf.ColumnEncoding{ - // - // { - // Type: "visibility", - // Value: "visible", - // }, - // }, - // }, - // { - // Name: "procid", - // Position: 5, - // Encodings: []chronograf.ColumnEncoding{ - // - // { - // Type: "visibility", - // Value: "hidden", - // }, - // { - // Type: "displayName", - // Value: "ProcID!", - // }, - // }, - // }, - // { - // Name: "appname", - // Position: 6, - // Encodings: []chronograf.ColumnEncoding{ - // { - // Type: "visibility", - // Value: "visible", - // }, - // { - // Type: "displayName", - // Value: "Application", - // }, - // }, - // }, - // { - // Name: "host", - // Position: 7, - // Encodings: []chronograf.ColumnEncoding{ - // { - // Type: "visibility", - // Value: "visible", - // }, - // }, - // }, - // }, - // }, - // principal: oauth2.Principal{ - // Organization: "default", - // Subject: "billibob", - // Issuer: "github", - // }, - // }, - // wants: wants{ - // statusCode: 200, - // body: ` - // { - // "links": { - // "self": "\/chronograf\/v1\/org_config\/logviewer" - // }, - // "columns": [ - // { - // "name": "time", - // "position": 0, - // "encodings": [ - // { - // "type": "visibility", - // "value": "hidden" - // } - // ] - // }, - // { - // "name": "severity", - // "position": 1, - // "encodings": [ - // { - // "type": "visibility", - // "value": "visible" - // }, - // { - // "type": "label", - // "value": "icon" - // }, - // { - // "type": "color", 
- // "value": "ruby", - // "name": "emerg" - // }, - // { - // "type": "color", - // "value": "fire", - // "name": "alert" - // }, - // { - // "type": "color", - // "value": "curacao", - // "name": "crit" - // }, - // { - // "type": "color", - // "value": "tiger", - // "name": "err" - // }, - // { - // "type": "color", - // "value": "pineapple", - // "name": "warning" - // }, - // { - // "type": "color", - // "value": "wolf", - // "name": "notice" - // }, - // { - // "type": "color", - // "value": "wolf", - // "name": "info" - // }, - // { - // "type": "color", - // "value": "wolf", - // "name": "debug" - // } - // ] - // }, - // { - // "name": "timestamp", - // "position": 3, - // "encodings": [ - // { - // "type": "visibility", - // "value": "visible" - // } - // ] - // }, - // { - // "name": "message", - // "position": 2, - // "encodings": [ - // { - // "type": "visibility", - // "value": "visible" - // } - // ] - // }, - // { - // "name": "facility", - // "position": 4, - // "encodings": [ - // { - // "type": "visibility", - // "value": "visible" - // } - // ] - // }, - // { - // "name": "procid", - // "position": 5, - // "encodings": [ - // { - // "type": "visibility", - // "value": "hidden" - // }, - // { - // "type": "displayName", - // "value": "ProcID!" - // } - // ] - // }, - // { - // "name": "appname", - // "position": 6, - // "encodings": [ - // { - // "type": "visibility", - // "value": "visible" - // }, - // { - // "type": "displayName", - // "value": "Application" - // } - // ] - // }, - // { - // "name": "host", - // "position": 7, - // "encodings": [ - // { - // "type": "visibility", - // "value": "visible" - // } - // ] - // } - // ] - // } - // `, - // }, - // }, - // { - // name: "GET /", - // subName: "signed into default org", - // fields: fields{ - // Config: &chronograf.Config{ - // Auth: chronograf.AuthConfig{ - // SuperAdminNewUsers: true, - // }, - // }, - // Organizations: []chronograf.Organization{ - // { - // ID: "1", - // Name: "cool", - // DefaultRole: roles.ViewerRoleName, - // }, - // }, - // Users: []chronograf.User{ - // { - // ID: 1, // This is artificial, but should be reflective of the users actual ID - // Name: "billibob", - // Provider: "github", - // Scheme: "oauth2", - // SuperAdmin: true, - // Roles: []chronograf.Role{ - // { - // Name: "admin", - // Organization: "default", - // }, - // }, - // }, - // }, - // }, - // args: args{ - // server: &server.Server{ - // GithubClientID: "not empty", - // GithubClientSecret: "not empty", - // }, - // method: "GET", - // path: "/chronograf/v1/", - // principal: oauth2.Principal{ - // Organization: "default", - // Subject: "billibob", - // Issuer: "github", - // }, - // }, - // wants: wants{ - // statusCode: 200, - // body: ` - //{ - // "layouts": "/chronograf/v1/layouts", - // "cells": "/chronograf/v2/cells", - // "users": "/chronograf/v1/organizations/default/users", - // "allUsers": "/chronograf/v1/users", - // "organizations": "/chronograf/v1/organizations", - // "mappings": "/chronograf/v1/mappings", - // "sources": "/chronograf/v1/sources", - // "me": "/chronograf/v1/me", - // "environment": "/chronograf/v1/env", - // "dashboards": "/chronograf/v1/dashboards", - // "dashboardsv2":"/chronograf/v2/dashboards", - // "config": { - // "self": "/chronograf/v1/config", - // "auth": "/chronograf/v1/config/auth" - // }, - // "auth": [ - // { - // "name": "github", - // "label": "Github", - // "login": "/oauth/github/login", - // "logout": "/oauth/github/logout", - // "callback": "/oauth/github/callback" - // 
} - // ], - // "logout": "/oauth/logout", - // "external": { - // "statusFeed": "" - // }, - // "orgConfig": { - // "logViewer": "/chronograf/v1/org_config/logviewer", - // "self": "/chronograf/v1/org_config" - // }, - // "flux": { - // "ast": "/chronograf/v1/flux/ast", - // "self": "/chronograf/v1/flux", - // "suggestions": "/chronograf/v1/flux/suggestions" - // } - //} - //`, - // }, - // }, - // { - // name: "GET /", - // subName: "signed into org 1", - // fields: fields{ - // Config: &chronograf.Config{ - // Auth: chronograf.AuthConfig{ - // SuperAdminNewUsers: true, - // }, - // }, - // Organizations: []chronograf.Organization{ - // { - // ID: "1", - // Name: "cool", - // DefaultRole: roles.ViewerRoleName, - // }, - // }, - // Users: []chronograf.User{ - // { - // ID: 1, // This is artificial, but should be reflective of the users actual ID - // Name: "billibob", - // Provider: "github", - // Scheme: "oauth2", - // SuperAdmin: false, - // Roles: []chronograf.Role{ - // { - // Name: "admin", - // Organization: "default", - // }, - // { - // Name: "member", - // Organization: "1", - // }, - // }, - // }, - // }, - // }, - // args: args{ - // server: &server.Server{ - // GithubClientID: "not empty", - // GithubClientSecret: "not empty", - // }, - // method: "GET", - // path: "/chronograf/v1/", - // principal: oauth2.Principal{ - // Organization: "1", - // Subject: "billibob", - // Issuer: "github", - // }, - // }, - // wants: wants{ - // statusCode: 200, - // body: ` - //{ - // "layouts": "/chronograf/v1/layouts", - // "cells": "/chronograf/v2/cells", - // "users": "/chronograf/v1/organizations/1/users", - // "allUsers": "/chronograf/v1/users", - // "organizations": "/chronograf/v1/organizations", - // "mappings": "/chronograf/v1/mappings", - // "sources": "/chronograf/v1/sources", - // "me": "/chronograf/v1/me", - // "environment": "/chronograf/v1/env", - // "dashboards": "/chronograf/v1/dashboards", - // "dashboardsv2":"/chronograf/v2/dashboards", - // "config": { - // "self": "/chronograf/v1/config", - // "auth": "/chronograf/v1/config/auth" - // }, - // "orgConfig": { - // "logViewer": "/chronograf/v1/org_config/logviewer", - // "self": "/chronograf/v1/org_config" - // }, - // "auth": [ - // { - // "name": "github", - // "label": "Github", - // "login": "/oauth/github/login", - // "logout": "/oauth/github/logout", - // "callback": "/oauth/github/callback" - // } - // ], - // "logout": "/oauth/logout", - // "external": { - // "statusFeed": "" - // }, - // "flux": { - // "ast": "/chronograf/v1/flux/ast", - // "self": "/chronograf/v1/flux", - // "suggestions": "/chronograf/v1/flux/suggestions" - // } - //} - //`, - // }, - // }, - } - - for _, tt := range tests { - testName := fmt.Sprintf("%s: %s", tt.name, tt.subName) - t.Run(testName, func(t *testing.T) { - ctx := context.TODO() - // Create Test Server - host, port := hostAndPort() - tt.args.server.Host = host - tt.args.server.Port = port - - // Use testdata directory for the canned data - tt.args.server.CannedPath = "testdata" - tt.args.server.ResourcesPath = "testdata" - - // This is so that we can use staticly generate jwts - tt.args.server.TokenSecret = "secret" - - // Endpoint for validating RSA256 signatures when using id_token parsing for ADFS - tt.args.server.JwksURL = "" - - boltFile := newBoltFile() - tt.args.server.BoltPath = boltFile - - // Prepopulate BoltDB Database for Server - boltdb := bolt.NewClient() - boltdb.Path = boltFile - - logger := &chronograf.NoopLogger{} - build := chronograf.BuildInfo{ - Version: 
"pre-1.4.0.0", - Commit: "", - } - _ = boltdb.Open(ctx, logger, build) - - if tt.fields.Config != nil { - if err := boltdb.ConfigStore.Update(ctx, tt.fields.Config); err != nil { - t.Fatalf("failed to update global application config %v", err) - return - } - } - - // Populate Organizations - for i, mapping := range tt.fields.Mappings { - o, err := boltdb.MappingsStore.Add(ctx, &mapping) - if err != nil { - t.Fatalf("failed to add mapping: %v", err) - return - } - tt.fields.Mappings[i] = *o - } - - // Populate Organizations - for i, organization := range tt.fields.Organizations { - o, err := boltdb.OrganizationsStore.Add(ctx, &organization) - if err != nil { - t.Fatalf("failed to add organization: %v", err) - return - } - tt.fields.Organizations[i] = *o - } - - // Populate Users - for i, user := range tt.fields.Users { - u, err := boltdb.UsersStore.Add(ctx, &user) - if err != nil { - t.Fatalf("failed to add user: %v", err) - return - } - tt.fields.Users[i] = *u - } - - // Populate Sources - for i, source := range tt.fields.Sources { - s, err := boltdb.SourcesStore.Add(ctx, source) - if err != nil { - t.Fatalf("failed to add source: %v", err) - return - } - tt.fields.Sources[i] = s - } - - // Populate Servers - for i, server := range tt.fields.Servers { - s, err := boltdb.ServersStore.Add(ctx, server) - if err != nil { - t.Fatalf("failed to add server: %v", err) - return - } - tt.fields.Servers[i] = s - } - - // Populate Layouts - for i, layout := range tt.fields.Layouts { - l, err := boltdb.LayoutsStore.Add(ctx, layout) - if err != nil { - t.Fatalf("failed to add layout: %v", err) - return - } - tt.fields.Layouts[i] = l - } - - // Populate Dashboards - for i, dashboard := range tt.fields.Dashboards { - d, err := boltdb.DashboardsStore.Add(ctx, dashboard) - if err != nil { - t.Fatalf("failed to add dashboard: %v", err) - return - } - tt.fields.Dashboards[i] = d - } - - _ = boltdb.Close() - - go tt.args.server.Serve(ctx) - serverURL := fmt.Sprintf("http://%v:%v%v", host, port, tt.args.path) - - // Wait for the server to come online - timeout := time.Now().Add(30 * time.Second) - for { - _, err := http.Get(serverURL + "/swagger.json") - if err == nil { - break - } - if time.Now().After(timeout) { - t.Fatalf("failed to start server") - return - } - } - - // Set the Expiry time on the principal - tt.args.principal.IssuedAt = time.Now() - tt.args.principal.ExpiresAt = time.Now().Add(10 * time.Second) - - // Construct HTTP Request - buf, _ := json.Marshal(tt.args.payload) - reqBody := ioutil.NopCloser(bytes.NewReader(buf)) - req, _ := http.NewRequest(tt.args.method, serverURL, reqBody) - token, _ := oauth2.NewJWT(tt.args.server.TokenSecret, tt.args.server.JwksURL).Create(ctx, tt.args.principal) - req.AddCookie(&http.Cookie{ - Name: "session", - Value: string(token), - HttpOnly: true, - Path: "/", - }) - - // Make actual http request - resp, err := http.DefaultClient.Do(req) - if err != nil { - t.Fatalf("failed to make httprequest: %v", err) - return - } - - content := resp.Header.Get("Content-Type") - body, _ := ioutil.ReadAll(resp.Body) - - if resp.StatusCode != tt.wants.statusCode { - t.Errorf( - "%s %s Status Code = %v, want %v", - tt.args.method, - tt.args.path, - resp.StatusCode, - tt.wants.statusCode, - ) - } - - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf( - "%s %s Content Type = %v, want %v", - tt.args.method, - tt.args.path, - content, - tt.wants.contentType, - ) - } - - if eq, err := jsonEqual(tt.wants.body, string(body)); err != nil || !eq { - t.Errorf( 
- "%s %s Body = %v, want %v", - tt.args.method, - tt.args.path, - string(body), - tt.wants.body, - ) - } - - tt.args.server.Listener.Close() - }) - } -} diff --git a/chronograf/integrations/testdata/example.kap b/chronograf/integrations/testdata/example.kap deleted file mode 100644 index 611216d0811..00000000000 --- a/chronograf/integrations/testdata/example.kap +++ /dev/null @@ -1,8 +0,0 @@ -{ - "id": "5000", - "srcID": "5000", - "name": "Kapa 1", - "url": "http://localhost:9092", - "active": true, - "organization": "howdy" -} diff --git a/chronograf/integrations/testdata/example.org b/chronograf/integrations/testdata/example.org deleted file mode 100644 index 21031e50b16..00000000000 --- a/chronograf/integrations/testdata/example.org +++ /dev/null @@ -1,5 +0,0 @@ -{ - "id": "howdy", - "name": "An Organization", - "defaultRole": "viewer" -} diff --git a/chronograf/integrations/testdata/example.src b/chronograf/integrations/testdata/example.src deleted file mode 100644 index 2e92c7fc659..00000000000 --- a/chronograf/integrations/testdata/example.src +++ /dev/null @@ -1,14 +0,0 @@ -{ - "id": "5000", - "name": "Influx 1", - "username": "user1", - "password": "pass1", - "url": "http://localhost:8086", - "metaUrl": "http://metaurl.com", - "type": "influx-enterprise", - "insecureSkipVerify": false, - "default": true, - "telegraf": "telegraf", - "sharedSecret": "cubeapples", - "organization": "howdy" -} diff --git a/chronograf/integrations/testdata/mydash.dashboard b/chronograf/integrations/testdata/mydash.dashboard deleted file mode 100644 index 3e81b46dcef..00000000000 --- a/chronograf/integrations/testdata/mydash.dashboard +++ /dev/null @@ -1,189 +0,0 @@ -{ - "id": 1000, - "cells": [ - { - "i": "8f61c619-dd9b-4761-8aa8-577f27247093", - "x": 0, - "y": 0, - "w": 11, - "h": 5, - "name": "Untitled Cell", - "queries": [ - { - "query": "SELECT mean(\"value\") AS \"mean_value\" FROM \"telegraf\".\"autogen\".\"cpg\" WHERE time \u003e :dashboardTime: GROUP BY time(:interval:) FILL(null)", - "queryConfig": { - "id": "b20baa61-bacb-4a17-b27d-b904a0d18114", - "database": "telegraf", - "measurement": "cpg", - "retentionPolicy": "autogen", - "fields": [ - { - "value": "mean", - "type": "func", - "alias": "mean_value", - "args": [ - { - "value": "value", - "type": "field", - "alias": "" - } - ] - } - ], - "tags": {}, - "groupBy": { - "time": "auto", - "tags": [] - }, - "areTagsAccepted": true, - "fill": "null", - "rawText": null, - "range": null, - "shifts": [] - }, - "source": "/chronograf/v1/sources/2" - } - ], - "axes": { - "x": { - "bounds": [], - "label": "", - "prefix": "", - "suffix": "", - "base": "10", - "scale": "linear" - }, - "y": { - "bounds": [], - "label": "", - "prefix": "", - "suffix": "", - "base": "10", - "scale": "linear" - }, - "y2": { - "bounds": [], - "label": "", - "prefix": "", - "suffix": "", - "base": "10", - "scale": "linear" - } - }, - "type": "line", - "colors": [ - { - "id": "0", - "type": "min", - "hex": "#00C9FF", - "name": "laser", - "value": "0" - }, - { - "id": "1", - "type": "max", - "hex": "#9394FF", - "name": "comet", - "value": "100" - } - ], - "legend": { - "type": "static", - "orientation": "bottom" - } - } - ], - "templates": [ - { - "tempVar": ":dbs:", - "values": [ - { - "value": "_internal", - "type": "database", - "selected": true - }, - { - "value": "telegraf", - "type": "database", - "selected": false - }, - { - "value": "tensorflowdb", - "type": "database", - "selected": false - }, - { - "value": "pushgateway", - "type": "database", - "selected": false - }, 
- { - "value": "node_exporter", - "type": "database", - "selected": false - }, - { - "value": "mydb", - "type": "database", - "selected": false - }, - { - "value": "tiny", - "type": "database", - "selected": false - }, - { - "value": "blah", - "type": "database", - "selected": false - }, - { - "value": "test", - "type": "database", - "selected": false - }, - { - "value": "chronograf", - "type": "database", - "selected": false - }, - { - "value": "db_name", - "type": "database", - "selected": false - }, - { - "value": "demo", - "type": "database", - "selected": false - }, - { - "value": "eeg", - "type": "database", - "selected": false - }, - { - "value": "solaredge", - "type": "database", - "selected": false - }, - { - "value": "zipkin", - "type": "database", - "selected": false - } - ], - "id": "e7e498bf-5869-4874-9071-24628a2cda63", - "type": "databases", - "label": "", - "query": { - "influxql": "SHOW DATABASES", - "measurement": "", - "tagKey": "", - "fieldKey": "" - } - } - ], - "name": "Name This Dashboard", - "organization": "howdy" - } diff --git a/chronograf/integrations/utils.go b/chronograf/integrations/utils.go deleted file mode 100644 index 2069c09595d..00000000000 --- a/chronograf/integrations/utils.go +++ /dev/null @@ -1,54 +0,0 @@ -package integrations - -import ( - "encoding/json" - "io/ioutil" - "net/http/httptest" - "net/url" - "strconv" - "strings" - - "github.com/google/go-cmp/cmp" -) - -func hostAndPort() (string, int) { - s := httptest.NewServer(nil) - defer s.Close() - - u, err := url.Parse(s.URL) - if err != nil { - panic(err) - } - xs := strings.Split(u.Host, ":") - host := xs[0] - portStr := xs[1] - port, err := strconv.Atoi(portStr) - if err != nil { - panic(err) - } - return host, port - -} - -func newBoltFile() string { - f, err := ioutil.TempFile("", "chronograf-bolt-") - if err != nil { - panic(err) - } - f.Close() - - return f.Name() -} - -func jsonEqual(s1, s2 string) (eq bool, err error) { - var o1, o2 interface{} - - if err = json.Unmarshal([]byte(s1), &o1); err != nil { - return - } - if err = json.Unmarshal([]byte(s2), &o2); err != nil { - return - } - - return cmp.Equal(o1, o2), nil -} diff --git a/chronograf/kapacitor.go b/chronograf/kapacitor.go deleted file mode 100644 index f9fe23128d8..00000000000 --- a/chronograf/kapacitor.go +++ /dev/null @@ -1,159 +0,0 @@ -package chronograf - -import "encoding/json" - -// AlertNodes defines all possible kapacitor interactions with an alert. -type AlertNodes struct { - IsStateChangesOnly bool `json:"stateChangesOnly"` // IsStateChangesOnly will only send alerts on state changes. - UseFlapping bool `json:"useFlapping"` // UseFlapping enables flapping detection. Flapping occurs when a service or host changes state too frequently, resulting in a storm of problem and recovery notification - Posts []*Post `json:"post"` // HTTPPost will post the JSON alert data to the specified URLs. - TCPs []*TCP `json:"tcp"` // TCP will send the JSON alert data to the specified endpoint via TCP. - Email []*Email `json:"email"` // Email will send alert data to the specified emails. - Exec []*Exec `json:"exec"` // Exec will run shell commands when an alert triggers - Log []*Log `json:"log"` // Log will log JSON alert data to files in JSON lines format. 
- VictorOps []*VictorOps `json:"victorOps"` // VictorOps will send alert to all VictorOps - PagerDuty []*PagerDuty `json:"pagerDuty"` // PagerDuty will send alert to all PagerDuty - PagerDuty2 []*PagerDuty `json:"pagerDuty2"` // PagerDuty2 will send alert to PagerDuty v2 - Pushover []*Pushover `json:"pushover"` // Pushover will send alert to all Pushover - Sensu []*Sensu `json:"sensu"` // Sensu will send alert to all Sensu - Slack []*Slack `json:"slack"` // Slack will send alert to Slack - Telegram []*Telegram `json:"telegram"` // Telegram will send alert to all Telegram - HipChat []*HipChat `json:"hipChat"` // HipChat will send alert to all HipChat - Alerta []*Alerta `json:"alerta"` // Alerta will send alert to all Alerta - OpsGenie []*OpsGenie `json:"opsGenie"` // OpsGenie will send alert to all OpsGenie - OpsGenie2 []*OpsGenie `json:"opsGenie2"` // OpsGenie2 will send alert to all OpsGenie v2 - Talk []*Talk `json:"talk"` // Talk will send alert to all Talk - Kafka []*Kafka `json:"kafka"` // Kafka will send alert to all Kafka -} - -// Post will POST alerts to a destination URL -type Post struct { - URL string `json:"url"` // URL is the destination of the POST. - Headers map[string]string `json:"headers"` // Headers are added to the output POST -} - -// Log sends the output of the alert to a file -type Log struct { - FilePath string `json:"filePath"` // Absolute path to the log file; it will be created if it does not exist. -} - -// Alerta sends the output of the alert to an alerta service -type Alerta struct { - Token string `json:"token"` // Token is the authentication token that overrides the global configuration. - Resource string `json:"resource"` // Resource under alarm, deliberately not host-centric - Event string `json:"event"` // Event is the event name e.g. NodeDown, QUEUE:LENGTH:EXCEEDED - Environment string `json:"environment"` // Environment is the affected environment; used to namespace the resource - Group string `json:"group"` // Group is an event group used to group events of similar type - Value string `json:"value"` // Value is the event value e.g. 100%, Down, PingFail, 55ms, ORA-1664 - Origin string `json:"origin"` // Origin is the name of monitoring component that generated the alert - Service []string `json:"service"` // Service is the list of affected services -} - -// Exec executes a shell command on an alert -type Exec struct { - Command []string `json:"command"` // Command is the space separated command and args to execute. -} - -// TCP sends the alert to the address -type TCP struct { - Address string `json:"address"` // Address is the address and port to send the alert -} - -// Email sends the alert to a list of email addresses -type Email struct { - To []string `json:"to"` // To is the list of email recipients. -} - -// VictorOps sends alerts to the victorops.com service -type VictorOps struct { - RoutingKey string `json:"routingKey"` // RoutingKey is what is used to map the alert to a team -} - -// PagerDuty sends alerts to the pagerduty.com service -type PagerDuty struct { - ServiceKey string `json:"serviceKey"` // ServiceKey is the GUID of one of the "Generic API" integrations -} - -// HipChat sends alerts to stride.com -type HipChat struct { - Room string `json:"room"` // Room is the HipChat room to post messages. - Token string `json:"token"` // Token is the HipChat authentication token.
-} - -// Sensu sends alerts to sensu or sensuapp.org -type Sensu struct { - Source string `json:"source"` // Source is the check source, used to create a proxy client for an external resource - Handlers []string `json:"handlers"` // Handlers are the Sensu event handlers used for taking action on events -} - -// Pushover sends alerts to pushover.net -type Pushover struct { - // UserKey is the User/Group key of your user (or you), viewable when logged - // into the Pushover dashboard. Often referred to as USER_KEY - // in the Pushover documentation. - UserKey string `json:"userKey"` - - // Device is the user's device name to send message directly to that device, - // rather than all of a user's devices (multiple device names may - // be separated by a comma) - Device string `json:"device"` - - // Title is your message's title, otherwise your app's name is used - Title string `json:"title"` - - // URL is a supplementary URL to show with your message - URL string `json:"url"` - - // URLTitle is a title for your supplementary URL, otherwise just URL is shown - URLTitle string `json:"urlTitle"` - - // Sound is the name of one of the sounds supported by the device clients to override - // the user's default sound choice - Sound string `json:"sound"` -} - -// Slack sends alerts to a slack.com channel -type Slack struct { - Channel string `json:"channel"` // Slack channel in which to post messages. - Username string `json:"username"` // Username of the Slack bot. - IconEmoji string `json:"iconEmoji"` // IconEmoji is an emoji name surrounded in ':' characters; the emoji image will replace the normal user icon for the slack bot. - Workspace string `json:"workspace"` // Workspace is the slack workspace for the alert handler -} - -// Telegram sends alerts to telegram.org -type Telegram struct { - ChatID string `json:"chatId"` // ChatID is the Telegram user/group ID to post messages to. - ParseMode string `json:"parseMode"` // ParseMode tells telegram how to render the message (Markdown or HTML) - DisableWebPagePreview bool `json:"disableWebPagePreview"` // DisableWebPagePreview disables link previews in alert messages. - DisableNotification bool `json:"disableNotification"` // DisableNotification disables notifications on iOS devices and sounds on Android devices. Android users continue to receive notifications.
-} - -// OpsGenie sends alerts to opsgenie.com -type OpsGenie struct { - Teams []string `json:"teams"` // Teams that the alert will be routed to send notifications - Recipients []string `json:"recipients"` // Recipients can be a single user, group, escalation, or schedule (https://docs.opsgenie.com/docs/alert-recipients-and-teams) -} - -// Talk sends alerts to Jane Talk (https://jianliao.com/site) -type Talk struct{} - -// Kafka sends alerts to any Kafka brokers specified in the handler config -type Kafka struct { - Cluster string `json:"cluster"` - Topic string `json:"kafka-topic"` - Template string `json:"template"` -} - -// MarshalJSON converts AlertNodes to JSON -func (n *AlertNodes) MarshalJSON() ([]byte, error) { - type Alias AlertNodes - var raw = &struct { - Type string `json:"typeOf"` - *Alias - }{ - Type: "alert", - Alias: (*Alias)(n), - } - - return json.Marshal(raw) -} diff --git a/chronograf/memdb/kapacitors.go b/chronograf/memdb/kapacitors.go deleted file mode 100644 index 1635942d5db..00000000000 --- a/chronograf/memdb/kapacitors.go +++ /dev/null @@ -1,56 +0,0 @@ -package memdb - -import ( - "context" - "fmt" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -// Ensure KapacitorStore implements chronograf.ServersStore. -var _ chronograf.ServersStore = &KapacitorStore{} - -// KapacitorStore implements the chronograf.ServersStore interface, and keeps -// an in-memory Kapacitor according to startup configuration -type KapacitorStore struct { - Kapacitor *chronograf.Server -} - -// All will return a slice containing a configured source -func (store *KapacitorStore) All(ctx context.Context) ([]chronograf.Server, error) { - if store.Kapacitor != nil { - return []chronograf.Server{*store.Kapacitor}, nil - } - return nil, nil -} - -// Add does not have any effect -func (store *KapacitorStore) Add(ctx context.Context, kap chronograf.Server) (chronograf.Server, error) { - return chronograf.Server{}, fmt.Errorf("in-memory KapacitorStore does not support adding a Kapacitor") -} - -// Delete removes the in-memory configured Kapacitor if its ID matches what's provided -func (store *KapacitorStore) Delete(ctx context.Context, kap chronograf.Server) error { - if store.Kapacitor == nil || store.Kapacitor.ID != kap.ID { - return fmt.Errorf("unable to find Kapacitor with id %d", kap.ID) - } - store.Kapacitor = nil - return nil -} - -// Get returns the in-memory Kapacitor if its ID matches what's provided -func (store *KapacitorStore) Get(ctx context.Context, id int) (chronograf.Server, error) { - if store.Kapacitor == nil || store.Kapacitor.ID != id { - return chronograf.Server{}, fmt.Errorf("unable to find Kapacitor with id %d", id) - } - return *store.Kapacitor, nil -} - -// Update overwrites the in-memory configured Kapacitor if its ID matches what's provided -func (store *KapacitorStore) Update(ctx context.Context, kap chronograf.Server) error { - if store.Kapacitor == nil || store.Kapacitor.ID != kap.ID { - return fmt.Errorf("unable to find Kapacitor with id %d", kap.ID) - } - store.Kapacitor = &kap - return nil -} diff --git a/chronograf/memdb/kapacitors_test.go b/chronograf/memdb/kapacitors_test.go deleted file mode 100644 index 74a4cbbae3c..00000000000 --- a/chronograf/memdb/kapacitors_test.go +++ /dev/null @@ -1,128 +0,0 @@ -package memdb - -import ( - "context" - "testing" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -func TestInterfaceImplementation(t *testing.T) { - var _ chronograf.ServersStore = &KapacitorStore{} -} - -func TestKapacitorStoreAll(t 
*testing.T) { - ctx := context.Background() - - store := KapacitorStore{} - kaps, err := store.All(ctx) - if err != nil { - t.Fatal("All should not throw an error with an empty Store") - } - if len(kaps) != 0 { - t.Fatal("Store should be empty") - } - - store.Kapacitor = &chronograf.Server{} - kaps, err = store.All(ctx) - if err != nil { - t.Fatal("All should not throw an error with a non-empty Store") - } - if len(kaps) != 1 { - t.Fatal("Store should have 1 element") - } -} - -func TestKapacitorStoreAdd(t *testing.T) { - ctx := context.Background() - - store := KapacitorStore{} - _, err := store.Add(ctx, chronograf.Server{}) - if err == nil { - t.Fatal("Store should not support adding another Kapacitor") - } -} - -func TestKapacitorStoreDelete(t *testing.T) { - ctx := context.Background() - - store := KapacitorStore{} - err := store.Delete(ctx, chronograf.Server{}) - if err == nil { - t.Fatal("Delete should not operate on an empty Store") - } - - store.Kapacitor = &chronograf.Server{ - ID: 9, - } - err = store.Delete(ctx, chronograf.Server{ - ID: 8, - }) - if err == nil { - t.Fatal("Delete should not remove elements with the wrong ID") - } - - err = store.Delete(ctx, chronograf.Server{ - ID: 9, - }) - if err != nil { - t.Fatal("Delete should remove an element with a matching ID") - } -} - -func TestKapacitorStoreGet(t *testing.T) { - ctx := context.Background() - - store := KapacitorStore{} - _, err := store.Get(ctx, 9) - if err == nil { - t.Fatal("Get should return an error for an empty Store") - } - - store.Kapacitor = &chronograf.Server{ - ID: 9, - } - _, err = store.Get(ctx, 8) - if err == nil { - t.Fatal("Get should return an error if it finds no matches") - } - - store.Kapacitor = &chronograf.Server{ - ID: 9, - } - kap, err := store.Get(ctx, 9) - if err != nil || kap.ID != 9 { - t.Fatal("Get should find the element with a matching ID") - } -} - -func TestKapacitorStoreUpdate(t *testing.T) { - ctx := context.Background() - - store := KapacitorStore{} - err := store.Update(ctx, chronograf.Server{}) - if err == nil { - t.Fatal("Update should return an error for an empty Store") - } - - store.Kapacitor = &chronograf.Server{ - ID: 9, - } - err = store.Update(ctx, chronograf.Server{ - ID: 8, - }) - if err == nil { - t.Fatal("Update should return an error if it finds no matches") - } - - store.Kapacitor = &chronograf.Server{ - ID: 9, - } - err = store.Update(ctx, chronograf.Server{ - ID: 9, - URL: "http://crystal.pepsi.com", - }) - if err != nil || store.Kapacitor.URL != "http://crystal.pepsi.com" { - t.Fatal("Update should overwrite elements with matching IDs") - } -}
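These memdb stores hold exactly one element, supplied at startup: reads are gated on an ID match and writes that would grow the store are rejected. A minimal usage sketch of the KapacitorStore deleted above (import paths as in these files; the `main` wrapper is illustrative only):

```go
package main

import (
	"context"
	"fmt"

	"github.com/influxdata/influxdb/v2/chronograf"
	"github.com/influxdata/influxdb/v2/chronograf/memdb"
)

func main() {
	ctx := context.Background()

	// The store carries the single Kapacitor configured at startup.
	store := memdb.KapacitorStore{
		Kapacitor: &chronograf.Server{ID: 9, URL: "http://localhost:9092"},
	}

	// Reads succeed only for the matching ID.
	if kap, err := store.Get(ctx, 9); err == nil {
		fmt.Println("found:", kap.URL)
	}

	// Adds are rejected outright; the store cannot grow.
	if _, err := store.Add(ctx, chronograf.Server{ID: 10}); err != nil {
		fmt.Println("add rejected:", err)
	}
}
```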
diff --git a/chronograf/memdb/sources.go b/chronograf/memdb/sources.go deleted file mode 100644 index 95f48517dbe..00000000000 --- a/chronograf/memdb/sources.go +++ /dev/null @@ -1,55 +0,0 @@ -package memdb - -import ( - "context" - "fmt" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -// Ensure SourcesStore implements chronograf.SourcesStore. -var _ chronograf.SourcesStore = &SourcesStore{} - -// SourcesStore implements the chronograf.SourcesStore interface -type SourcesStore struct { - Source *chronograf.Source -} - -// Add does not have any effect -func (store *SourcesStore) Add(ctx context.Context, src chronograf.Source) (chronograf.Source, error) { - return chronograf.Source{}, fmt.Errorf("in-memory SourcesStore does not support adding a Source") -} - -// All will return a slice containing a configured source -func (store *SourcesStore) All(ctx context.Context) ([]chronograf.Source, error) { - if store.Source != nil { - return []chronograf.Source{*store.Source}, nil - } - return nil, nil -} - -// Delete removes the SourcesStore.Source if it matches the provided Source -func (store *SourcesStore) Delete(ctx context.Context, src chronograf.Source) error { - if store.Source == nil || store.Source.ID != src.ID { - return fmt.Errorf("unable to find Source with id %d", src.ID) - } - store.Source = nil - return nil -} - -// Get returns the configured source if the id matches -func (store *SourcesStore) Get(ctx context.Context, id int) (chronograf.Source, error) { - if store.Source == nil || store.Source.ID != id { - return chronograf.Source{}, fmt.Errorf("unable to find Source with id %d", id) - } - return *store.Source, nil -} - -// Update overwrites the in-memory configured Source if its ID matches what's provided -func (store *SourcesStore) Update(ctx context.Context, src chronograf.Source) error { - if store.Source == nil || store.Source.ID != src.ID { - return fmt.Errorf("unable to find Source with id %d", src.ID) - } - store.Source = &src - return nil -} diff --git a/chronograf/memdb/sources_test.go b/chronograf/memdb/sources_test.go deleted file mode 100644 index f5b7a8bf908..00000000000 --- a/chronograf/memdb/sources_test.go +++ /dev/null @@ -1,128 +0,0 @@ -package memdb - -import ( - "context" - "testing" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -func TestSourcesStore(t *testing.T) { - var _ chronograf.SourcesStore = &SourcesStore{} -} - -func TestSourcesStoreAdd(t *testing.T) { - ctx := context.Background() - - store := SourcesStore{} - _, err := store.Add(ctx, chronograf.Source{}) - if err == nil { - t.Fatal("Store should not support adding another source") - } -} - -func TestSourcesStoreAll(t *testing.T) { - ctx := context.Background() - - store := SourcesStore{} - srcs, err := store.All(ctx) - if err != nil { - t.Fatal("All should not throw an error with an empty Store") - } - if len(srcs) != 0 { - t.Fatal("Store should be empty") - } - - store.Source = &chronograf.Source{} - srcs, err = store.All(ctx) - if err != nil { - t.Fatal("All should not throw an error with a non-empty Store") - } - if len(srcs) != 1 { - t.Fatal("Store should have 1 element") - } -} - -func TestSourcesStoreDelete(t *testing.T) { - ctx := context.Background() - - store := SourcesStore{} - err := store.Delete(ctx, chronograf.Source{}) - if err == nil { - t.Fatal("Delete should not operate on an empty Store") - } - - store.Source = &chronograf.Source{ - ID: 9, - } - err = store.Delete(ctx, chronograf.Source{ - ID: 8, - }) - if err == nil { - t.Fatal("Delete should not remove elements with the wrong ID") - } - - err = store.Delete(ctx, chronograf.Source{ - ID: 9, - }) - if err != nil { - t.Fatal("Delete should remove an element with a matching ID") - } -} - -func TestSourcesStoreGet(t *testing.T) { - ctx := context.Background() - - store := SourcesStore{} - _, err := store.Get(ctx, 9) - if err == nil { - t.Fatal("Get should return an error for an empty Store") - } - - store.Source =
&chronograf.Source{ - ID: 9, - } - _, err = store.Get(ctx, 8) - if err == nil { - t.Fatal("Get should return an error if it finds no matches") - } - - store.Source = &chronograf.Source{ - ID: 9, - } - src, err := store.Get(ctx, 9) - if err != nil || src.ID != 9 { - t.Fatal("Get should find the element with a matching ID") - } -} - -func TestSourcesStoreUpdate(t *testing.T) { - ctx := context.Background() - - store := SourcesStore{} - err := store.Update(ctx, chronograf.Source{}) - if err == nil { - t.Fatal("Update should return an error for an empty Store") - } - - store.Source = &chronograf.Source{ - ID: 9, - } - err = store.Update(ctx, chronograf.Source{ - ID: 8, - }) - if err == nil { - t.Fatal("Update should return an error if it finds no matches") - } - - store.Source = &chronograf.Source{ - ID: 9, - } - err = store.Update(ctx, chronograf.Source{ - ID: 9, - URL: "http://crystal.pepsi.com", - }) - if err != nil || store.Source.URL != "http://crystal.pepsi.com" { - t.Fatal("Update should overwrite elements with matching IDs") - } -} diff --git a/chronograf/mocks/auth.go b/chronograf/mocks/auth.go deleted file mode 100644 index f4302453b11..00000000000 --- a/chronograf/mocks/auth.go +++ /dev/null @@ -1,51 +0,0 @@ -package mocks - -import ( - "context" - "net/http" - - "github.com/influxdata/influxdb/v2/chronograf/oauth2" -) - -// Authenticator implements an OAuth2 authenticator -type Authenticator struct { - Principal oauth2.Principal - ValidateErr error - ExtendErr error - Serialized string -} - -// Validate returns Principal associated with authenticated and authorized -// entity if successful. -func (a *Authenticator) Validate(context.Context, *http.Request) (oauth2.Principal, error) { - return a.Principal, a.ValidateErr -} - -// Extend will extend the lifetime of an already validated Principal -func (a *Authenticator) Extend(ctx context.Context, w http.ResponseWriter, p oauth2.Principal) (oauth2.Principal, error) { - cookie := http.Cookie{} - - http.SetCookie(w, &cookie) - return a.Principal, a.ExtendErr -} - -// Authorize will grant privileges to a Principal -func (a *Authenticator) Authorize(ctx context.Context, w http.ResponseWriter, p oauth2.Principal) error { - cookie := http.Cookie{} - - http.SetCookie(w, &cookie) - return nil -} - -// Expire revokes privileges from a Principal -func (a *Authenticator) Expire(http.ResponseWriter) {} - -// ValidAuthorization returns the Principal -func (a *Authenticator) ValidAuthorization(ctx context.Context, serializedAuthorization string) (oauth2.Principal, error) { - return oauth2.Principal{}, nil -} - -// Serialize returns the serialized values stored on the Authenticator -func (a *Authenticator) Serialize(context.Context, oauth2.Principal) (string, error) { - return a.Serialized, nil -} diff --git a/chronograf/mocks/config.go b/chronograf/mocks/config.go deleted file mode 100644 index 5dd605d947a..00000000000 --- a/chronograf/mocks/config.go +++ /dev/null @@ -1,28 +0,0 @@ -package mocks - -import ( - "context" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -// ConfigStore stores global application configuration -type ConfigStore struct { - Config *chronograf.Config -} - -// Initialize is a noop in the mocks store -func (c ConfigStore) Initialize(ctx context.Context) error { - return nil -} - -// Get returns the whole global application configuration -func (c ConfigStore) Get(ctx context.Context) (*chronograf.Config, error) { - return c.Config, nil -} - -// Update updates the whole global application configuration -func (c
ConfigStore) Update(ctx context.Context, config *chronograf.Config) error { - c.Config = config - return nil -} diff --git a/chronograf/mocks/dashboards.go b/chronograf/mocks/dashboards.go deleted file mode 100644 index f50c2f2f5ae..00000000000 --- a/chronograf/mocks/dashboards.go +++ /dev/null @@ -1,37 +0,0 @@ -package mocks - -import ( - "context" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -var _ chronograf.DashboardsStore = &DashboardsStore{} - -type DashboardsStore struct { - AddF func(ctx context.Context, newDashboard chronograf.Dashboard) (chronograf.Dashboard, error) - AllF func(ctx context.Context) ([]chronograf.Dashboard, error) - DeleteF func(ctx context.Context, target chronograf.Dashboard) error - GetF func(ctx context.Context, id chronograf.DashboardID) (chronograf.Dashboard, error) - UpdateF func(ctx context.Context, target chronograf.Dashboard) error -} - -func (d *DashboardsStore) Add(ctx context.Context, newDashboard chronograf.Dashboard) (chronograf.Dashboard, error) { - return d.AddF(ctx, newDashboard) -} - -func (d *DashboardsStore) All(ctx context.Context) ([]chronograf.Dashboard, error) { - return d.AllF(ctx) -} - -func (d *DashboardsStore) Delete(ctx context.Context, target chronograf.Dashboard) error { - return d.DeleteF(ctx, target) -} - -func (d *DashboardsStore) Get(ctx context.Context, id chronograf.DashboardID) (chronograf.Dashboard, error) { - return d.GetF(ctx, id) -} - -func (d *DashboardsStore) Update(ctx context.Context, target chronograf.Dashboard) error { - return d.UpdateF(ctx, target) -} diff --git a/chronograf/mocks/databases.go b/chronograf/mocks/databases.go deleted file mode 100644 index a5117ed96e9..00000000000 --- a/chronograf/mocks/databases.go +++ /dev/null @@ -1,69 +0,0 @@ -package mocks - -import ( - "context" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -var _ chronograf.Databases = &Databases{} - -// Databases mock allows all databases methods to be set for testing -type Databases struct { - AllDBF func(context.Context) ([]chronograf.Database, error) - ConnectF func(context.Context, *chronograf.Source) error - CreateDBF func(context.Context, *chronograf.Database) (*chronograf.Database, error) - DropDBF func(context.Context, string) error - - AllRPF func(context.Context, string) ([]chronograf.RetentionPolicy, error) - CreateRPF func(context.Context, string, *chronograf.RetentionPolicy) (*chronograf.RetentionPolicy, error) - UpdateRPF func(context.Context, string, string, *chronograf.RetentionPolicy) (*chronograf.RetentionPolicy, error) - DropRPF func(context.Context, string, string) error - - GetMeasurementsF func(ctx context.Context, db string, limit, offset int) ([]chronograf.Measurement, error) -} - -// AllDB lists all databases in the current data source -func (d *Databases) AllDB(ctx context.Context) ([]chronograf.Database, error) { - return d.AllDBF(ctx) -} - -// Connect connects to a database in the current data source -func (d *Databases) Connect(ctx context.Context, src *chronograf.Source) error { - return d.ConnectF(ctx, src) -} - -// CreateDB creates a database in the current data source -func (d *Databases) CreateDB(ctx context.Context, db *chronograf.Database) (*chronograf.Database, error) { - return d.CreateDBF(ctx, db) -} - -// DropDB drops a database in the current data source -func (d *Databases) DropDB(ctx context.Context, db string) error { - return d.DropDBF(ctx, db) -} - -// AllRP lists all retention policies in the current data source -func (d *Databases) AllRP(ctx 
context.Context, rpX string) ([]chronograf.RetentionPolicy, error) { - return d.AllRPF(ctx, rpX) -} - -// CreateRP creates a retention policy in the current data source -func (d *Databases) CreateRP(ctx context.Context, rpX string, rp *chronograf.RetentionPolicy) (*chronograf.RetentionPolicy, error) { - return d.CreateRPF(ctx, rpX, rp) -} - -// UpdateRP updates a retention policy in the current data source -func (d *Databases) UpdateRP(ctx context.Context, rpX string, rpY string, rp *chronograf.RetentionPolicy) (*chronograf.RetentionPolicy, error) { - return d.UpdateRPF(ctx, rpX, rpY, rp) -} - -// DropRP drops a retention policy in the current data source -func (d *Databases) DropRP(ctx context.Context, rpX string, rpY string) error { - return d.DropRPF(ctx, rpX, rpY) -} - -// GetMeasurements lists measurements in the current data source -func (d *Databases) GetMeasurements(ctx context.Context, db string, limit, offset int) ([]chronograf.Measurement, error) { - return d.GetMeasurementsF(ctx, db, limit, offset) -} diff --git a/chronograf/mocks/kapacitor_client.go b/chronograf/mocks/kapacitor_client.go deleted file mode 100644 index 648952ea7d1..00000000000 --- a/chronograf/mocks/kapacitor_client.go +++ /dev/null @@ -1,34 +0,0 @@ -package mocks - -// TODO(desa): resolve kapacitor dependency - -//var _ kapacitor.KapaClient = &KapaClient{} -// -//// Client is a mock Kapacitor client -//type KapaClient struct { -// CreateTaskF func(opts client.CreateTaskOptions) (client.Task, error) -// DeleteTaskF func(link client.Link) error -// ListTasksF func(opts *client.ListTasksOptions) ([]client.Task, error) -// TaskF func(link client.Link, opts *client.TaskOptions) (client.Task, error) -// UpdateTaskF func(link client.Link, opts client.UpdateTaskOptions) (client.Task, error) -//} -// -//func (p *KapaClient) CreateTask(opts client.CreateTaskOptions) (client.Task, error) { -// return p.CreateTaskF(opts) -//} -// -//func (p *KapaClient) DeleteTask(link client.Link) error { -// return p.DeleteTaskF(link) -//} -// -//func (p *KapaClient) ListTasks(opts *client.ListTasksOptions) ([]client.Task, error) { -// return p.ListTasksF(opts) -//} -// -//func (p *KapaClient) Task(link client.Link, opts *client.TaskOptions) (client.Task, error) { -// return p.TaskF(link, opts) -//} -// -//func (p *KapaClient) UpdateTask(link client.Link, opts client.UpdateTaskOptions) (client.Task, error) { -// return p.UpdateTaskF(link, opts) -//} diff --git a/chronograf/mocks/layouts.go b/chronograf/mocks/layouts.go deleted file mode 100644 index 0f321d27e75..00000000000 --- a/chronograf/mocks/layouts.go +++ /dev/null @@ -1,37 +0,0 @@ -package mocks - -import ( - "context" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -var _ chronograf.LayoutsStore = &LayoutsStore{} - -type LayoutsStore struct { - AddF func(ctx context.Context, layout chronograf.Layout) (chronograf.Layout, error) - AllF func(ctx context.Context) ([]chronograf.Layout, error) - DeleteF func(ctx context.Context, layout chronograf.Layout) error - GetF func(ctx context.Context, id string) (chronograf.Layout, error) - UpdateF func(ctx context.Context, layout chronograf.Layout) error -} - -func (s *LayoutsStore) Add(ctx context.Context, layout chronograf.Layout) (chronograf.Layout, error) { - return s.AddF(ctx, layout) -} - -func (s *LayoutsStore) All(ctx context.Context) ([]chronograf.Layout, error) { - return s.AllF(ctx) -} - -func (s *LayoutsStore) Delete(ctx context.Context, layout chronograf.Layout) error { - return s.DeleteF(ctx, layout) -} - -func 
(s *LayoutsStore) Get(ctx context.Context, id string) (chronograf.Layout, error) { - return s.GetF(ctx, id) -} - -func (s *LayoutsStore) Update(ctx context.Context, layout chronograf.Layout) error { - return s.UpdateF(ctx, layout) -} diff --git a/chronograf/mocks/logger.go b/chronograf/mocks/logger.go deleted file mode 100644 index 9f9d2e03be0..00000000000 --- a/chronograf/mocks/logger.go +++ /dev/null @@ -1,88 +0,0 @@ -package mocks - -import ( - "fmt" - "io" - "testing" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -// NewLogger returns a mock logger that implements chronograf.Logger -func NewLogger() chronograf.Logger { - return &TestLogger{} -} - -type LogMessage struct { - Level string - Body string -} - -// TestLogger is a chronograf.Logger which allows assertions to be made on the -// contents of its messages. -type TestLogger struct { - Messages []LogMessage -} - -func (tl *TestLogger) Debug(args ...interface{}) { - tl.Messages = append(tl.Messages, LogMessage{"debug", tl.stringify(args...)}) -} - -func (tl *TestLogger) Info(args ...interface{}) { - tl.Messages = append(tl.Messages, LogMessage{"info", tl.stringify(args...)}) -} - -func (tl *TestLogger) Error(args ...interface{}) { - tl.Messages = append(tl.Messages, LogMessage{"error", tl.stringify(args...)}) -} - -func (tl *TestLogger) WithField(key string, value interface{}) chronograf.Logger { - return tl -} - -func (tl *TestLogger) Writer() *io.PipeWriter { - _, write := io.Pipe() - return write -} - -// HasMessage will return true if the TestLogger has been called with an exact -// match of a particular log message at a particular log level -func (tl *TestLogger) HasMessage(level string, body string) bool { - for _, msg := range tl.Messages { - if msg.Level == level && msg.Body == body { - return true - } - } - return false -} - -func (tl *TestLogger) stringify(args ...interface{}) string { - out := []byte{} - for _, arg := range args[:len(args)-1] { - out = append(out, tl.stringifyArg(arg)...) - out = append(out, []byte(" ")...) - } - out = append(out, tl.stringifyArg(args[len(args)-1])...) 
- return string(out) -} - -func (tl *TestLogger) stringifyArg(arg interface{}) []byte { - switch a := arg.(type) { - case fmt.Stringer: - return []byte(a.String()) - case error: - return []byte(a.Error()) - case string: - return []byte(a) - default: - return []byte("UNKNOWN") - } -} - -// Dump dumps out logs into a given testing.T's logs -func (tl *TestLogger) Dump(t *testing.T) { - t.Log("== Dumping Test Logs ==") - for _, msg := range tl.Messages { - t.Logf("lvl: %s, msg: %s", msg.Level, msg.Body) - } -} diff --git a/chronograf/mocks/mapping.go b/chronograf/mocks/mapping.go deleted file mode 100644 index ec09bdb4dfe..00000000000 --- a/chronograf/mocks/mapping.go +++ /dev/null @@ -1,35 +0,0 @@ -package mocks - -import ( - "context" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -type MappingsStore struct { - AddF func(context.Context, *chronograf.Mapping) (*chronograf.Mapping, error) - AllF func(context.Context) ([]chronograf.Mapping, error) - DeleteF func(context.Context, *chronograf.Mapping) error - UpdateF func(context.Context, *chronograf.Mapping) error - GetF func(context.Context, string) (*chronograf.Mapping, error) -} - -func (s *MappingsStore) Add(ctx context.Context, m *chronograf.Mapping) (*chronograf.Mapping, error) { - return s.AddF(ctx, m) -} - -func (s *MappingsStore) All(ctx context.Context) ([]chronograf.Mapping, error) { - return s.AllF(ctx) -} - -func (s *MappingsStore) Delete(ctx context.Context, m *chronograf.Mapping) error { - return s.DeleteF(ctx, m) -} - -func (s *MappingsStore) Get(ctx context.Context, id string) (*chronograf.Mapping, error) { - return s.GetF(ctx, id) -} - -func (s *MappingsStore) Update(ctx context.Context, m *chronograf.Mapping) error { - return s.UpdateF(ctx, m) -} diff --git a/chronograf/mocks/org_config.go b/chronograf/mocks/org_config.go deleted file mode 100644 index 6f0715f2add..00000000000 --- a/chronograf/mocks/org_config.go +++ /dev/null @@ -1,22 +0,0 @@ -package mocks - -import ( - "context" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -var _ chronograf.OrganizationConfigStore = &OrganizationConfigStore{} - -type OrganizationConfigStore struct { - FindOrCreateF func(ctx context.Context, id string) (*chronograf.OrganizationConfig, error) - PutF func(ctx context.Context, c *chronograf.OrganizationConfig) error -} - -func (s *OrganizationConfigStore) FindOrCreate(ctx context.Context, id string) (*chronograf.OrganizationConfig, error) { - return s.FindOrCreateF(ctx, id) -} - -func (s *OrganizationConfigStore) Put(ctx context.Context, c *chronograf.OrganizationConfig) error { - return s.PutF(ctx, c) -} diff --git a/chronograf/mocks/organizations.go b/chronograf/mocks/organizations.go deleted file mode 100644 index ddb734a6284..00000000000 --- a/chronograf/mocks/organizations.go +++ /dev/null @@ -1,47 +0,0 @@ -package mocks - -import ( - "context" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -var _ chronograf.OrganizationsStore = &OrganizationsStore{} - -type OrganizationsStore struct { - AllF func(context.Context) ([]chronograf.Organization, error) - AddF func(context.Context, *chronograf.Organization) (*chronograf.Organization, error) - DeleteF func(context.Context, *chronograf.Organization) error - GetF func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) - UpdateF func(context.Context, *chronograf.Organization) error - CreateDefaultF func(context.Context) error - DefaultOrganizationF func(context.Context) (*chronograf.Organization, error) -} - 
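These mocks all follow the same function-field pattern: each interface method delegates to a matching `...F` field, so a test stubs only the behavior it exercises. A sketch of how such a mock is typically wired (test name and values are hypothetical; calling any un-stubbed method panics on its nil function field):

```go
package mocks_test

import (
	"context"
	"testing"

	"github.com/influxdata/influxdb/v2/chronograf"
	"github.com/influxdata/influxdb/v2/chronograf/mocks"
)

func TestGetDelegatesToStub(t *testing.T) {
	// Stub only Get; the other function fields stay nil.
	store := &mocks.OrganizationsStore{
		GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) {
			return &chronograf.Organization{ID: "howdy", Name: "An Organization"}, nil
		},
	}

	org, err := store.Get(context.Background(), chronograf.OrganizationQuery{})
	if err != nil || org.ID != "howdy" {
		t.Fatalf("unexpected result: %+v, %v", org, err)
	}
}
```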
-func (s *OrganizationsStore) CreateDefault(ctx context.Context) error { - return s.CreateDefaultF(ctx) -} - -func (s *OrganizationsStore) DefaultOrganization(ctx context.Context) (*chronograf.Organization, error) { - return s.DefaultOrganizationF(ctx) -} - -func (s *OrganizationsStore) Add(ctx context.Context, o *chronograf.Organization) (*chronograf.Organization, error) { - return s.AddF(ctx, o) -} - -func (s *OrganizationsStore) All(ctx context.Context) ([]chronograf.Organization, error) { - return s.AllF(ctx) -} - -func (s *OrganizationsStore) Delete(ctx context.Context, o *chronograf.Organization) error { - return s.DeleteF(ctx, o) -} - -func (s *OrganizationsStore) Get(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - return s.GetF(ctx, q) -} - -func (s *OrganizationsStore) Update(ctx context.Context, o *chronograf.Organization) error { - return s.UpdateF(ctx, o) -} diff --git a/chronograf/mocks/response.go b/chronograf/mocks/response.go deleted file mode 100644 index ecc4e9fe495..00000000000 --- a/chronograf/mocks/response.go +++ /dev/null @@ -1,20 +0,0 @@ -package mocks - -// NewResponse returns a mocked chronograf.Response -func NewResponse(res string, err error) *Response { - return &Response{ - res: res, - err: err, - } -} - -// Response is a mocked chronograf.Response -type Response struct { - res string - err error -} - -// MarshalJSON returns the res and err as the fake response. -func (r *Response) MarshalJSON() ([]byte, error) { - return []byte(r.res), r.err -} diff --git a/chronograf/mocks/roles.go b/chronograf/mocks/roles.go deleted file mode 100644 index 235ae61df10..00000000000 --- a/chronograf/mocks/roles.go +++ /dev/null @@ -1,43 +0,0 @@ -package mocks - -import ( - "context" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -var _ chronograf.RolesStore = &RolesStore{} - -// RolesStore mock allows all functions to be set for testing -type RolesStore struct { - AllF func(context.Context) ([]chronograf.Role, error) - AddF func(context.Context, *chronograf.Role) (*chronograf.Role, error) - DeleteF func(context.Context, *chronograf.Role) error - GetF func(ctx context.Context, name string) (*chronograf.Role, error) - UpdateF func(context.Context, *chronograf.Role) error -} - -// All lists all Roles from the RolesStore -func (s *RolesStore) All(ctx context.Context) ([]chronograf.Role, error) { - return s.AllF(ctx) -} - -// Add a new Role in the RolesStore -func (s *RolesStore) Add(ctx context.Context, u *chronograf.Role) (*chronograf.Role, error) { - return s.AddF(ctx, u) -} - -// Delete the Role from the RolesStore -func (s *RolesStore) Delete(ctx context.Context, u *chronograf.Role) error { - return s.DeleteF(ctx, u) -} - -// Get retrieves a Role if name exists. 
-func (s *RolesStore) Get(ctx context.Context, name string) (*chronograf.Role, error) { - return s.GetF(ctx, name) -} - -// Update the Role's permissions or users -func (s *RolesStore) Update(ctx context.Context, u *chronograf.Role) error { - return s.UpdateF(ctx, u) -} diff --git a/chronograf/mocks/servers.go b/chronograf/mocks/servers.go deleted file mode 100644 index 9eea65044ff..00000000000 --- a/chronograf/mocks/servers.go +++ /dev/null @@ -1,38 +0,0 @@ -package mocks - -import ( - "context" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -var _ chronograf.ServersStore = &ServersStore{} - -// ServersStore mock allows all functions to be set for testing -type ServersStore struct { - AllF func(context.Context) ([]chronograf.Server, error) - AddF func(context.Context, chronograf.Server) (chronograf.Server, error) - DeleteF func(context.Context, chronograf.Server) error - GetF func(ctx context.Context, ID int) (chronograf.Server, error) - UpdateF func(context.Context, chronograf.Server) error -} - -func (s *ServersStore) All(ctx context.Context) ([]chronograf.Server, error) { - return s.AllF(ctx) -} - -func (s *ServersStore) Add(ctx context.Context, srv chronograf.Server) (chronograf.Server, error) { - return s.AddF(ctx, srv) -} - -func (s *ServersStore) Delete(ctx context.Context, srv chronograf.Server) error { - return s.DeleteF(ctx, srv) -} - -func (s *ServersStore) Get(ctx context.Context, id int) (chronograf.Server, error) { - return s.GetF(ctx, id) -} - -func (s *ServersStore) Update(ctx context.Context, srv chronograf.Server) error { - return s.UpdateF(ctx, srv) -} diff --git a/chronograf/mocks/sources.go b/chronograf/mocks/sources.go deleted file mode 100644 index 5a77b2cb41c..00000000000 --- a/chronograf/mocks/sources.go +++ /dev/null @@ -1,43 +0,0 @@ -package mocks - -import ( - "context" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -var _ chronograf.SourcesStore = &SourcesStore{} - -// SourcesStore mock allows all functions to be set for testing -type SourcesStore struct { - AllF func(context.Context) ([]chronograf.Source, error) - AddF func(context.Context, chronograf.Source) (chronograf.Source, error) - DeleteF func(context.Context, chronograf.Source) error - GetF func(ctx context.Context, ID int) (chronograf.Source, error) - UpdateF func(context.Context, chronograf.Source) error -} - -// All returns all sources in the store -func (s *SourcesStore) All(ctx context.Context) ([]chronograf.Source, error) { - return s.AllF(ctx) -} - -// Add creates a new source in the SourcesStore and returns Source with ID -func (s *SourcesStore) Add(ctx context.Context, src chronograf.Source) (chronograf.Source, error) { - return s.AddF(ctx, src) -} - -// Delete the Source from the store -func (s *SourcesStore) Delete(ctx context.Context, src chronograf.Source) error { - return s.DeleteF(ctx, src) -} - -// Get retrieves Source if `ID` exists -func (s *SourcesStore) Get(ctx context.Context, ID int) (chronograf.Source, error) { - return s.GetF(ctx, ID) -} - -// Update the Source in the store. 
-func (s *SourcesStore) Update(ctx context.Context, src chronograf.Source) error { - return s.UpdateF(ctx, src) -} diff --git a/chronograf/mocks/store.go b/chronograf/mocks/store.go deleted file mode 100644 index f22df100cdc..00000000000 --- a/chronograf/mocks/store.go +++ /dev/null @@ -1,55 +0,0 @@ -package mocks - -import ( - "context" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -// Store is a server.DataStore -type Store struct { - SourcesStore chronograf.SourcesStore - MappingsStore chronograf.MappingsStore - ServersStore chronograf.ServersStore - LayoutsStore chronograf.LayoutsStore - UsersStore chronograf.UsersStore - DashboardsStore chronograf.DashboardsStore - OrganizationsStore chronograf.OrganizationsStore - ConfigStore chronograf.ConfigStore - OrganizationConfigStore chronograf.OrganizationConfigStore -} - -func (s *Store) Sources(ctx context.Context) chronograf.SourcesStore { - return s.SourcesStore -} - -func (s *Store) Servers(ctx context.Context) chronograf.ServersStore { - return s.ServersStore -} - -func (s *Store) Layouts(ctx context.Context) chronograf.LayoutsStore { - return s.LayoutsStore -} - -func (s *Store) Users(ctx context.Context) chronograf.UsersStore { - return s.UsersStore -} - -func (s *Store) Organizations(ctx context.Context) chronograf.OrganizationsStore { - return s.OrganizationsStore -} -func (s *Store) Mappings(ctx context.Context) chronograf.MappingsStore { - return s.MappingsStore -} - -func (s *Store) Dashboards(ctx context.Context) chronograf.DashboardsStore { - return s.DashboardsStore -} - -func (s *Store) Config(ctx context.Context) chronograf.ConfigStore { - return s.ConfigStore -} - -func (s *Store) OrganizationConfig(ctx context.Context) chronograf.OrganizationConfigStore { - return s.OrganizationConfigStore -} diff --git a/chronograf/mocks/timeseries.go b/chronograf/mocks/timeseries.go deleted file mode 100644 index 3d1a1b8a960..00000000000 --- a/chronograf/mocks/timeseries.go +++ /dev/null @@ -1,60 +0,0 @@ -package mocks - -import ( - "context" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -var _ chronograf.TimeSeries = &TimeSeries{} - -// TimeSeries is a mockable chronograf time series by overriding the functions. -type TimeSeries struct { - // Connect will connect to the time series using the information in `Source`. - ConnectF func(context.Context, *chronograf.Source) error - // Query retrieves time series data from the database. - QueryF func(context.Context, chronograf.Query) (chronograf.Response, error) - // Write records points into the TimeSeries - WriteF func(context.Context, []chronograf.Point) error - // UsersStore represents the user accounts within the TimeSeries database - UsersF func(context.Context) chronograf.UsersStore - // Permissions returns all valid names permissions in this database - PermissionsF func(context.Context) chronograf.Permissions - // RolesF represents the roles. Roles group permissions and Users - RolesF func(context.Context) (chronograf.RolesStore, error) -} - -// New implements TimeSeriesClient -func (t *TimeSeries) New(chronograf.Source, chronograf.Logger) (chronograf.TimeSeries, error) { - return t, nil -} - -// Connect will connect to the time series using the information in `Source`. -func (t *TimeSeries) Connect(ctx context.Context, src *chronograf.Source) error { - return t.ConnectF(ctx, src) -} - -// Query retrieves time series data from the database. 
-func (t *TimeSeries) Query(ctx context.Context, query chronograf.Query) (chronograf.Response, error) { - return t.QueryF(ctx, query) -} - -// Write records a point into the time series -func (t *TimeSeries) Write(ctx context.Context, points []chronograf.Point) error { - return t.WriteF(ctx, points) -} - -// Users represents the user accounts within the TimeSeries database -func (t *TimeSeries) Users(ctx context.Context) chronograf.UsersStore { - return t.UsersF(ctx) -} - -// Roles represents the roles. Roles group permissions and Users -func (t *TimeSeries) Roles(ctx context.Context) (chronograf.RolesStore, error) { - return t.RolesF(ctx) -} - -// Permissions returns all valid names permissions in this database -func (t *TimeSeries) Permissions(ctx context.Context) chronograf.Permissions { - return t.PermissionsF(ctx) -} diff --git a/chronograf/mocks/users.go b/chronograf/mocks/users.go deleted file mode 100644 index 9e3646e1136..00000000000 --- a/chronograf/mocks/users.go +++ /dev/null @@ -1,49 +0,0 @@ -package mocks - -import ( - "context" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -var _ chronograf.UsersStore = &UsersStore{} - -// UsersStore mock allows all functions to be set for testing -type UsersStore struct { - AllF func(context.Context) ([]chronograf.User, error) - AddF func(context.Context, *chronograf.User) (*chronograf.User, error) - DeleteF func(context.Context, *chronograf.User) error - GetF func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) - UpdateF func(context.Context, *chronograf.User) error - NumF func(context.Context) (int, error) -} - -// All lists all users from the UsersStore -func (s *UsersStore) All(ctx context.Context) ([]chronograf.User, error) { - return s.AllF(ctx) -} - -// Num returns the number of users in the UsersStore -func (s *UsersStore) Num(ctx context.Context) (int, error) { - return s.NumF(ctx) -} - -// Add a new User in the UsersStore -func (s *UsersStore) Add(ctx context.Context, u *chronograf.User) (*chronograf.User, error) { - return s.AddF(ctx, u) -} - -// Delete the User from the UsersStore -func (s *UsersStore) Delete(ctx context.Context, u *chronograf.User) error { - return s.DeleteF(ctx, u) -} - -// Get retrieves a user if name exists. -func (s *UsersStore) Get(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - return s.GetF(ctx, q) -} - -// Update the user's permissions or roles -func (s *UsersStore) Update(ctx context.Context, u *chronograf.User) error { - return s.UpdateF(ctx, u) -} diff --git a/chronograf/multistore/dashboards.go b/chronograf/multistore/dashboards.go deleted file mode 100644 index d5d275ea2b9..00000000000 --- a/chronograf/multistore/dashboards.go +++ /dev/null @@ -1,97 +0,0 @@ -package multistore - -import ( - "context" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -// Ensure DashboardsStore implements chronograf.DashboardsStore. 
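The `// Ensure ... implements ...` lines throughout these files use Go's compile-time assertion idiom: assigning a value to the blank identifier of the interface type makes the build fail if the implementation ever drifts from the interface. A standalone illustration with a hypothetical type:

```go
package main

import "fmt"

type ID int

func (i ID) String() string { return fmt.Sprintf("id-%d", i) }

// Compile-time check only: the value is discarded, so this costs
// nothing at runtime, but the build breaks if ID ever stops
// satisfying fmt.Stringer.
var _ fmt.Stringer = ID(0)

func main() { fmt.Println(ID(7)) }
```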
-var _ chronograf.DashboardsStore = &DashboardsStore{} - -// DashboardsStore implements the chronograf.DashboardsStore interface, and -// delegates to all contained DashboardsStores -type DashboardsStore struct { - Stores []chronograf.DashboardsStore -} - -// All concatenates the Dashboards of all contained Stores -func (multi *DashboardsStore) All(ctx context.Context) ([]chronograf.Dashboard, error) { - all := []chronograf.Dashboard{} - boardSet := map[chronograf.DashboardID]struct{}{} - - ok := false - var err error - for _, store := range multi.Stores { - var boards []chronograf.Dashboard - boards, err = store.All(ctx) - if err != nil { - // If this Store is unable to return an array of dashboards, skip to the - // next Store. - continue - } - ok = true // We've received a response from at least one Store - for _, board := range boards { - // Enforce that the dashboard has a unique ID - // If the ID has been seen before, ignore the dashboard - if _, okay := boardSet[board.ID]; !okay { // We have a new dashboard - boardSet[board.ID] = struct{}{} // We just care that the ID is unique - all = append(all, board) - } - } - } - if !ok { - return nil, err - } - return all, nil -} - -// Add the dashboard to the first responsive Store -func (multi *DashboardsStore) Add(ctx context.Context, dashboard chronograf.Dashboard) (chronograf.Dashboard, error) { - var err error - for _, store := range multi.Stores { - var d chronograf.Dashboard - d, err = store.Add(ctx, dashboard) - if err == nil { - return d, nil - } - } - return chronograf.Dashboard{}, err -} - -// Delete delegates to all Stores, returns success if one Store is successful -func (multi *DashboardsStore) Delete(ctx context.Context, dashboard chronograf.Dashboard) error { - var err error - for _, store := range multi.Stores { - err = store.Delete(ctx, dashboard) - if err == nil { - return nil - } - } - return err -} - -// Get finds the Dashboard by id among all contained Stores -func (multi *DashboardsStore) Get(ctx context.Context, id chronograf.DashboardID) (chronograf.Dashboard, error) { - var err error - for _, store := range multi.Stores { - var d chronograf.Dashboard - d, err = store.Get(ctx, id) - if err == nil { - return d, nil - } - } - return chronograf.Dashboard{}, err -} - -// Update the first responsive Store -func (multi *DashboardsStore) Update(ctx context.Context, dashboard chronograf.Dashboard) error { - var err error - for _, store := range multi.Stores { - err = store.Update(ctx, dashboard) - if err == nil { - return nil - } - } - return err -}
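Every multistore wrapper applies the same delegation strategy: reads concatenate results across stores, deduplicating by ID, while writes go to the first store that responds without error. A sketch composing the DashboardsStore above with the function-field mocks from earlier in this diff (the test name and the `Name` value are illustrative):

```go
package multistore_test

import (
	"context"
	"errors"
	"testing"

	"github.com/influxdata/influxdb/v2/chronograf"
	"github.com/influxdata/influxdb/v2/chronograf/mocks"
	"github.com/influxdata/influxdb/v2/chronograf/multistore"
)

func TestAllSkipsFailingStore(t *testing.T) {
	failing := &mocks.DashboardsStore{
		AllF: func(ctx context.Context) ([]chronograf.Dashboard, error) {
			return nil, errors.New("store offline")
		},
	}
	working := &mocks.DashboardsStore{
		AllF: func(ctx context.Context) ([]chronograf.Dashboard, error) {
			return []chronograf.Dashboard{{Name: "cpu"}}, nil
		},
	}

	multi := &multistore.DashboardsStore{
		Stores: []chronograf.DashboardsStore{failing, working},
	}

	// One successful store is enough; the failing one is skipped.
	boards, err := multi.All(context.Background())
	if err != nil || len(boards) != 1 {
		t.Fatalf("want 1 dashboard, got %d (err: %v)", len(boards), err)
	}
}
```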
diff --git a/chronograf/multistore/kapacitors.go b/chronograf/multistore/kapacitors.go deleted file mode 100644 index cd2f96bcb78..00000000000 --- a/chronograf/multistore/kapacitors.go +++ /dev/null @@ -1,97 +0,0 @@ -package multistore - -import ( - "context" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -// Ensure KapacitorStore implements chronograf.ServersStore. -var _ chronograf.ServersStore = &KapacitorStore{} - -// KapacitorStore implements the chronograf.ServersStore interface, and -// delegates to all contained KapacitorStores -type KapacitorStore struct { - Stores []chronograf.ServersStore -} - -// All concatenates the Kapacitors of all contained Stores -func (multi *KapacitorStore) All(ctx context.Context) ([]chronograf.Server, error) { - all := []chronograf.Server{} - kapSet := map[int]struct{}{} - - ok := false - var err error - for _, store := range multi.Stores { - var kaps []chronograf.Server - kaps, err = store.All(ctx) - if err != nil { - // If this Store is unable to return an array of kapacitors, skip to the - // next Store. - continue - } - ok = true // We've received a response from at least one Store - for _, kap := range kaps { - // Enforce that the kapacitor has a unique ID - // If the ID has been seen before, ignore the kapacitor - if _, okay := kapSet[kap.ID]; !okay { // We have a new kapacitor - kapSet[kap.ID] = struct{}{} // We just care that the ID is unique - all = append(all, kap) - } - } - } - if !ok { - return nil, err - } - return all, nil -} - -// Add the kap to the first responsive Store -func (multi *KapacitorStore) Add(ctx context.Context, kap chronograf.Server) (chronograf.Server, error) { - var err error - for _, store := range multi.Stores { - var k chronograf.Server - k, err = store.Add(ctx, kap) - if err == nil { - return k, nil - } - } - return chronograf.Server{}, err -} - -// Delete delegates to all Stores, returns success if one Store is successful -func (multi *KapacitorStore) Delete(ctx context.Context, kap chronograf.Server) error { - var err error - for _, store := range multi.Stores { - err = store.Delete(ctx, kap) - if err == nil { - return nil - } - } - return err -} - -// Get finds the Kapacitor by id among all contained Stores -func (multi *KapacitorStore) Get(ctx context.Context, id int) (chronograf.Server, error) { - var err error - for _, store := range multi.Stores { - var k chronograf.Server - k, err = store.Get(ctx, id) - if err == nil { - return k, nil - } - } - return chronograf.Server{}, err -} - -// Update the first responsive Store -func (multi *KapacitorStore) Update(ctx context.Context, kap chronograf.Server) error { - var err error - for _, store := range multi.Stores { - err = store.Update(ctx, kap) - if err == nil { - return nil - } - } - return err -} diff --git a/chronograf/multistore/kapacitors_test.go b/chronograf/multistore/kapacitors_test.go deleted file mode 100644 index 1bc073a66c8..00000000000 --- a/chronograf/multistore/kapacitors_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package multistore - -import ( - "testing" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -func TestInterfaceImplementation(t *testing.T) { - var _ chronograf.ServersStore = &KapacitorStore{} -} diff --git a/chronograf/multistore/layouts.go b/chronograf/multistore/layouts.go deleted file mode 100644 index 900128b2ed7..00000000000 --- a/chronograf/multistore/layouts.go +++ /dev/null @@ -1,94 +0,0 @@ -package multistore - -import ( - "context" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -// Layouts is a LayoutsStore that contains multiple LayoutsStores -// The All method will return the set of all Layouts. -// Each method will be tried against the Stores slice serially.
-type Layouts struct { - Stores []chronograf.LayoutsStore -} - -// All returns the set of all layouts -func (s *Layouts) All(ctx context.Context) ([]chronograf.Layout, error) { - all := []chronograf.Layout{} - layoutSet := map[string]chronograf.Layout{} - ok := false - var err error - for _, store := range s.Stores { - var layouts []chronograf.Layout - layouts, err = store.All(ctx) - if err != nil { - // Try to load as many layouts as possible - continue - } - ok = true - for _, l := range layouts { - // Enforce that the layout has a unique ID - // If the layout has been seen before then skip - if _, okay := layoutSet[l.ID]; !okay { - layoutSet[l.ID] = l - all = append(all, l) - } - } - } - if !ok { - return nil, err - } - return all, nil -} - -// Add creates a new dashboard in the LayoutsStore. Tries each store sequentially until success. -func (s *Layouts) Add(ctx context.Context, layout chronograf.Layout) (chronograf.Layout, error) { - var err error - for _, store := range s.Stores { - var l chronograf.Layout - l, err = store.Add(ctx, layout) - if err == nil { - return l, nil - } - } - return chronograf.Layout{}, err -} - -// Delete the dashboard from the store. Searches through all stores to find Layout and -// then deletes from that store. -func (s *Layouts) Delete(ctx context.Context, layout chronograf.Layout) error { - var err error - for _, store := range s.Stores { - err = store.Delete(ctx, layout) - if err == nil { - return nil - } - } - return err -} - -// Get retrieves Layout if `ID` exists. Searches through each store sequentially until success. -func (s *Layouts) Get(ctx context.Context, ID string) (chronograf.Layout, error) { - var err error - for _, store := range s.Stores { - var l chronograf.Layout - l, err = store.Get(ctx, ID) - if err == nil { - return l, nil - } - } - return chronograf.Layout{}, err -} - -// Update the dashboard in the store. Searches through each store sequentially until success. -func (s *Layouts) Update(ctx context.Context, layout chronograf.Layout) error { - var err error - for _, store := range s.Stores { - err = store.Update(ctx, layout) - if err == nil { - return nil - } - } - return err -} diff --git a/chronograf/multistore/organizations.go b/chronograf/multistore/organizations.go deleted file mode 100644 index 8bb9289e491..00000000000 --- a/chronograf/multistore/organizations.go +++ /dev/null @@ -1,129 +0,0 @@ -package multistore - -import ( - "context" - "fmt" - "strings" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -// Ensure OrganizationsStore implements chronograf.OrganizationsStore. -var _ chronograf.OrganizationsStore = &OrganizationsStore{} - -// OrganizationsStore implements the chronograf.OrganizationsStore interface, and -// delegates to all contained OrganizationsStores -type OrganizationsStore struct { - Stores []chronograf.OrganizationsStore -} - -// All concatenates the Organizations of all contained Stores -func (multi *OrganizationsStore) All(ctx context.Context) ([]chronograf.Organization, error) { - all := []chronograf.Organization{} - orgSet := map[string]struct{}{} - - ok := false - var err error - for _, store := range multi.Stores { - var orgs []chronograf.Organization - orgs, err = store.All(ctx) - if err != nil { - // If this Store is unable to return an array of orgs, skip to the - // next Store. 
- continue - } - ok = true // We've received a response from at least one Store - for _, org := range orgs { - // Enforce that the org has a unique ID - // If the ID has been seen before, ignore the org - if _, okay := orgSet[org.ID]; !okay { // We have a new org - orgSet[org.ID] = struct{}{} // We just care that the ID is unique - all = append(all, org) - } - } - } - if !ok { - return nil, err - } - return all, nil -} - -// Add the org to the first responsive Store -func (multi *OrganizationsStore) Add(ctx context.Context, org *chronograf.Organization) (*chronograf.Organization, error) { - errors := []string{} - for _, store := range multi.Stores { - var o *chronograf.Organization - o, err := store.Add(ctx, org) - if err == nil { - return o, nil - } - errors = append(errors, err.Error()) - } - return nil, fmt.Errorf("unknown error while adding organization: %s", strings.Join(errors, " ")) -} - -// Delete delegates to all Stores, returns success if one Store is successful -func (multi *OrganizationsStore) Delete(ctx context.Context, org *chronograf.Organization) error { - errors := []string{} - for _, store := range multi.Stores { - err := store.Delete(ctx, org) - if err == nil { - return nil - } - errors = append(errors, err.Error()) - } - return fmt.Errorf("unknown error while deleting organization: %s", strings.Join(errors, " ")) -} - -// Get finds the Organization by id among all contained Stores -func (multi *OrganizationsStore) Get(ctx context.Context, query chronograf.OrganizationQuery) (*chronograf.Organization, error) { - var err error - for _, store := range multi.Stores { - var o *chronograf.Organization - o, err = store.Get(ctx, query) - if err == nil { - return o, nil - } - } - return nil, chronograf.ErrOrganizationNotFound -} - -// Update the first responsive Store -func (multi *OrganizationsStore) Update(ctx context.Context, org *chronograf.Organization) error { - errors := []string{} - for _, store := range multi.Stores { - err := store.Update(ctx, org) - if err == nil { - return nil - } - errors = append(errors, err.Error()) - } - return fmt.Errorf("unknown error while updating organization: %s", strings.Join(errors, " ")) -} - -// CreateDefault makes a default organization in the first responsive Store -func (multi *OrganizationsStore) CreateDefault(ctx context.Context) error { - errors := []string{} - for _, store := range multi.Stores { - err := store.CreateDefault(ctx) - if err == nil { - return nil - } - errors = append(errors, err.Error()) - } - return fmt.Errorf("unknown error while creating default organization: %s", strings.Join(errors, " ")) -} - -// DefaultOrganization returns the first successful DefaultOrganization -func (multi *OrganizationsStore) DefaultOrganization(ctx context.Context) (*chronograf.Organization, error) { - errors := []string{} - for _, store := range multi.Stores { - org, err := store.DefaultOrganization(ctx) - if err == nil { - return org, nil - } - errors = append(errors, err.Error()) - } - return nil, fmt.Errorf("unknown error while getting default organization: %s", strings.Join(errors, " ")) - -} diff --git a/chronograf/multistore/sources.go b/chronograf/multistore/sources.go deleted file mode 100644 index 74fd4b99b2c..00000000000 --- a/chronograf/multistore/sources.go +++ /dev/null @@ -1,96 +0,0 @@ -package multistore - -import ( - "context" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -// Ensure SourcesStore implements chronograf.SourcesStore. 
-var _ chronograf.SourcesStore = &SourcesStore{} - -// SourcesStore delegates to the SourcesStores that compose it -type SourcesStore struct { - Stores []chronograf.SourcesStore -} - -// All concatenates the Sources of all contained Stores -func (multi *SourcesStore) All(ctx context.Context) ([]chronograf.Source, error) { - all := []chronograf.Source{} - sourceSet := map[int]struct{}{} - - ok := false - var err error - for _, store := range multi.Stores { - var sources []chronograf.Source - sources, err = store.All(ctx) - if err != nil { - // If this Store is unable to return an array of sources, skip to the - // next Store. - continue - } - ok = true // We've received a response from at least one Store - for _, s := range sources { - // Enforce that the source has a unique ID - // If the source has been seen before, don't override what we already have - if _, okay := sourceSet[s.ID]; !okay { // We have a new Source! - sourceSet[s.ID] = struct{}{} // We just care that the ID is unique - all = append(all, s) - } - } - } - if !ok { - return nil, err - } - return all, nil -} - -// Add the src to the first Store to respond successfully -func (multi *SourcesStore) Add(ctx context.Context, src chronograf.Source) (chronograf.Source, error) { - var err error - for _, store := range multi.Stores { - var s chronograf.Source - s, err = store.Add(ctx, src) - if err == nil { - return s, nil - } - } - return chronograf.Source{}, nil -} - -// Delete delegates to all stores, returns success if one Store is successful -func (multi *SourcesStore) Delete(ctx context.Context, src chronograf.Source) error { - var err error - for _, store := range multi.Stores { - err = store.Delete(ctx, src) - if err == nil { - return nil - } - } - return err -} - -// Get finds the Source by id among all contained Stores -func (multi *SourcesStore) Get(ctx context.Context, id int) (chronograf.Source, error) { - var err error - for _, store := range multi.Stores { - var s chronograf.Source - s, err = store.Get(ctx, id) - if err == nil { - return s, nil - } - } - return chronograf.Source{}, err -} - -// Update the first store to return a successful response -func (multi *SourcesStore) Update(ctx context.Context, src chronograf.Source) error { - var err error - for _, store := range multi.Stores { - err = store.Update(ctx, src) - if err == nil { - return nil - } - } - return err -} diff --git a/chronograf/noop/config.go b/chronograf/noop/config.go deleted file mode 100644 index 2098736f146..00000000000 --- a/chronograf/noop/config.go +++ /dev/null @@ -1,26 +0,0 @@ -package noop - -import ( - "context" - "fmt" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -// ensure ConfigStore implements chronograf.ConfigStore -var _ chronograf.ConfigStore = &ConfigStore{} - -type ConfigStore struct{} - -// TODO(desa): this really should be removed -func (s *ConfigStore) Initialize(context.Context) error { - return fmt.Errorf("cannot initialize") -} - -func (s *ConfigStore) Get(context.Context) (*chronograf.Config, error) { - return nil, chronograf.ErrConfigNotFound -} - -func (s *ConfigStore) Update(context.Context, *chronograf.Config) error { - return fmt.Errorf("cannot update conifg") -} diff --git a/chronograf/noop/dashboards.go b/chronograf/noop/dashboards.go deleted file mode 100644 index cbc06d332af..00000000000 --- a/chronograf/noop/dashboards.go +++ /dev/null @@ -1,33 +0,0 @@ -package noop - -import ( - "context" - "fmt" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -// ensure DashboardsStore 
implements chronograf.DashboardsStore -var _ chronograf.DashboardsStore = &DashboardsStore{} - -type DashboardsStore struct{} - -func (s *DashboardsStore) All(context.Context) ([]chronograf.Dashboard, error) { - return nil, fmt.Errorf("no dashboards found") -} - -func (s *DashboardsStore) Add(context.Context, chronograf.Dashboard) (chronograf.Dashboard, error) { - return chronograf.Dashboard{}, fmt.Errorf("failed to add dashboard") -} - -func (s *DashboardsStore) Delete(context.Context, chronograf.Dashboard) error { - return fmt.Errorf("failed to delete dashboard") -} - -func (s *DashboardsStore) Get(ctx context.Context, ID chronograf.DashboardID) (chronograf.Dashboard, error) { - return chronograf.Dashboard{}, chronograf.ErrDashboardNotFound -} - -func (s *DashboardsStore) Update(context.Context, chronograf.Dashboard) error { - return fmt.Errorf("failed to update dashboard") -} diff --git a/chronograf/noop/layouts.go b/chronograf/noop/layouts.go deleted file mode 100644 index 9ff2ad4ccf1..00000000000 --- a/chronograf/noop/layouts.go +++ /dev/null @@ -1,33 +0,0 @@ -package noop - -import ( - "context" - "fmt" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -// ensure LayoutsStore implements chronograf.LayoutsStore -var _ chronograf.LayoutsStore = &LayoutsStore{} - -type LayoutsStore struct{} - -func (s *LayoutsStore) All(context.Context) ([]chronograf.Layout, error) { - return nil, fmt.Errorf("no layouts found") -} - -func (s *LayoutsStore) Add(context.Context, chronograf.Layout) (chronograf.Layout, error) { - return chronograf.Layout{}, fmt.Errorf("failed to add layout") -} - -func (s *LayoutsStore) Delete(context.Context, chronograf.Layout) error { - return fmt.Errorf("failed to delete layout") -} - -func (s *LayoutsStore) Get(ctx context.Context, ID string) (chronograf.Layout, error) { - return chronograf.Layout{}, chronograf.ErrLayoutNotFound -} - -func (s *LayoutsStore) Update(context.Context, chronograf.Layout) error { - return fmt.Errorf("failed to update layout") -} diff --git a/chronograf/noop/mappings.go b/chronograf/noop/mappings.go deleted file mode 100644 index 87696839246..00000000000 --- a/chronograf/noop/mappings.go +++ /dev/null @@ -1,33 +0,0 @@ -package noop - -import ( - "context" - "fmt" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -// ensure MappingsStore implements chronograf.MappingsStore -var _ chronograf.MappingsStore = &MappingsStore{} - -type MappingsStore struct{} - -func (s *MappingsStore) All(context.Context) ([]chronograf.Mapping, error) { - return nil, fmt.Errorf("no mappings found") -} - -func (s *MappingsStore) Add(context.Context, *chronograf.Mapping) (*chronograf.Mapping, error) { - return nil, fmt.Errorf("failed to add mapping") -} - -func (s *MappingsStore) Delete(context.Context, *chronograf.Mapping) error { - return fmt.Errorf("failed to delete mapping") -} - -func (s *MappingsStore) Get(ctx context.Context, ID string) (*chronograf.Mapping, error) { - return nil, chronograf.ErrMappingNotFound -} - -func (s *MappingsStore) Update(context.Context, *chronograf.Mapping) error { - return fmt.Errorf("failed to update mapping") -} diff --git a/chronograf/noop/org_config.go b/chronograf/noop/org_config.go deleted file mode 100644 index 53da5ac6a6b..00000000000 --- a/chronograf/noop/org_config.go +++ /dev/null @@ -1,21 +0,0 @@ -package noop - -import ( - "context" - "fmt" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -// ensure OrganizationConfigStore implements chronograf.OrganizationConfigStore -var _ 
chronograf.OrganizationConfigStore = &OrganizationConfigStore{} - -type OrganizationConfigStore struct{} - -func (s *OrganizationConfigStore) FindOrCreate(context.Context, string) (*chronograf.OrganizationConfig, error) { - return nil, chronograf.ErrOrganizationConfigNotFound -} - -func (s *OrganizationConfigStore) Put(context.Context, *chronograf.OrganizationConfig) error { - return fmt.Errorf("cannot replace config") -} diff --git a/chronograf/noop/organizations.go b/chronograf/noop/organizations.go deleted file mode 100644 index 0528bbb6d39..00000000000 --- a/chronograf/noop/organizations.go +++ /dev/null @@ -1,41 +0,0 @@ -package noop - -import ( - "context" - "fmt" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -// ensure OrganizationsStore implements chronograf.OrganizationsStore -var _ chronograf.OrganizationsStore = &OrganizationsStore{} - -type OrganizationsStore struct{} - -func (s *OrganizationsStore) CreateDefault(context.Context) error { - return fmt.Errorf("failed to add organization") -} - -func (s *OrganizationsStore) DefaultOrganization(context.Context) (*chronograf.Organization, error) { - return nil, fmt.Errorf("failed to retrieve default organization") -} - -func (s *OrganizationsStore) All(context.Context) ([]chronograf.Organization, error) { - return nil, fmt.Errorf("no organizations found") -} - -func (s *OrganizationsStore) Add(context.Context, *chronograf.Organization) (*chronograf.Organization, error) { - return nil, fmt.Errorf("failed to add organization") -} - -func (s *OrganizationsStore) Delete(context.Context, *chronograf.Organization) error { - return fmt.Errorf("failed to delete organization") -} - -func (s *OrganizationsStore) Get(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - return nil, chronograf.ErrOrganizationNotFound -} - -func (s *OrganizationsStore) Update(context.Context, *chronograf.Organization) error { - return fmt.Errorf("failed to update organization") -} diff --git a/chronograf/noop/servers.go b/chronograf/noop/servers.go deleted file mode 100644 index d6702f5177d..00000000000 --- a/chronograf/noop/servers.go +++ /dev/null @@ -1,33 +0,0 @@ -package noop - -import ( - "context" - "fmt" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -// ensure ServersStore implements chronograf.ServersStore -var _ chronograf.ServersStore = &ServersStore{} - -type ServersStore struct{} - -func (s *ServersStore) All(context.Context) ([]chronograf.Server, error) { - return nil, fmt.Errorf("no servers found") -} - -func (s *ServersStore) Add(context.Context, chronograf.Server) (chronograf.Server, error) { - return chronograf.Server{}, fmt.Errorf("failed to add server") -} - -func (s *ServersStore) Delete(context.Context, chronograf.Server) error { - return fmt.Errorf("failed to delete server") -} - -func (s *ServersStore) Get(ctx context.Context, ID int) (chronograf.Server, error) { - return chronograf.Server{}, chronograf.ErrServerNotFound -} - -func (s *ServersStore) Update(context.Context, chronograf.Server) error { - return fmt.Errorf("failed to update server") -} diff --git a/chronograf/noop/sources.go b/chronograf/noop/sources.go deleted file mode 100644 index 254d9062f43..00000000000 --- a/chronograf/noop/sources.go +++ /dev/null @@ -1,33 +0,0 @@ -package noop - -import ( - "context" - "fmt" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -// ensure SourcesStore implements chronograf.SourcesStore -var _ chronograf.SourcesStore = &SourcesStore{} - -type SourcesStore 
struct{} - -func (s *SourcesStore) All(context.Context) ([]chronograf.Source, error) { - return nil, fmt.Errorf("no sources found") -} - -func (s *SourcesStore) Add(context.Context, chronograf.Source) (chronograf.Source, error) { - return chronograf.Source{}, fmt.Errorf("failed to add source") -} - -func (s *SourcesStore) Delete(context.Context, chronograf.Source) error { - return fmt.Errorf("failed to delete source") -} - -func (s *SourcesStore) Get(ctx context.Context, ID int) (chronograf.Source, error) { - return chronograf.Source{}, chronograf.ErrSourceNotFound -} - -func (s *SourcesStore) Update(context.Context, chronograf.Source) error { - return fmt.Errorf("failed to update source") -} diff --git a/chronograf/noop/users.go b/chronograf/noop/users.go deleted file mode 100644 index c65881e7c49..00000000000 --- a/chronograf/noop/users.go +++ /dev/null @@ -1,37 +0,0 @@ -package noop - -import ( - "context" - "fmt" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -// ensure UsersStore implements chronograf.UsersStore -var _ chronograf.UsersStore = &UsersStore{} - -type UsersStore struct{} - -func (s *UsersStore) All(context.Context) ([]chronograf.User, error) { - return nil, fmt.Errorf("no users found") -} - -func (s *UsersStore) Add(context.Context, *chronograf.User) (*chronograf.User, error) { - return nil, fmt.Errorf("failed to add user") -} - -func (s *UsersStore) Delete(context.Context, *chronograf.User) error { - return fmt.Errorf("failed to delete user") -} - -func (s *UsersStore) Get(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - return nil, chronograf.ErrUserNotFound -} - -func (s *UsersStore) Update(context.Context, *chronograf.User) error { - return fmt.Errorf("failed to update user") -} - -func (s *UsersStore) Num(context.Context) (int, error) { - return 0, fmt.Errorf("failed to get number of users") -} diff --git a/chronograf/oauth2/auth0.go b/chronograf/oauth2/auth0.go deleted file mode 100644 index 927cc6d09a0..00000000000 --- a/chronograf/oauth2/auth0.go +++ /dev/null @@ -1,106 +0,0 @@ -package oauth2 - -import ( - "encoding/json" - "net/http" - "net/url" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -var _ Provider = &Auth0{} - -type Auth0 struct { - Generic - Organizations map[string]bool // the set of allowed organizations users may belong to -} - -func (a *Auth0) PrincipalID(provider *http.Client) (string, error) { - type Account struct { - Email string `json:"email"` - Organization string `json:"organization"` - } - - resp, err := provider.Get(a.Generic.APIURL) - if err != nil { - return "", err - } - - defer resp.Body.Close() - act := Account{} - if err = json.NewDecoder(resp.Body).Decode(&act); err != nil { - return "", err - } - - // check for organization membership if required - if len(a.Organizations) > 0 && !a.Organizations[act.Organization] { - a.Logger. - WithField("org", act.Organization). 
- Error(ErrOrgMembership) - - return "", ErrOrgMembership - } - return act.Email, nil -} - -func (a *Auth0) Group(provider *http.Client) (string, error) { - type Account struct { - Email string `json:"email"` - Organization string `json:"organization"` - } - - resp, err := provider.Get(a.Generic.APIURL) - if err != nil { - return "", err - } - - defer resp.Body.Close() - act := Account{} - if err = json.NewDecoder(resp.Body).Decode(&act); err != nil { - return "", err - } - - return act.Organization, nil -} - -func NewAuth0(auth0Domain, clientID, clientSecret, redirectURL string, organizations []string, logger chronograf.Logger) (Auth0, error) { - domain, err := url.Parse(auth0Domain) - if err != nil { - return Auth0{}, err - } - - domain.Scheme = "https" - - domain.Path = "/authorize" - authURL := domain.String() - - domain.Path = "/oauth/token" - tokenURL := domain.String() - - domain.Path = "/userinfo" - apiURL := domain.String() - - a0 := Auth0{ - Generic: Generic{ - PageName: "auth0", - - ClientID: clientID, - ClientSecret: clientSecret, - - RequiredScopes: []string{"openid", "email"}, - - RedirectURL: redirectURL, - AuthURL: authURL, - TokenURL: tokenURL, - APIURL: apiURL, - - Logger: logger, - }, - Organizations: make(map[string]bool, len(organizations)), - } - - for _, org := range organizations { - a0.Organizations[org] = true - } - return a0, nil -} diff --git a/chronograf/oauth2/auth0_test.go b/chronograf/oauth2/auth0_test.go deleted file mode 100644 index d0ef55bf8ae..00000000000 --- a/chronograf/oauth2/auth0_test.go +++ /dev/null @@ -1,139 +0,0 @@ -package oauth2_test - -import ( - "encoding/json" - "net/http" - "net/http/httptest" - "testing" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/oauth2" -) - -var auth0Tests = []struct { - name string - email string - organization string // empty string is "no organization" - - allowedUsers []string - allowedOrgs []string // empty disables organization checking - shouldErr bool -}{ - { - "Simple, no orgs", - "marty.mcfly@example.com", - "", - - []string{"marty.mcfly@example.com"}, - []string{}, - false, - }, - { - "Unauthorized", - "marty.mcfly@example.com", - "", - - []string{"doc.brown@example.com"}, - []string{}, - true, - }, - { - "Success - member of an org", - "marty.mcfly@example.com", - "time-travelers", - - []string{"marty.mcfly@example.com"}, - []string{"time-travelers"}, - false, - }, - { - "Failure - not a member of an org", - "marty.mcfly@example.com", - "time-travelers", - - []string{"marty.mcfly@example.com"}, - []string{"biffs-gang"}, - true, - }, -} - -func Test_Auth0_PrincipalID_RestrictsByOrganization(t *testing.T) { - for _, test := range auth0Tests { - t.Run(test.name, func(tt *testing.T) { - tt.Parallel() - expected := struct { - Email string `json:"email"` - Organization string `json:"organization"` - }{ - test.email, - test.organization, - } - - mockAPI := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if r.URL.Path != "/userinfo" { - rw.WriteHeader(http.StatusNotFound) - return - } - - allowed := false - for _, user := range test.allowedUsers { - if test.email == user { - allowed = true - } - } - - if !allowed { - rw.WriteHeader(http.StatusUnauthorized) - return - } - - enc := json.NewEncoder(rw) - - rw.WriteHeader(http.StatusOK) - _ = enc.Encode(expected) - })) - defer mockAPI.Close() - - logger := &chronograf.NoopLogger{} - prov, err := oauth2.NewAuth0(mockAPI.URL, "id", "secret", 
mockAPI.URL+"/callback", test.allowedOrgs, logger) - if err != nil { - tt.Fatal("Unexpected error instantiating Auth0 provider: err:", err) - } - - tripper, err := oauth2.NewTestTripper(logger, mockAPI, http.DefaultTransport) - if err != nil { - tt.Fatal("Error initializing TestTripper: err:", err) - } - - tc := &http.Client{ - Transport: tripper, - } - - var email string - email, err = prov.PrincipalID(tc) - if !test.shouldErr { - if err != nil { - tt.Fatal(test.name, ": Unexpected error while attempting to authenticate: err:", err) - } - - if email != test.email { - tt.Fatal(test.name, ": email mismatch. Got", email, "want:", test.email) - } - } - - if err == nil && test.shouldErr { - tt.Fatal(test.name, ": Expected error while attempting to authenticate but received none") - } - }) - } -} - -func Test_Auth0_ErrsWithBadDomain(t *testing.T) { - t.Parallel() - - logger := &chronograf.NoopLogger{} - _, err := oauth2.NewAuth0("!!@#$!@$%@#$%", "id", "secret", "http://example.com", []string{}, logger) - if err == nil { - t.Fatal("Expected err with bad domain but received none") - } -} diff --git a/chronograf/oauth2/cookies.go b/chronograf/oauth2/cookies.go deleted file mode 100644 index 097240ce1f8..00000000000 --- a/chronograf/oauth2/cookies.go +++ /dev/null @@ -1,135 +0,0 @@ -package oauth2 - -import ( - "context" - "net/http" - "time" -) - -const ( - // DefaultCookieName is the name of the stored cookie - DefaultCookieName = "session" - // DefaultInactivityDuration is the duration a token is valid without any new activity - DefaultInactivityDuration = 5 * time.Minute -) - -var _ Authenticator = &cookie{} - -// cookie represents the location and expiration time of new cookies. -type cookie struct { - Name string // Name is the name of the cookie stored on the browser - Lifespan time.Duration // Lifespan is the expiration date of the cookie. 0 means session cookie - Inactivity time.Duration // Inactivity is the length of time a token is valid if there is no activity - Now func() time.Time - Tokens Tokenizer -} - -// NewCookieJWT creates an Authenticator that uses cookies for auth -func NewCookieJWT(secret string, lifespan time.Duration) Authenticator { - inactivity := DefaultInactivityDuration - // Server interprets a token duration longer than the cookie lifespan as - // a token that was issued by a server with a longer auth-duration and is - // thus invalid, as a security precaution. So, inactivity must be set to - // be less than lifespan. - if lifespan > 0 && inactivity > lifespan { - inactivity = lifespan / 2 // half of the lifespan ensures tokens can be refreshed once. - } - return &cookie{ - Name: DefaultCookieName, - Lifespan: lifespan, - Inactivity: inactivity, - Now: DefaultNowTime, - Tokens: &JWT{ - Secret: secret, - Now: DefaultNowTime, - }, - } -} - -// Validate returns Principal of the Cookie if the Token is valid. -func (c *cookie) Validate(ctx context.Context, r *http.Request) (Principal, error) { - cookie, err := r.Cookie(c.Name) - if err != nil { - return Principal{}, ErrAuthentication - } - - return c.Tokens.ValidPrincipal(ctx, Token(cookie.Value), c.Lifespan) -} - -// Extend will extend the lifetime of the Token by the Inactivity time. Assumes -// Principal is already valid. 
-func (c *cookie) Extend(ctx context.Context, w http.ResponseWriter, p Principal) (Principal, error) { - // Refresh the token by extending its life another Inactivity duration - p, err := c.Tokens.ExtendedPrincipal(ctx, p, c.Inactivity) - if err != nil { - return Principal{}, ErrAuthentication - } - - // Creating a new token with the extended principal - token, err := c.Tokens.Create(ctx, p) - if err != nil { - return Principal{}, ErrAuthentication - } - - // Cookie lifespan can be indirectly figured out by taking the token's - // issued at time and adding the lifespan setting The token's issued at - // time happens to correspond to the cookie's original issued at time. - exp := p.IssuedAt.Add(c.Lifespan) - // Once the token has been extended, write it out as a new cookie. - c.setCookie(w, string(token), exp) - - return p, nil -} - -// Authorize will create cookies containing token information. It'll create -// a token with cookie.Duration of life to be stored as the cookie's value. -func (c *cookie) Authorize(ctx context.Context, w http.ResponseWriter, p Principal) error { - // Principal will be issued at Now() and will expire - // c.Inactivity into the future - now := c.Now() - p.IssuedAt = now - p.ExpiresAt = now.Add(c.Inactivity) - - token, err := c.Tokens.Create(ctx, p) - if err != nil { - return err - } - - // The time when the cookie expires - exp := now.Add(c.Lifespan) - c.setCookie(w, string(token), exp) - - return nil -} - -// setCookie creates a cookie with value expiring at exp and writes it as a cookie into the response -func (c *cookie) setCookie(w http.ResponseWriter, value string, exp time.Time) { - // Cookie has a Token baked into it - cookie := http.Cookie{ - Name: DefaultCookieName, - Value: value, - HttpOnly: true, - Path: "/", - } - - // Only set a cookie to be persistent (endure beyond the browser session) - // if auth duration is greater than zero - if c.Lifespan > 0 { - cookie.Expires = exp - } - http.SetCookie(w, &cookie) -} - -// Expire returns a cookie that will expire an existing cookie -func (c *cookie) Expire(w http.ResponseWriter) { - // to expire cookie set the time in the past - cookie := http.Cookie{ - Name: DefaultCookieName, - Value: "none", - HttpOnly: true, - Path: "/", - Expires: c.Now().Add(-1 * time.Hour), - } - - http.SetCookie(w, &cookie) -} diff --git a/chronograf/oauth2/cookies_test.go b/chronograf/oauth2/cookies_test.go deleted file mode 100644 index 004d0836d48..00000000000 --- a/chronograf/oauth2/cookies_test.go +++ /dev/null @@ -1,296 +0,0 @@ -package oauth2 - -import ( - "context" - "fmt" - "log" - "net/http" - "net/http/httptest" - "reflect" - "strings" - "testing" - "time" - - gojwt "github.com/dgrijalva/jwt-go" -) - -type MockTokenizer struct { - Principal Principal - ValidErr error - Token Token - CreateErr error - ExtendErr error -} - -func (m *MockTokenizer) ValidPrincipal(ctx context.Context, token Token, duration time.Duration) (Principal, error) { - return m.Principal, m.ValidErr -} - -func (m *MockTokenizer) Create(ctx context.Context, p Principal) (Token, error) { - return m.Token, m.CreateErr -} - -func (m *MockTokenizer) ExtendedPrincipal(ctx context.Context, principal Principal, extension time.Duration) (Principal, error) { - return principal, m.ExtendErr -} - -func (m *MockTokenizer) GetClaims(tokenString string) (gojwt.MapClaims, error) { - return gojwt.MapClaims{}, nil -} - -func TestCookieAuthorize(t *testing.T) { - var test = []struct { - Desc string - Value string - Expected string - Err error - CreateErr error - 
}{ - { - Desc: "Unable to create token", - Err: ErrAuthentication, - CreateErr: ErrAuthentication, - }, - { - Desc: "Cookie token extracted", - Value: "reallyimportant", - Expected: "reallyimportant", - Err: nil, - }, - } - for _, test := range test { - cook := cookie{ - Lifespan: 1 * time.Second, - Now: func() time.Time { - return time.Unix(0, 0) - }, - Tokens: &MockTokenizer{ - Token: Token(test.Value), - CreateErr: test.CreateErr, - }, - } - principal := Principal{} - w := httptest.NewRecorder() - err := cook.Authorize(context.Background(), w, principal) - if err != test.Err { - t.Fatalf("Cookie extract error; expected %v actual %v", test.Err, err) - } - if test.Err != nil { - continue - } - - cookies := w.Header()["Set-Cookie"] - - if len(cookies) == 0 { - t.Fatal("Expected some cookies but got zero") - } - log.Printf("%s", cookies[0]) - if !strings.Contains(cookies[0], fmt.Sprintf("%s=%s", DefaultCookieName, test.Expected)) { - t.Errorf("Token extract error; expected %v actual %v", test.Expected, principal.Subject) - } - } -} - -func TestCookieValidate(t *testing.T) { - var test = []struct { - Desc string - Name string - Value string - Lookup string - Expected string - Err error - ValidErr error - }{ - { - Desc: "No cookie of this name", - Name: "Auth", - Value: "reallyimportant", - Lookup: "Doesntexist", - Expected: "", - Err: ErrAuthentication, - }, - { - Desc: "Unable to create token", - Name: "Auth", - Lookup: "Auth", - Err: ErrAuthentication, - ValidErr: ErrAuthentication, - }, - { - Desc: "Cookie token extracted", - Name: "Auth", - Value: "reallyimportant", - Lookup: "Auth", - Expected: "reallyimportant", - Err: nil, - }, - } - for _, test := range test { - req, _ := http.NewRequest("", "http://howdy.com", nil) - req.AddCookie(&http.Cookie{ - Name: test.Name, - Value: test.Value, - }) - - cook := cookie{ - Name: test.Lookup, - Lifespan: 1 * time.Second, - Inactivity: DefaultInactivityDuration, - Now: func() time.Time { - return time.Unix(0, 0) - }, - Tokens: &MockTokenizer{ - Principal: Principal{ - Subject: test.Value, - }, - ValidErr: test.ValidErr, - }, - } - principal, err := cook.Validate(context.Background(), req) - if err != test.Err { - t.Errorf("Cookie extract error; expected %v actual %v", test.Err, err) - } - - if principal.Subject != test.Expected { - t.Errorf("Token extract error; expected %v actual %v", test.Expected, principal.Subject) - } - } -} - -func TestNewCookieJWT(t *testing.T) { - auth := NewCookieJWT("secret", 2*time.Second) - if cookie, ok := auth.(*cookie); !ok { - t.Errorf("NewCookieJWT() did not create cookie Authenticator") - } else if cookie.Inactivity != time.Second { - t.Errorf("NewCookieJWT() inactivity was not two seconds: %s", cookie.Inactivity) - } - - auth = NewCookieJWT("secret", time.Hour) - if cookie, ok := auth.(*cookie); !ok { - t.Errorf("NewCookieJWT() did not create cookie Authenticator") - } else if cookie.Inactivity != DefaultInactivityDuration { - t.Errorf("NewCookieJWT() inactivity was not five minutes: %s", cookie.Inactivity) - } - - auth = NewCookieJWT("secret", 0) - if cookie, ok := auth.(*cookie); !ok { - t.Errorf("NewCookieJWT() did not create cookie Authenticator") - } else if cookie.Inactivity != DefaultInactivityDuration { - t.Errorf("NewCookieJWT() inactivity was not five minutes: %s", cookie.Inactivity) - } -} - -func TestCookieExtend(t *testing.T) { - history := time.Unix(-446774400, 0) - type fields struct { - Name string - Lifespan time.Duration - Inactivity time.Duration - Now func() time.Time - Tokens Tokenizer - } - 
type args struct { - ctx context.Context - w *httptest.ResponseRecorder - p Principal - } - tests := []struct { - name string - fields fields - args args - want Principal - wantErr bool - }{ - { - name: "Successful extention", - want: Principal{ - Subject: "subject", - }, - fields: fields{ - Name: "session", - Lifespan: time.Second, - Inactivity: time.Second, - Now: func() time.Time { - return history - }, - Tokens: &MockTokenizer{ - Principal: Principal{ - Subject: "subject", - }, - Token: "token", - ExtendErr: nil, - }, - }, - args: args{ - ctx: context.Background(), - w: httptest.NewRecorder(), - p: Principal{ - Subject: "subject", - }, - }, - }, - { - name: "Unable to extend", - wantErr: true, - fields: fields{ - Tokens: &MockTokenizer{ - ExtendErr: fmt.Errorf("bad extend"), - }, - }, - args: args{ - ctx: context.Background(), - w: httptest.NewRecorder(), - p: Principal{ - Subject: "subject", - }, - }, - }, - { - name: "Unable to create", - wantErr: true, - fields: fields{ - Tokens: &MockTokenizer{ - CreateErr: fmt.Errorf("bad extend"), - }, - }, - args: args{ - ctx: context.Background(), - w: httptest.NewRecorder(), - p: Principal{ - Subject: "subject", - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - c := &cookie{ - Name: tt.fields.Name, - Lifespan: tt.fields.Lifespan, - Inactivity: tt.fields.Inactivity, - Now: tt.fields.Now, - Tokens: tt.fields.Tokens, - } - got, err := c.Extend(tt.args.ctx, tt.args.w, tt.args.p) - if (err != nil) != tt.wantErr { - t.Errorf("cookie.Extend() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !tt.wantErr { - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("cookie.Extend() = %v, want %v", got, tt.want) - } - - cookies := tt.args.w.Header()["Set-Cookie"] - if len(cookies) == 0 { - t.Fatal("Expected some cookies but got zero") - } - log.Printf("%s", cookies) - want := fmt.Sprintf("%s=%s", DefaultCookieName, "token") - if !strings.Contains(cookies[0], want) { - t.Errorf("cookie.Extend() = %v, want %v", cookies[0], want) - } - } - }) - } -} diff --git a/chronograf/oauth2/doc.go b/chronograf/oauth2/doc.go deleted file mode 100644 index 51b4754a8f1..00000000000 --- a/chronograf/oauth2/doc.go +++ /dev/null @@ -1,141 +0,0 @@ -// Package oauth2 provides http.Handlers necessary for implementing Oauth2 -// authentication with multiple Providers. 
-// -// This is how the pieces of this package fit together: -// -// ┌────────────────────────────────────────┐ -// │github.com/influxdata/influxdb/chronograf/oauth2 │ -// ├────────────────────────────────────────┴────────────────────────────────────┐ -// │┌────────────────────┐ │ -// ││ <> │ ┌─────────────────────────┐ │ -// ││ Authenticator │ │ AuthMux │ │ -// │├────────────────────┤ ├─────────────────────────┤ │ -// ││Authorize() │ Auth │+SuccessURL : string │ │ -// ││Validate() ◀────────│+FailureURL : string │──────────┐ │ -// ||Expire() | |+Now : func() time.Time | | | -// │└──────────△─────────┘ └─────────────────────────┘ | | -// │ │ │ │ | -// │ │ │ │ │ -// │ │ │ │ │ -// │ │ Provider│ │ │ -// │ │ ┌───┘ │ │ -// │┌──────────┴────────────┐ │ ▽ │ -// ││ Tokenizer │ │ ┌───────────────┐ │ -// │├───────────────────────┤ ▼ │ <> │ │ -// ││Create() │ ┌───────────────┐ │ OAuth2Mux │ │ -// ││ValidPrincipal() │ │ <> │ ├───────────────┤ │ -// │└───────────────────────┘ │ Provider │ │Login() │ │ -// │ ├───────────────┤ │Logout() │ │ -// │ │ID() │ │Callback() │ │ -// │ │Scopes() │ └───────────────┘ │ -// │ │Secret() │ │ -// │ │Authenticator()│ │ -// │ └───────────────┘ │ -// │ △ │ -// │ │ │ -// │ ┌─────────────────────────┼─────────────────────────┐ │ -// │ │ │ │ │ -// │ │ │ │ │ -// │ │ │ │ │ -// │ ┌───────────────────────┐ ┌──────────────────────┐ ┌──────────────────────┐│ -// │ │ Github │ │ Google │ │ Heroku ││ -// │ ├───────────────────────┤ ├──────────────────────┤ ├──────────────────────┤│ -// │ │+ClientID : string │ │+ClientID : string │ │+ClientID : string ││ -// │ │+ClientSecret : string │ │+ClientSecret : string│ │+ClientSecret : string││ -// │ │+Orgs : []string │ │+Domains : []string │ └──────────────────────┘│ -// │ └───────────────────────┘ │+RedirectURL : string │ │ -// │ └──────────────────────┘ │ -// └─────────────────────────────────────────────────────────────────────────────┘ -// -// The design focuses on an Authenticator, a Provider, and an OAuth2Mux. Their -// responsibilities, respectively, are to decode and encode secrets received -// from a Provider, to perform Provider specific operations in order to extract -// information about a user, and to produce the handlers which persist secrets. -// To add a new provider, You need only implement the Provider interface, and -// add its endpoints to the server Mux. -// -// The Oauth2 flow between a browser, backend, and a Provider that this package -// implements is pictured below for reference. -// -// ┌─────────┐ ┌───────────┐ ┌────────┐ -// │ Browser │ │Chronograf │ │Provider│ -// └─────────┘ └───────────┘ └────────┘ -// │ │ │ -// ├─────── GET /auth ─────────▶ │ -// │ │ │ -// │ │ │ -// ◀ ─ ─ ─302 to Provider ─ ─ ┤ │ -// │ │ │ -// │ │ │ -// ├──────────────── GET /auth w/ callback ─────────────────────▶ -// │ │ │ -// │ │ │ -// ◀─ ─ ─ ─ ─ ─ ─ 302 to Chronograf Callback ─ ─ ─ ─ ─ ─ ─ ─ ┤ -// │ │ │ -// │ Code and State from │ │ -// │ Provider │ │ -// ├───────────────────────────▶ Request token w/ code & │ -// │ │ state │ -// │ ├────────────────────────────────▶ -// │ │ │ -// │ │ Response with │ -// │ │ Token │ -// │ Set cookie, Redirect │◀ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ┤ -// │ to / │ │ -// ◀───────────────────────────┤ │ -// │ │ │ -// │ │ │ -// │ │ │ -// │ │ │ -// -// The browser ultimately receives a cookie from Chronograf, authorizing it. -// Its contents are encoded as a JWT whose "sub" claim is the user's email -// address for whatever provider they have authenticated with. 
Each request to -// Chronograf will validate the contents of this JWT against the `TOKEN_SECRET` -// and checked for expiration. The JWT's "sub" becomes the -// https://en.wikipedia.org/wiki/Principal_(computer_security) used for -// authorization to resources. -// -// The Mux is responsible for providing three http.Handlers for servicing the -// above interaction. These are mounted at specific endpoints by convention -// shared with the front end. Any future Provider routes should follow the same -// convention to ensure compatibility with the front end logic. These routes -// and their responsibilities are: -// -// /oauth/{provider}/login -// -// The `/oauth` endpoint redirects to the Provider for OAuth. Chronograf sets -// the OAuth `state` request parameter to a JWT with a random "sub". Using -// $TOKEN_SECRET `/oauth/github/callback` can validate the `state` parameter -// without needing `state` to be saved. -// -// /oauth/{provider}/callback -// -// The `/oauth/github/callback` receives the OAuth `authorization code` and `state`. -// -// First, it will validate the `state` JWT from the `/oauth` endpoint. `JWT` validation -// only requires access to the signature token. Therefore, there is no need for `state` -// to be saved. Additionally, multiple Chronograf servers will not need to share third -// party storage to synchronize `state`. If this validation fails, the request -// will be redirected to `/login`. -// -// Secondly, the endpoint will use the `authorization code` to retrieve a valid OAuth token -// with the `user:email` scope. If unable to get a token from Github, the request will -// be redirected to `/login`. -// -// Finally, the endpoint will attempt to get the primary email address of the -// user. Again, if not successful, the request will redirect to `/login`. -// -// The email address is used as the subject claim for a new JWT. This JWT becomes the -// value of the cookie sent back to the browser. The cookie is valid for thirty days. -// -// Next, the request is redirected to `/`. -// -// For all API calls to `/chronograf/v1`, the server checks for the existence and validity -// of the JWT within the cookie value. -// If the request did not have a valid JWT, the API returns `HTTP/1.1 401 Unauthorized`. -// -// /oauth/{provider}/logout -// -// Simply expires the session cookie and redirects to `/`. -package oauth2 diff --git a/chronograf/oauth2/generic.go b/chronograf/oauth2/generic.go deleted file mode 100644 index 8483c7f4fa0..00000000000 --- a/chronograf/oauth2/generic.go +++ /dev/null @@ -1,230 +0,0 @@ -package oauth2 - -import ( - "encoding/json" - "errors" - "fmt" - "net/http" - "strings" - - gojwt "github.com/dgrijalva/jwt-go" - "github.com/influxdata/influxdb/v2/chronograf" - "golang.org/x/oauth2" -) - -// ExtendedProvider extendts the base Provider interface with optional methods -type ExtendedProvider interface { - Provider - // get PrincipalID from id_token - PrincipalIDFromClaims(claims gojwt.MapClaims) (string, error) - GroupFromClaims(claims gojwt.MapClaims) (string, error) -} - -var _ ExtendedProvider = &Generic{} - -// Generic provides OAuth Login and Callback server and is modeled -// after the Github OAuth2 provider. Callback will set an authentication -// cookie. This cookie's value is a JWT containing the user's primary -// email address. 
-type Generic struct { - PageName string // Name displayed on the login page - ClientID string - ClientSecret string - RequiredScopes []string - Domains []string // Optional email domain checking - RedirectURL string - AuthURL string - TokenURL string - APIURL string // APIURL returns OpenID Userinfo - APIKey string // APIKey is the JSON key to lookup email address in APIURL response - Logger chronograf.Logger -} - -// Name is the name of the provider -func (g *Generic) Name() string { - if g.PageName == "" { - return "generic" - } - return g.PageName -} - -// ID returns the generic application client id -func (g *Generic) ID() string { - return g.ClientID -} - -// Secret returns the generic application client secret -func (g *Generic) Secret() string { - return g.ClientSecret -} - -// Scopes for generic provider required of the client. -func (g *Generic) Scopes() []string { - return g.RequiredScopes -} - -// Config is the Generic OAuth2 exchange information and endpoints -func (g *Generic) Config() *oauth2.Config { - return &oauth2.Config{ - ClientID: g.ID(), - ClientSecret: g.Secret(), - Scopes: g.Scopes(), - RedirectURL: g.RedirectURL, - Endpoint: oauth2.Endpoint{ - AuthURL: g.AuthURL, - TokenURL: g.TokenURL, - }, - } -} - -// PrincipalID returns the email address of the user. -func (g *Generic) PrincipalID(provider *http.Client) (string, error) { - res := map[string]interface{}{} - - r, err := provider.Get(g.APIURL) - if err != nil { - return "", err - } - - defer r.Body.Close() - if err = json.NewDecoder(r.Body).Decode(&res); err != nil { - return "", err - } - - email := "" - value := res[g.APIKey] - if e, ok := value.(string); ok { - email = e - } - - // If we did not receive an email address, try to lookup the email - // in a similar way as github - if email == "" { - email, err = g.getPrimaryEmail(provider) - if err != nil { - return "", err - } - } - - // If we need to restrict to a set of domains, we first get the org - // and filter. - if len(g.Domains) > 0 { - // If not in the domain deny permission - if ok := ofDomain(g.Domains, email); !ok { - g.Logger.Error("Not a member of required domain.") - return "", fmt.Errorf("not a member of required domain") - } - } - - return email, nil -} - -// Group returns the domain that a user belongs to in the -// the generic OAuth. -func (g *Generic) Group(provider *http.Client) (string, error) { - res := map[string]interface{}{} - - r, err := provider.Get(g.APIURL) - if err != nil { - return "", err - } - - defer r.Body.Close() - if err = json.NewDecoder(r.Body).Decode(&res); err != nil { - return "", err - } - - email := "" - value := res[g.APIKey] - if e, ok := value.(string); ok { - email = e - } - - // If we did not receive an email address, try to lookup the email - // in a similar way as github - if email == "" { - email, err = g.getPrimaryEmail(provider) - if err != nil { - return "", err - } - } - - domain := strings.Split(email, "@") - if len(domain) != 2 { - return "", fmt.Errorf("malformed email address, expected %q to contain @ symbol", email) - } - - return domain[1], nil -} - -// UserEmail represents user's email address -type UserEmail struct { - Email *string `json:"email,omitempty"` - Primary *bool `json:"primary,omitempty"` - Verified *bool `json:"verified,omitempty"` -} - -// getPrimaryEmail gets the private email account for the authenticated user. 
-func (g *Generic) getPrimaryEmail(client *http.Client) (string, error) { - emailsEndpoint := g.APIURL + "/emails" - r, err := client.Get(emailsEndpoint) - if err != nil { - return "", err - } - defer r.Body.Close() - - emails := []*UserEmail{} - if err = json.NewDecoder(r.Body).Decode(&emails); err != nil { - return "", err - } - - email, err := g.primaryEmail(emails) - if err != nil { - g.Logger.Error("Unable to retrieve primary email ", err.Error()) - return "", err - } - return email, nil -} - -func (g *Generic) primaryEmail(emails []*UserEmail) (string, error) { - for _, m := range emails { - if m != nil && m.Primary != nil && m.Verified != nil && m.Email != nil { - return *m.Email, nil - } - } - return "", errors.New("no primary email address") -} - -// ofDomain makes sure that the email is in one of the required domains -func ofDomain(requiredDomains []string, email string) bool { - for _, domain := range requiredDomains { - emailDomain := fmt.Sprintf("@%s", domain) - if strings.HasSuffix(email, emailDomain) { - return true - } - } - return false -} - -// PrincipalIDFromClaims verifies an optional id_token and extracts email address of the user -func (g *Generic) PrincipalIDFromClaims(claims gojwt.MapClaims) (string, error) { - if id, ok := claims[g.APIKey].(string); ok { - return id, nil - } - return "", fmt.Errorf("no claim for %s", g.APIKey) -} - -// GroupFromClaims verifies an optional id_token, extracts the email address of the user and splits off the domain part -func (g *Generic) GroupFromClaims(claims gojwt.MapClaims) (string, error) { - if id, ok := claims[g.APIKey].(string); ok { - email := strings.Split(id, "@") - if len(email) != 2 { - g.Logger.Error("malformed email address, expected %q to contain @ symbol", id) - return "DEFAULT", nil - } - - return email[1], nil - } - - return "", fmt.Errorf("no claim for %s", g.APIKey) -} diff --git a/chronograf/oauth2/generic_test.go b/chronograf/oauth2/generic_test.go deleted file mode 100644 index e54e8aadbcb..00000000000 --- a/chronograf/oauth2/generic_test.go +++ /dev/null @@ -1,200 +0,0 @@ -package oauth2_test - -import ( - "encoding/json" - "net/http" - "net/http/httptest" - "testing" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/oauth2" -) - -func TestGenericGroup_withNotEmail(t *testing.T) { - t.Parallel() - - response := struct { - Email string `json:"not-email"` - }{ - "martymcfly@pinheads.rok", - } - mockAPI := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if r.URL.Path != "/" { - rw.WriteHeader(http.StatusNotFound) - return - } - enc := json.NewEncoder(rw) - - rw.WriteHeader(http.StatusOK) - _ = enc.Encode(response) - })) - defer mockAPI.Close() - - logger := &chronograf.NoopLogger{} - prov := oauth2.Generic{ - Logger: logger, - APIURL: mockAPI.URL, - APIKey: "not-email", - } - tt, err := oauth2.NewTestTripper(logger, mockAPI, http.DefaultTransport) - if err != nil { - t.Fatal("Error initializing TestTripper: err:", err) - } - - tc := &http.Client{ - Transport: tt, - } - - got, err := prov.Group(tc) - if err != nil { - t.Fatal("Unexpected error while retrieving PrincipalID: err:", err) - } - - want := "pinheads.rok" - if got != want { - t.Fatal("Retrieved group was not as expected. 
Want:", want, "Got:", got) - } -} - -func TestGenericGroup_withEmail(t *testing.T) { - t.Parallel() - - response := struct { - Email string `json:"email"` - }{ - "martymcfly@pinheads.rok", - } - mockAPI := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if r.URL.Path != "/" { - rw.WriteHeader(http.StatusNotFound) - return - } - enc := json.NewEncoder(rw) - - rw.WriteHeader(http.StatusOK) - _ = enc.Encode(response) - })) - defer mockAPI.Close() - - logger := &chronograf.NoopLogger{} - prov := oauth2.Generic{ - Logger: logger, - APIURL: mockAPI.URL, - APIKey: "email", - } - tt, err := oauth2.NewTestTripper(logger, mockAPI, http.DefaultTransport) - if err != nil { - t.Fatal("Error initializing TestTripper: err:", err) - } - - tc := &http.Client{ - Transport: tt, - } - - got, err := prov.Group(tc) - if err != nil { - t.Fatal("Unexpected error while retrieving PrincipalID: err:", err) - } - - want := "pinheads.rok" - if got != want { - t.Fatal("Retrieved group was not as expected. Want:", want, "Got:", got) - } -} - -func TestGenericPrincipalID(t *testing.T) { - t.Parallel() - - response := struct { - Email string `json:"email"` - }{ - "martymcfly@pinheads.rok", - } - mockAPI := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if r.URL.Path != "/" { - rw.WriteHeader(http.StatusNotFound) - return - } - enc := json.NewEncoder(rw) - - rw.WriteHeader(http.StatusOK) - _ = enc.Encode(response) - })) - defer mockAPI.Close() - - logger := &chronograf.NoopLogger{} - prov := oauth2.Generic{ - Logger: logger, - APIURL: mockAPI.URL, - APIKey: "email", - } - tt, err := oauth2.NewTestTripper(logger, mockAPI, http.DefaultTransport) - if err != nil { - t.Fatal("Error initializing TestTripper: err:", err) - } - - tc := &http.Client{ - Transport: tt, - } - - got, err := prov.PrincipalID(tc) - if err != nil { - t.Fatal("Unexpected error while retrieving PrincipalID: err:", err) - } - - want := "martymcfly@pinheads.rok" - if got != want { - t.Fatal("Retrieved email was not as expected. Want:", want, "Got:", got) - } -} - -func TestGenericPrincipalIDDomain(t *testing.T) { - t.Parallel() - expectedEmail := []struct { - Email string `json:"email"` - Primary bool `json:"primary"` - Verified bool `json:"verified"` - }{ - {"martymcfly@pinheads.rok", true, false}, - } - mockAPI := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/" { - enc := json.NewEncoder(rw) - rw.WriteHeader(http.StatusOK) - _ = enc.Encode(struct{}{}) - return - } - if r.URL.Path == "/emails" { - enc := json.NewEncoder(rw) - rw.WriteHeader(http.StatusOK) - _ = enc.Encode(expectedEmail) - return - } - - rw.WriteHeader(http.StatusNotFound) - })) - defer mockAPI.Close() - - logger := &chronograf.NoopLogger{} - prov := oauth2.Generic{ - Logger: logger, - Domains: []string{"pinheads.rok"}, - } - tt, err := oauth2.NewTestTripper(logger, mockAPI, http.DefaultTransport) - if err != nil { - t.Fatal("Error initializing TestTripper: err:", err) - } - - tc := &http.Client{ - Transport: tt, - } - - got, err := prov.PrincipalID(tc) - if err != nil { - t.Fatal("Unexpected error while retrieving PrincipalID: err:", err) - } - want := "martymcfly@pinheads.rok" - if got != want { - t.Fatal("Retrieved email was not as expected. 
Want:", want, "Got:", got) - } -} diff --git a/chronograf/oauth2/github.go b/chronograf/oauth2/github.go deleted file mode 100644 index c06d554aa2a..00000000000 --- a/chronograf/oauth2/github.go +++ /dev/null @@ -1,198 +0,0 @@ -package oauth2 - -import ( - "context" - "crypto/rand" - "encoding/base64" - "errors" - "io" - "net/http" - "strings" - - "github.com/google/go-github/github" - "github.com/influxdata/influxdb/v2/chronograf" - "golang.org/x/oauth2" - ogh "golang.org/x/oauth2/github" -) - -var _ Provider = &Github{} - -// Github provides OAuth Login and Callback server. Callback will set -// an authentication cookie. This cookie's value is a JWT containing -// the user's primary Github email address. -type Github struct { - ClientID string - ClientSecret string - Orgs []string // Optional github organization checking - Logger chronograf.Logger -} - -// Name is the name of the provider. -func (g *Github) Name() string { - return "github" -} - -// ID returns the github application client id. -func (g *Github) ID() string { - return g.ClientID -} - -// Secret returns the github application client secret. -func (g *Github) Secret() string { - return g.ClientSecret -} - -// Scopes for github is only the email address and possible organizations if -// we are filtering by organizations. -func (g *Github) Scopes() []string { - scopes := []string{"user:email"} - // In order to access a users orgs, we need the "read:org" scope - // even if g.Orgs == 0 - scopes = append(scopes, "read:org") - return scopes -} - -// Config is the Github OAuth2 exchange information and endpoints. -func (g *Github) Config() *oauth2.Config { - return &oauth2.Config{ - ClientID: g.ID(), - ClientSecret: g.Secret(), - Scopes: g.Scopes(), - Endpoint: ogh.Endpoint, - } -} - -// PrincipalID returns the github email address of the user. -func (g *Github) PrincipalID(provider *http.Client) (string, error) { - client := github.NewClient(provider) - // If we need to restrict to a set of organizations, we first get the org - // and filter. - if len(g.Orgs) > 0 { - orgs, err := getOrganizations(client, g.Logger) - if err != nil { - return "", err - } - // Not a member, so, deny permission - if ok := isMember(g.Orgs, orgs); !ok { - g.Logger.Error("Not a member of required github organization") - return "", err - } - } - - email, err := getPrimaryEmail(client, g.Logger) - if err != nil { - return "", nil - } - return email, nil -} - -// Group returns a comma delimited string of Github organizations -// that a user belongs to in Github -func (g *Github) Group(provider *http.Client) (string, error) { - client := github.NewClient(provider) - orgs, err := getOrganizations(client, g.Logger) - if err != nil { - return "", err - } - - groups := []string{} - for _, org := range orgs { - if org.Login != nil { - groups = append(groups, *org.Login) - continue - } - } - - return strings.Join(groups, ","), nil -} - -func randomString(length int) string { - k := make([]byte, length) - if _, err := io.ReadFull(rand.Reader, k); err != nil { - return "" - } - return base64.StdEncoding.EncodeToString(k) -} - -func logResponseError(log chronograf.Logger, resp *github.Response, err error) { - switch resp.StatusCode { - case http.StatusUnauthorized, http.StatusForbidden: - log.Error("OAuth access to email address forbidden ", err.Error()) - default: - log.Error("Unable to retrieve Github email ", err.Error()) - } -} - -// isMember makes sure that the user is in one of the required organizations. 
-func isMember(requiredOrgs []string, userOrgs []*github.Organization) bool { - for _, requiredOrg := range requiredOrgs { - for _, userOrg := range userOrgs { - if userOrg.Login != nil && *userOrg.Login == requiredOrg { - return true - } - } - } - return false -} - -// getOrganizations gets all organization for the currently authenticated user. -func getOrganizations(client *github.Client, log chronograf.Logger) ([]*github.Organization, error) { - // Get all pages of results - var allOrgs []*github.Organization - for { - opt := &github.ListOptions{ - PerPage: 10, - } - // Get the organizations for the current authenticated user. - orgs, resp, err := client.Organizations.List(context.TODO(), "", opt) - if err != nil { - logResponseError(log, resp, err) - return nil, err - } - allOrgs = append(allOrgs, orgs...) - if resp.NextPage == 0 { - break - } - opt.Page = resp.NextPage - } - return allOrgs, nil -} - -// getPrimaryEmail gets the primary email account for the authenticated user. -func getPrimaryEmail(client *github.Client, log chronograf.Logger) (string, error) { - emails, resp, err := client.Users.ListEmails(context.TODO(), nil) - if err != nil { - logResponseError(log, resp, err) - return "", err - } - - email, err := primaryEmail(emails) - if err != nil { - log.Error("Unable to retrieve primary Github email ", err.Error()) - return "", err - } - return email, nil -} - -func primaryEmail(emails []*github.UserEmail) (string, error) { - for _, m := range emails { - if m != nil && getPrimary(m) && getVerified(m) && m.Email != nil { - return *m.Email, nil - } - } - return "", errors.New("no primary email address") -} - -func getPrimary(m *github.UserEmail) bool { - if m == nil || m.Primary == nil { - return false - } - return *m.Primary -} - -func getVerified(m *github.UserEmail) bool { - if m == nil || m.Verified == nil { - return false - } - return *m.Verified -} diff --git a/chronograf/oauth2/github_test.go b/chronograf/oauth2/github_test.go deleted file mode 100644 index ed988dcda60..00000000000 --- a/chronograf/oauth2/github_test.go +++ /dev/null @@ -1,115 +0,0 @@ -package oauth2_test - -import ( - "encoding/json" - "net/http" - "net/http/httptest" - "testing" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/oauth2" -) - -func TestGithubPrincipalID(t *testing.T) { - t.Parallel() - - expected := []struct { - Email string `json:"email"` - Primary bool `json:"primary"` - Verified bool `json:"verified"` - }{ - {"mcfly@example.com", false, true}, - {"martymcspelledwrong@example.com", false, false}, - {"martymcfly@example.com", true, true}, - } - mockAPI := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if r.URL.Path != "/user/emails" { - rw.WriteHeader(http.StatusNotFound) - return - } - enc := json.NewEncoder(rw) - - rw.WriteHeader(http.StatusOK) - _ = enc.Encode(expected) - })) - defer mockAPI.Close() - - logger := &chronograf.NoopLogger{} - prov := oauth2.Github{ - Logger: logger, - } - tt, err := oauth2.NewTestTripper(logger, mockAPI, http.DefaultTransport) - if err != nil { - t.Fatal("Error initializing TestTripper: err:", err) - } - - tc := &http.Client{ - Transport: tt, - } - - email, err := prov.PrincipalID(tc) - if err != nil { - t.Fatal("Unexpected error while retrieving PrincipalID: err:", err) - } - - if got, want := email, "martymcfly@example.com"; got != want { - t.Fatal("Retrieved email was not as expected. 
Want:", want, "Got:", got) - } -} - -func TestGithubPrincipalIDOrganization(t *testing.T) { - t.Parallel() - - expectedUser := []struct { - Email string `json:"email"` - Primary bool `json:"primary"` - Verified bool `json:"verified"` - }{ - {"martymcfly@example.com", true, true}, - } - expectedOrg := []struct { - Login string `json:"login"` - }{ - {"Hill Valley Preservation Society"}, - } - - mockAPI := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/user/emails" { - enc := json.NewEncoder(rw) - rw.WriteHeader(http.StatusOK) - _ = enc.Encode(expectedUser) - return - } - if r.URL.Path == "/user/orgs" { - enc := json.NewEncoder(rw) - rw.WriteHeader(http.StatusOK) - _ = enc.Encode(expectedOrg) - return - } - rw.WriteHeader(http.StatusNotFound) - })) - defer mockAPI.Close() - - logger := &chronograf.NoopLogger{} - prov := oauth2.Github{ - Logger: logger, - Orgs: []string{"Hill Valley Preservation Society"}, - } - tt, err := oauth2.NewTestTripper(logger, mockAPI, http.DefaultTransport) - if err != nil { - t.Fatal("Error initializing TestTripper: err:", err) - } - - tc := &http.Client{ - Transport: tt, - } - - email, err := prov.PrincipalID(tc) - if err != nil { - t.Fatal("Unexpected error while retrieving PrincipalID: err:", err) - } - - if email != expectedUser[0].Email { - t.Fatal("Retrieved email was not as expected. Want:", expectedUser[0].Email, "Got:", email) - } -} diff --git a/chronograf/oauth2/google.go b/chronograf/oauth2/google.go deleted file mode 100644 index e20029d9c1b..00000000000 --- a/chronograf/oauth2/google.go +++ /dev/null @@ -1,107 +0,0 @@ -package oauth2 - -import ( - "context" - "fmt" - "net/http" - - "github.com/influxdata/influxdb/v2/chronograf" - "golang.org/x/oauth2" - goauth2 "google.golang.org/api/oauth2/v2" - "google.golang.org/api/option" -) - -// GoogleEndpoint is Google's OAuth 2.0 endpoint. -// Copied here to remove tons of package dependencies -var GoogleEndpoint = oauth2.Endpoint{ - AuthURL: "https://accounts.google.com/o/oauth2/auth", - TokenURL: "https://accounts.google.com/o/oauth2/token", -} -var _ Provider = &Google{} - -// Google is an oauth2 provider supporting google. -type Google struct { - ClientID string - ClientSecret string - RedirectURL string - Domains []string // Optional google email domain checking - Logger chronograf.Logger -} - -// Name is the name of the provider -func (g *Google) Name() string { - return "google" -} - -// ID returns the google application client id -func (g *Google) ID() string { - return g.ClientID -} - -// Secret returns the google application client secret -func (g *Google) Secret() string { - return g.ClientSecret -} - -// Scopes for google is only the email address -// Documentation is here: https://developers.google.com/+/web/api/rest/oauth#email -func (g *Google) Scopes() []string { - return []string{ - goauth2.UserinfoEmailScope, - goauth2.UserinfoProfileScope, - } -} - -// Config is the Google OAuth2 exchange information and endpoints -func (g *Google) Config() *oauth2.Config { - return &oauth2.Config{ - ClientID: g.ID(), - ClientSecret: g.Secret(), - Scopes: g.Scopes(), - Endpoint: GoogleEndpoint, - RedirectURL: g.RedirectURL, - } -} - -// PrincipalID returns the google email address of the user. 
-func (g *Google) PrincipalID(provider *http.Client) (string, error) { - srv, err := goauth2.NewService(context.TODO(), option.WithHTTPClient(provider)) - if err != nil { - g.Logger.Error("Unable to communicate with Google ", err.Error()) - return "", err - } - info, err := srv.Userinfo.Get().Do() - if err != nil { - g.Logger.Error("Unable to retrieve Google email ", err.Error()) - return "", err - } - // No domain filtering required, so, the user is authenticated. - if len(g.Domains) == 0 { - return info.Email, nil - } - - // Check if the account domain is acceptable - for _, requiredDomain := range g.Domains { - if info.Hd == requiredDomain { - return info.Email, nil - } - } - g.Logger.Error("Domain '", info.Hd, "' is not a member of required Google domain(s): ", g.Domains) - return "", fmt.Errorf("not in required domain") -} - -// Group returns the string of domain a user belongs to in Google -func (g *Google) Group(provider *http.Client) (string, error) { - srv, err := goauth2.NewService(context.TODO(), option.WithHTTPClient(provider)) - if err != nil { - g.Logger.Error("Unable to communicate with Google ", err.Error()) - return "", err - } - info, err := srv.Userinfo.Get().Do() - if err != nil { - g.Logger.Error("Unable to retrieve Google email ", err.Error()) - return "", err - } - - return info.Hd, nil -} diff --git a/chronograf/oauth2/google_test.go b/chronograf/oauth2/google_test.go deleted file mode 100644 index ed99d1e0ce5..00000000000 --- a/chronograf/oauth2/google_test.go +++ /dev/null @@ -1,100 +0,0 @@ -package oauth2_test - -import ( - "encoding/json" - "net/http" - "net/http/httptest" - "testing" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/oauth2" -) - -func TestGooglePrincipalID(t *testing.T) { - t.Parallel() - - expected := struct { - Email string `json:"email"` - }{ - "martymcfly@example.com", - } - mockAPI := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if r.URL.Path != "/oauth2/v2/userinfo" { - rw.WriteHeader(http.StatusNotFound) - return - } - - enc := json.NewEncoder(rw) - rw.WriteHeader(http.StatusOK) - _ = enc.Encode(expected) - })) - defer mockAPI.Close() - - logger := &chronograf.NoopLogger{} - prov := oauth2.Google{ - Logger: logger, - } - tt, err := oauth2.NewTestTripper(logger, mockAPI, http.DefaultTransport) - if err != nil { - t.Fatal("Error initializing TestTripper: err:", err) - } - - tc := &http.Client{ - Transport: tt, - } - - email, err := prov.PrincipalID(tc) - if err != nil { - t.Fatal("Unexpected error while retrieving PrincipalID: err:", err) - } - - if email != expected.Email { - t.Fatal("Retrieved email was not as expected. 
Want:", expected.Email, "Got:", email) - } -} - -func TestGooglePrincipalIDDomain(t *testing.T) { - t.Parallel() - - expectedUser := struct { - Email string `json:"email"` - Hd string `json:"hd"` - }{ - "martymcfly@example.com", - "Hill Valley Preservation Society", - } - mockAPI := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if r.URL.Path != "/oauth2/v2/userinfo" { - rw.WriteHeader(http.StatusNotFound) - return - } - - enc := json.NewEncoder(rw) - rw.WriteHeader(http.StatusOK) - _ = enc.Encode(expectedUser) - })) - defer mockAPI.Close() - - logger := &chronograf.NoopLogger{} - prov := oauth2.Google{ - Logger: logger, - Domains: []string{"Hill Valley Preservation Society"}, - } - tt, err := oauth2.NewTestTripper(logger, mockAPI, http.DefaultTransport) - if err != nil { - t.Fatal("Error initializing TestTripper: err:", err) - } - - tc := &http.Client{ - Transport: tt, - } - - email, err := prov.PrincipalID(tc) - if err != nil { - t.Fatal("Unexpected error while retrieving PrincipalID: err:", err) - } - - if email != expectedUser.Email { - t.Fatal("Retrieved email was not as expected. Want:", expectedUser.Email, "Got:", email) - } -} diff --git a/chronograf/oauth2/heroku.go b/chronograf/oauth2/heroku.go deleted file mode 100644 index 2cf036db1da..00000000000 --- a/chronograf/oauth2/heroku.go +++ /dev/null @@ -1,145 +0,0 @@ -package oauth2 - -import ( - "encoding/json" - "fmt" - "net/http" - - "github.com/influxdata/influxdb/v2/chronograf" - "golang.org/x/oauth2" - hrk "golang.org/x/oauth2/heroku" -) - -// Ensure that Heroku is an oauth2.Provider -var _ Provider = &Heroku{} - -const ( - // HerokuAccountRoute is required for interacting with Heroku API - HerokuAccountRoute string = "https://api.heroku.com/account" -) - -// Heroku is an OAuth2 Provider allowing users to authenticate with Heroku to -// gain access to Chronograf -type Heroku struct { - // OAuth2 Secrets - ClientID string - ClientSecret string - - Organizations []string // set of organizations permitted to access the protected resource. Empty means "all" - - Logger chronograf.Logger -} - -// Config returns the OAuth2 exchange information and endpoints -func (h *Heroku) Config() *oauth2.Config { - return &oauth2.Config{ - ClientID: h.ID(), - ClientSecret: h.Secret(), - Scopes: h.Scopes(), - Endpoint: hrk.Endpoint, - } -} - -// ID returns the Heroku application client ID -func (h *Heroku) ID() string { - return h.ClientID -} - -// Name returns the name of this provider (heroku) -func (h *Heroku) Name() string { - return "heroku" -} - -// PrincipalID returns the Heroku email address of the user. -func (h *Heroku) PrincipalID(provider *http.Client) (string, error) { - type DefaultOrg struct { - ID string `json:"id"` - Name string `json:"name"` - } - type Account struct { - Email string `json:"email"` - DefaultOrganization DefaultOrg `json:"default_organization"` - } - - req, err := http.NewRequest("GET", HerokuAccountRoute, nil) - if err != nil { - return "", err - } - - // Requests fail to Heroku unless this Accept header is set. - req.Header.Set("Accept", "application/vnd.heroku+json; version=3") - resp, err := provider.Do(req) - if resp.StatusCode/100 != 2 { - err := fmt.Errorf( - "unable to GET user data from %s. Status: %s", - HerokuAccountRoute, - resp.Status, - ) - h.Logger.Error("", err) - return "", err - } - if err != nil { - h.Logger.Error("Unable to communicate with Heroku. 
err:", err) - return "", err - } - defer resp.Body.Close() - d := json.NewDecoder(resp.Body) - - var account Account - if err := d.Decode(&account); err != nil { - h.Logger.Error("Unable to decode response from Heroku. err:", err) - return "", err - } - - // check if member of org - if len(h.Organizations) > 0 { - for _, org := range h.Organizations { - if account.DefaultOrganization.Name == org { - return account.Email, nil - } - } - h.Logger.Error(ErrOrgMembership) - return "", ErrOrgMembership - } - return account.Email, nil -} - -// Group returns the Heroku organization that user belongs to. -func (h *Heroku) Group(provider *http.Client) (string, error) { - type DefaultOrg struct { - ID string `json:"id"` - Name string `json:"name"` - } - type Account struct { - Email string `json:"email"` - DefaultOrganization DefaultOrg `json:"default_organization"` - } - - resp, err := provider.Get(HerokuAccountRoute) - if err != nil { - h.Logger.Error("Unable to communicate with Heroku. err:", err) - return "", err - } - defer resp.Body.Close() - d := json.NewDecoder(resp.Body) - - var account Account - if err := d.Decode(&account); err != nil { - h.Logger.Error("Unable to decode response from Heroku. err:", err) - return "", err - } - - return account.DefaultOrganization.Name, nil -} - -// Scopes for heroku is "identity" which grants access to user account -// information. This will grant us access to the user's email address which is -// used as the Principal's identifier. -func (h *Heroku) Scopes() []string { - return []string{"identity"} -} - -// Secret returns the Heroku application client secret -func (h *Heroku) Secret() string { - return h.ClientSecret -} diff --git a/chronograf/oauth2/heroku_test.go b/chronograf/oauth2/heroku_test.go deleted file mode 100644 index e6785b55599..00000000000 --- a/chronograf/oauth2/heroku_test.go +++ /dev/null @@ -1,102 +0,0 @@ -package oauth2_test - -import ( - "encoding/json" - "net/http" - "net/http/httptest" - "testing" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/oauth2" -) - -func Test_Heroku_PrincipalID_ExtractsEmailAddress(t *testing.T) { - t.Parallel() - - expected := struct { - Email string `json:"email"` - }{ - "martymcfly@example.com", - } - - mockAPI := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if r.URL.Path != "/account" { - rw.WriteHeader(http.StatusNotFound) - return - } - enc := json.NewEncoder(rw) - - rw.WriteHeader(http.StatusOK) - _ = enc.Encode(expected) - })) - defer mockAPI.Close() - - logger := &chronograf.NoopLogger{} - prov := oauth2.Heroku{ - Logger: logger, - } - tt, err := oauth2.NewTestTripper(logger, mockAPI, http.DefaultTransport) - if err != nil { - t.Fatal("Error initializing TestTripper: err:", err) - } - - tc := &http.Client{ - Transport: tt, - } - - email, err := prov.PrincipalID(tc) - if err != nil { - t.Fatal("Unexpected error while retrieving PrincipalID: err:", err) - } - - if email != expected.Email { - t.Fatal("Retrieved email was not as expected. 
Want:", expected.Email, "Got:", email) - } -} - -func Test_Heroku_PrincipalID_RestrictsByOrganization(t *testing.T) { - t.Parallel() - - expected := struct { - Email string `json:"email"` - DefaultOrganization map[string]string `json:"default_organization"` - }{ - "martymcfly@example.com", - map[string]string{ - "id": "a85eac89-56cc-498e-9a89-d8f49f6aed71", - "name": "hill-valley-preservation-society", - }, - } - - mockAPI := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if r.URL.Path != "/account" { - rw.WriteHeader(http.StatusNotFound) - return - } - enc := json.NewEncoder(rw) - - rw.WriteHeader(http.StatusOK) - _ = enc.Encode(expected) - })) - defer mockAPI.Close() - - logger := &chronograf.NoopLogger{} - prov := oauth2.Heroku{ - Logger: logger, - Organizations: []string{"enchantment-under-the-sea-dance-committee"}, - } - - tt, err := oauth2.NewTestTripper(logger, mockAPI, http.DefaultTransport) - if err != nil { - t.Fatal("Error initializing TestTripper: err:", err) - } - - tc := &http.Client{ - Transport: tt, - } - - _, err = prov.PrincipalID(tc) - if err == nil { - t.Fatal("Expected error while authenticating user with mismatched orgs, but received none") - } -} diff --git a/chronograf/oauth2/jwt.go b/chronograf/oauth2/jwt.go deleted file mode 100644 index c5f51eee3a8..00000000000 --- a/chronograf/oauth2/jwt.go +++ /dev/null @@ -1,262 +0,0 @@ -package oauth2 - -import ( - "context" - "crypto/x509" - "encoding/base64" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "time" - - gojwt "github.com/dgrijalva/jwt-go" -) - -// Ensure JWT conforms to the Tokenizer interface -var _ Tokenizer = &JWT{} - -// JWT represents a javascript web token that can be validated or marshaled into string. -type JWT struct { - Secret string - Jwksurl string - Now func() time.Time -} - -// NewJWT creates a new JWT using time.Now -// secret is used for signing and validating signatures (HS256/HMAC) -// jwksurl is used for validating RS256 signatures. -func NewJWT(secret string, jwksurl string) *JWT { - return &JWT{ - Secret: secret, - Jwksurl: jwksurl, - Now: DefaultNowTime, - } -} - -// Ensure Claims implements the jwt.Claims interface -var _ gojwt.Claims = &Claims{} - -// Claims extends jwt.StandardClaims' Valid to make sure claims has a subject. -type Claims struct { - gojwt.StandardClaims - // We were unable to find a standard claim at https://www.iana.org/assignments/jwt/jwt.xhtml - // that felt appropriate for Organization. As a result, we added a custom `org` field. - Organization string `json:"org,omitempty"` - // We were unable to find a standard claim at https://www.iana.org/assignments/jwt/jwt.xhtml - // that felt appropriate for a users Group(s). As a result we added a custom `grp` field. - // Multiple groups may be specified by comma delimiting the various group. - // - // The singlular `grp` was chosen over the `grps` to keep consistent with the JWT naming - // convention (it is common for singlularly named values to actually be arrays, see `given_name`, - // `family_name`, and `middle_name` in the iana link provided above). I should add the discalimer - // I'm currently sick, so this thought process might be off. - Group string `json:"grp,omitempty"` -} - -// Valid adds an empty subject test to the StandardClaims checks. 
-func (c *Claims) Valid() error { - if err := c.StandardClaims.Valid(); err != nil { - return err - } else if c.StandardClaims.Subject == "" { - return fmt.Errorf("claim has no subject") - } - - return nil -} - -// ValidPrincipal checks that the jwtToken is signed correctly and validates it with Claims. lifespan is the -// maximum valid lifetime of a token. If the lifespan is 0 then the auth lifespan duration is not checked. -func (j *JWT) ValidPrincipal(ctx context.Context, jwtToken Token, lifespan time.Duration) (Principal, error) { - gojwt.TimeFunc = j.Now - - // Check for expected signing method. - alg := j.KeyFunc - - return j.ValidClaims(jwtToken, lifespan, alg) -} - -// KeyFunc verifies HMAC or RSA/RS256 signatures -func (j *JWT) KeyFunc(token *gojwt.Token) (interface{}, error) { - if _, ok := token.Method.(*gojwt.SigningMethodHMAC); ok { - return []byte(j.Secret), nil - } else if _, ok := token.Method.(*gojwt.SigningMethodRSA); ok { - return j.KeyFuncRS256(token) - } - return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"]) -} - -// For the id_token, the recommended signature algorithm is RS256, which -// means we need to verify the token against a public key. This public key -// is available from the key discovery service in JSON Web Key (JWK) format. -// JWK is specified in RFC 7517. -// -// The location of the key discovery service (JWKSURL) is published in the -// OpenID Provider Configuration Information at /.well-known/openid-configuration. -// This implements RFC 7517 section 4.7, the "x5c" (X.509 Certificate Chain) parameter. - -// JWK defines a JSON Web Key nested struct -type JWK struct { - Kty string `json:"kty"` - Use string `json:"use"` - Alg string `json:"alg"` - Kid string `json:"kid"` - X5t string `json:"x5t"` - N string `json:"n"` - E string `json:"e"` - X5c []string `json:"x5c"` -} - -// JWKS defines a set of JWKs, as returned by the key discovery service -type JWKS struct { - Keys []JWK `json:"keys"` -} - -// KeyFuncRS256 verifies RS256-signed JWT tokens; it looks up the signing key in the key discovery service -func (j *JWT) KeyFuncRS256(token *gojwt.Token) (interface{}, error) { - // Don't forget to validate the alg is what you expect: - if _, ok := token.Method.(*gojwt.SigningMethodRSA); !ok { - return nil, fmt.Errorf("unsupported signing method: %v", token.Header["alg"]) - } - - // read JWKS document from key discovery service - if j.Jwksurl == "" { - return nil, fmt.Errorf("token JWKSURL not specified, cannot validate RS256 signature") - } - - rr, err := http.Get(j.Jwksurl) - if err != nil { - return nil, err - } - defer rr.Body.Close() - body, err := ioutil.ReadAll(rr.Body) - if err != nil { - return nil, err - } - - // parse json to struct - var jwks JWKS - if err := json.Unmarshal([]byte(body), &jwks); err != nil { - return nil, err - } - - // extract cert when kid and alg match - var certPkix []byte - for _, jwk := range jwks.Keys { - if token.Header["kid"] == jwk.Kid { - // FIXME: optionally walk the key chain, see rfc7517 section 4.7 - certPkix, err = base64.StdEncoding.DecodeString(jwk.X5c[0]) - if err != nil { - return nil, fmt.Errorf("base64 decode error for JWK kid %v", token.Header["kid"]) - } - } - } - if certPkix == nil { - return nil, fmt.Errorf("no signing key found for kid %v", token.Header["kid"]) - } - - // parse certificate (from PKIX format) and return signing key - cert, err := x509.ParseCertificate(certPkix) - if err != nil { - return nil, err - } - return cert.PublicKey, nil -} - -// ValidClaims validates a token with StandardClaims -func (j *JWT) ValidClaims(jwtToken Token, lifespan 
time.Duration, alg gojwt.Keyfunc) (Principal, error) { - // 1. Checks for expired tokens - // 2. Checks if time is after the issued at - // 3. Check if time is after not before (nbf) - // 4. Check if subject is not empty - // 5. Check if duration less than auth lifespan - token, err := gojwt.ParseWithClaims(string(jwtToken), &Claims{}, alg) - if err != nil { - return Principal{}, err - // at time of this writing and researching the docs, token.Valid seems to be always true - } else if !token.Valid { - return Principal{}, err - } - - // at time of this writing and researching the docs, there will always be claims - claims, ok := token.Claims.(*Claims) - if !ok { - return Principal{}, fmt.Errorf("unable to convert claims to standard claims") - } - - exp := time.Unix(claims.ExpiresAt, 0) - iat := time.Unix(claims.IssuedAt, 0) - - // If the duration of the claim is longer than the auth lifespan then this is - // an invalid claim because server assumes that lifespan is the maximum possible - // duration. However, a lifespan of zero means that the duration comparison - // against the auth duration is not needed. - if lifespan > 0 && exp.Sub(iat) > lifespan { - return Principal{}, fmt.Errorf("claims duration is different from auth lifespan") - } - - return Principal{ - Subject: claims.Subject, - Issuer: claims.Issuer, - Organization: claims.Organization, - Group: claims.Group, - ExpiresAt: exp, - IssuedAt: iat, - }, nil -} - -// GetClaims extracts claims from id_token -func (j *JWT) GetClaims(tokenString string) (gojwt.MapClaims, error) { - var claims gojwt.MapClaims - - gojwt.TimeFunc = j.Now - token, err := gojwt.Parse(tokenString, j.KeyFunc) - if err != nil { - return nil, err - } - - if !token.Valid { - return nil, fmt.Errorf("token is not valid") - } - - claims, ok := token.Claims.(gojwt.MapClaims) - if !ok { - return nil, fmt.Errorf("token has no claims") - } - - return claims, nil -} - -// Create creates a signed JWT token from user that expires at Principal's ExpireAt time. -func (j *JWT) Create(ctx context.Context, user Principal) (Token, error) { - // Create a new token object, specifying signing method and the claims - // you would like it to contain. - claims := &Claims{ - StandardClaims: gojwt.StandardClaims{ - Subject: user.Subject, - Issuer: user.Issuer, - ExpiresAt: user.ExpiresAt.Unix(), - IssuedAt: user.IssuedAt.Unix(), - NotBefore: user.IssuedAt.Unix(), - }, - Organization: user.Organization, - Group: user.Group, - } - token := gojwt.NewWithClaims(gojwt.SigningMethodHS256, claims) - // Sign and get the complete encoded token as a string using the secret - t, err := token.SignedString([]byte(j.Secret)) - // this will only fail if the JSON can't be encoded correctly - if err != nil { - return "", err - } - return Token(t), nil -} - -// ExtendedPrincipal sets the expires at to be the current time plus the extention into the future -func (j *JWT) ExtendedPrincipal(ctx context.Context, principal Principal, extension time.Duration) (Principal, error) { - // Extend the time of expiration. Do not change IssuedAt as the - // lifetime of the token is extended, but, NOT the original time - // of issue. 
This is used to enforce a maximum lifetime of a token - principal.ExpiresAt = j.Now().Add(extension) - return principal, nil -} diff --git a/chronograf/oauth2/jwt_test.go b/chronograf/oauth2/jwt_test.go deleted file mode 100644 index 904429135cd..00000000000 --- a/chronograf/oauth2/jwt_test.go +++ /dev/null @@ -1,288 +0,0 @@ -package oauth2_test - -import ( - "context" - "errors" - "io" - "net/http" - "net/http/httptest" - "reflect" - "testing" - "time" - - "github.com/influxdata/influxdb/v2/chronograf/oauth2" -) - -func TestAuthenticate(t *testing.T) { - history := time.Unix(-446774400, 0) - var tests = []struct { - Desc string - Secret string - // JWT tokens were generated at https://jwt.io/ using their Debugger - Token oauth2.Token - Duration time.Duration - Principal oauth2.Principal - Err error - }{ - { - Desc: "Test bad jwt token", - Secret: "secret", - Token: "badtoken", - Principal: oauth2.Principal{ - Subject: "", - }, - Err: errors.New("token contains an invalid number of segments"), - }, - { - Desc: "Test valid jwt token", - Secret: "secret", - Token: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIvY2hyb25vZ3JhZi92MS91c2Vycy8xIiwibmFtZSI6IkRvYyBCcm93biIsImlhdCI6LTQ0Njc3NDQwMCwiZXhwIjotNDQ2Nzc0Mzk5LCJuYmYiOi00NDY3NzQ0MDB9.Ga0zGXWTT2CBVnnIhIO5tUAuBEVk4bKPaT4t4MU1ngo", - Duration: time.Second, - Principal: oauth2.Principal{ - Subject: "/chronograf/v1/users/1", - ExpiresAt: history.Add(time.Second), - IssuedAt: history, - }, - }, - { - Desc: "Test valid jwt token with organization", - Secret: "secret", - Token: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIvY2hyb25vZ3JhZi92MS91c2Vycy8xIiwibmFtZSI6IkRvYyBCcm93biIsIm9yZyI6IjEzMzciLCJpYXQiOi00NDY3NzQ0MDAsImV4cCI6LTQ0Njc3NDM5OSwibmJmIjotNDQ2Nzc0NDAwfQ.b38MK5liimWsvvJr4a3GNYRDJOAN7WCrfZ0FfZftqjc", - Duration: time.Second, - Principal: oauth2.Principal{ - Subject: "/chronograf/v1/users/1", - Organization: "1337", - ExpiresAt: history.Add(time.Second), - IssuedAt: history, - }, - }, - { - Desc: "Test expired jwt token", - Secret: "secret", - Token: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIvY2hyb25vZ3JhZi92MS91c2Vycy8xIiwibmFtZSI6IkRvYyBCcm93biIsImlhdCI6LTQ0Njc3NDQwMCwiZXhwIjotNDQ2Nzc0NDAxLCJuYmYiOi00NDY3NzQ0MDB9.vWXdm0-XQ_pW62yBpSISFFJN_yz0vqT9_INcUKTp5Q8", - Duration: time.Second, - Principal: oauth2.Principal{ - Subject: "", - ExpiresAt: history.Add(time.Second), - IssuedAt: history, - }, - Err: errors.New("token is expired by 1s"), - }, - { - Desc: "Test jwt token not before time", - Secret: "secret", - Token: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIvY2hyb25vZ3JhZi92MS91c2Vycy8xIiwibmFtZSI6IkRvYyBCcm93biIsImlhdCI6LTQ0Njc3NDQwMCwiZXhwIjotNDQ2Nzc0NDAwLCJuYmYiOi00NDY3NzQzOTl9.TMGAhv57u1aosjc4ywKC7cElP1tKyQH7GmRF2ToAxlE", - Duration: time.Second, - Principal: oauth2.Principal{ - Subject: "", - ExpiresAt: history.Add(time.Second), - IssuedAt: history, - }, - Err: errors.New("token is not valid yet"), - }, - { - Desc: "Test jwt with empty subject is invalid", - Secret: "secret", - Token: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpYXQiOi00NDY3NzQ0MDAsImV4cCI6LTQ0Njc3NDQwMCwibmJmIjotNDQ2Nzc0NDAwfQ.gxsA6_Ei3s0f2I1TAtrrb8FmGiO25OqVlktlF_ylhX4", - Duration: time.Second, - Principal: oauth2.Principal{ - Subject: "", - ExpiresAt: history.Add(time.Second), - IssuedAt: history, - }, - Err: errors.New("claim has no subject"), - }, - { - Desc: "Test jwt duration matches auth duration", - Secret: "secret", - Token: 
"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOi00NDY3NzQzMDAsImlhdCI6LTQ0Njc3NDQwMCwiaXNzIjoiaGlsbHZhbGxleSIsIm5iZiI6LTQ0Njc3NDQwMCwic3ViIjoibWFydHlAcGluaGVhZC5uZXQifQ.njEjstpuIDnghSR7VyPPB9QlvJ6Q5JpR3ZEZ_8vGYfA", - Duration: time.Second, - Principal: oauth2.Principal{ - Subject: "marty@pinhead.net", - ExpiresAt: history, - IssuedAt: history.Add(100 * time.Second), - }, - Err: errors.New("claims duration is different from auth lifespan"), - }, - } - for _, test := range tests { - j := oauth2.JWT{ - Secret: test.Secret, - Now: func() time.Time { - return time.Unix(-446774400, 0) - }, - } - principal, err := j.ValidPrincipal(context.Background(), test.Token, test.Duration) - if test.Err != nil && err == nil { - t.Fatalf("Expected err %s", test.Err.Error()) - } - if err != nil { - if test.Err == nil { - t.Errorf("Error in test %s authenticating with bad token: %v", test.Desc, err) - } else if err.Error() != test.Err.Error() { - t.Errorf("Error in test %s expected error: %v actual: %v", test.Desc, test.Err, err) - } - } else if test.Principal != principal { - t.Errorf("Error in test %s; principals different; expected: %v actual: %v", test.Desc, test.Principal, principal) - } - } - -} - -func TestToken(t *testing.T) { - expected := oauth2.Token("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOi00NDY3NzQzOTksImlhdCI6LTQ0Njc3NDQwMCwibmJmIjotNDQ2Nzc0NDAwLCJzdWIiOiIvY2hyb25vZ3JhZi92MS91c2Vycy8xIn0.ofQM6yTmrmve5JeEE0RcK4_euLXuZ_rdh6bLAbtbC9M") - history := time.Unix(-446774400, 0) - j := oauth2.JWT{ - Secret: "secret", - Now: func() time.Time { - return history - }, - } - p := oauth2.Principal{ - Subject: "/chronograf/v1/users/1", - ExpiresAt: history.Add(time.Second), - IssuedAt: history, - } - if token, err := j.Create(context.Background(), p); err != nil { - t.Errorf("Error creating token for principal: %v", err) - } else if token != expected { - t.Errorf("Error creating token; expected: %s actual: %s", expected, token) - } -} - -func TestSigningMethod(t *testing.T) { - token := oauth2.Token("eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWV9.EkN-DOsnsuRjRO6BxXemmJDm3HbxrbRzXglbN2S4sOkopdU4IsDxTI8jO19W_A4K8ZPJijNLis4EZsHeY559a4DFOd50_OqgHGuERTqYZyuhtF39yxJPAjUESwxk2J5k_4zM3O-vtd1Ghyo4IbqKKSy6J9mTniYJPenn5-HIirE") - j := oauth2.JWT{} - if _, err := j.ValidPrincipal(context.Background(), token, 0); err == nil { - t.Error("Error was expected while validating incorrectly signed token") - } else if err.Error() != "token JWKSURL not specified, cannot validate RS256 signature" { - t.Errorf("Error wanted 'token JWKSURL not specified, cannot validate RS256 signature', got %s", err.Error()) - } -} - -func TestGetClaims(t *testing.T) { - var tests = []struct { - Name string - TokenString string - JwksDocument string - Iat int64 - Err error - }{ - { - Name: "Valid Token with RS256 signature verified against correct JWKS document", - TokenString: 
"eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6IllEQlVocWRXa3NLWGRHdVgwc3l0amFVdXhoQSIsImtpZCI6IllEQlVocWRXa3NLWGRHdVgwc3l0amFVdXhoQSJ9.eyJhdWQiOiJjaHJvbm9ncmFmIiwiaXNzIjoiaHR0cHM6Ly9kc3RjaW1hYWQxcC5kc3QtaXRzLmRlL2FkZnMiLCJpYXQiOjE1MTMxNjU4ODksImV4cCI6MTUxMzE2OTQ4OSwiYXV0aF90aW1lIjoxNTEzMTY1ODg4LCJzdWIiOiJlWVYzamRsZE55RlkxcUZGSDRvQWRCdkRGZmJWZm51RzI5SGlIa1N1andrPSIsInVwbiI6ImJzY0Bkc3QtaXRzLmRlIiwidW5pcXVlX25hbWUiOiJEU1RcXGJzYyIsInNpZCI6IlMtMS01LTIxLTI1MDUxNTEzOTgtMjY2MTAyODEwOS0zNzU0MjY1ODIwLTExMDQifQ.nK51Ui4XN45SVul9igNaKFQd-F63BNstBzW-T5LBVm_ANHCEHyP3_88C3ffkkQIi3PxYacRJGtfswP35ws7YJUcNp-GoGZARqz62NpMtbQyhos6mCaVXwPoxPbrZx4AkMQgxkZwJcOzceX7mpjcT3kCth30chN3lkhzSjGrXe4ZDOAV25liS-dsdBiqDiaTB91sS534GM76qJQxFUs51oSbYTRdCN1VJ0XopMcasfVDzFrtSbyvEIVXlpKK2HplnhheqF4QHrM_3cjV_NGRr3tYLe-AGTdDXKWlJD1GDz1ECXeMGQHPoz3U8cqNsFLYBstIlCgfnBWgWsPZSvJPJUg", - JwksDocument: `{"keys":[{"kty":"RSA","use":"sig","alg":"RS256","kid":"YDBUhqdWksKXdGuX0sytjaUuxhA","x5t":"YDBUhqdWksKXdGuX0sytjaUuxhA","n":"uwVVrs5OJRKeLUk0H5N_b4Jbvff3rxlg3WIeOO-zSSPTC5oFOc5_te0rLgVoNJJB4rNM4A7BEXI885xLrjfL3l3LHqaJetvR0tdLAnkvbUKUiGxnuGnmOsgh491P95pHPIAniy2p64FQoBbTJ0a6cF5LRuPPHKVXgjXjTydvmKrt_IVaWUDgICRsw5Bbv290SahmxcdO3akSgfsZtRkR8SmaMzAPYINi2_8P2evaKAnMQLTgUVkctaEamO_6HJ5f5sWheV7trLekU35xPVkPwShDelefnhyJcO5yICXqXzuewBEni9LrxAEJYN2rYfiFQWJy-pDe5DPUBs-IFTpctQ","e":"AQAB","x5c":["MIIC6DCCAdCgAwIBAgIQPszqLhbrpZlE+jEJTyJg7jANBgkqhkiG9w0BAQsFADAwMS4wLAYDVQQDEyVBREZTIFNpZ25pbmcgLSBkc3RjaW1hYWQxcC5kc3QtaXRzLmRlMB4XDTE3MTIwNDE0MDEwOFoXDTE4MTIwNDE0MDEwOFowMDEuMCwGA1UEAxMlQURGUyBTaWduaW5nIC0gZHN0Y2ltYWFkMXAuZHN0LWl0cy5kZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALsFVa7OTiUSni1JNB+Tf2+CW733968ZYN1iHjjvs0kj0wuaBTnOf7XtKy4FaDSSQeKzTOAOwRFyPPOcS643y95dyx6miXrb0dLXSwJ5L21ClIhsZ7hp5jrIIePdT\/eaRzyAJ4stqeuBUKAW0ydGunBeS0bjzxylV4I1408nb5iq7fyFWllA4CAkbMOQW79vdEmoZsXHTt2pEoH7GbUZEfEpmjMwD2CDYtv\/D9nr2igJzEC04FFZHLWhGpjv+hyeX+bFoXle7ay3pFN+cT1ZD8EoQ3pXn54ciXDuciAl6l87nsARJ4vS68QBCWDdq2H4hUFicvqQ3uQz1AbPiBU6XLUCAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAPHCisZyPf\/fuuQEW5LyzZSYMwBRYVR6kk\/M2ZNx6TrUEwmOb10RQ3G97bLAshN44g5lWdPYz4EOt6d2o71etIjf79f+IR0MAjEgBB2HThaHcMU9KG229Ftcauie9XeurngMawTRu60YqH7+go8EMf6a1Kdnx37DMy\/1LRlsYJVfEoOCab3GgcIdXrRSYWqsY4SVJZiTPYdqz9vmNPSXXiDSOTl6qXHV\/f53WTS2V5aIQbuJJziXlceusuVNny0o5h+j6ovZ1HhEGAu3lpD+8kY8KUqA4kXMH3VNZqzHBYazJx\/QBB3bG45cZSOvV3gUOnGBgiv9NBWjhvmY0fC3J6Q=="]}]}`, - Iat: int64(1513165889), - }, - { - Name: "Valid Token with RS256 signature verified against correct JWKS document but predated", - TokenString: "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6IllEQlVocWRXa3NLWGRHdVgwc3l0amFVdXhoQSIsImtpZCI6IllEQlVocWRXa3NLWGRHdVgwc3l0amFVdXhoQSJ9.eyJhdWQiOiJjaHJvbm9ncmFmIiwiaXNzIjoiaHR0cHM6Ly9kc3RjaW1hYWQxcC5kc3QtaXRzLmRlL2FkZnMiLCJpYXQiOjE1MTMxNjU4ODksImV4cCI6MTUxMzE2OTQ4OSwiYXV0aF90aW1lIjoxNTEzMTY1ODg4LCJzdWIiOiJlWVYzamRsZE55RlkxcUZGSDRvQWRCdkRGZmJWZm51RzI5SGlIa1N1andrPSIsInVwbiI6ImJzY0Bkc3QtaXRzLmRlIiwidW5pcXVlX25hbWUiOiJEU1RcXGJzYyIsInNpZCI6IlMtMS01LTIxLTI1MDUxNTEzOTgtMjY2MTAyODEwOS0zNzU0MjY1ODIwLTExMDQifQ.nK51Ui4XN45SVul9igNaKFQd-F63BNstBzW-T5LBVm_ANHCEHyP3_88C3ffkkQIi3PxYacRJGtfswP35ws7YJUcNp-GoGZARqz62NpMtbQyhos6mCaVXwPoxPbrZx4AkMQgxkZwJcOzceX7mpjcT3kCth30chN3lkhzSjGrXe4ZDOAV25liS-dsdBiqDiaTB91sS534GM76qJQxFUs51oSbYTRdCN1VJ0XopMcasfVDzFrtSbyvEIVXlpKK2HplnhheqF4QHrM_3cjV_NGRr3tYLe-AGTdDXKWlJD1GDz1ECXeMGQHPoz3U8cqNsFLYBstIlCgfnBWgWsPZSvJPJUg", - JwksDocument: 
`{"keys":[{"kty":"RSA","use":"sig","alg":"RS256","kid":"YDBUhqdWksKXdGuX0sytjaUuxhA","x5t":"YDBUhqdWksKXdGuX0sytjaUuxhA","n":"uwVVrs5OJRKeLUk0H5N_b4Jbvff3rxlg3WIeOO-zSSPTC5oFOc5_te0rLgVoNJJB4rNM4A7BEXI885xLrjfL3l3LHqaJetvR0tdLAnkvbUKUiGxnuGnmOsgh491P95pHPIAniy2p64FQoBbTJ0a6cF5LRuPPHKVXgjXjTydvmKrt_IVaWUDgICRsw5Bbv290SahmxcdO3akSgfsZtRkR8SmaMzAPYINi2_8P2evaKAnMQLTgUVkctaEamO_6HJ5f5sWheV7trLekU35xPVkPwShDelefnhyJcO5yICXqXzuewBEni9LrxAEJYN2rYfiFQWJy-pDe5DPUBs-IFTpctQ","e":"AQAB","x5c":["MIIC6DCCAdCgAwIBAgIQPszqLhbrpZlE+jEJTyJg7jANBgkqhkiG9w0BAQsFADAwMS4wLAYDVQQDEyVBREZTIFNpZ25pbmcgLSBkc3RjaW1hYWQxcC5kc3QtaXRzLmRlMB4XDTE3MTIwNDE0MDEwOFoXDTE4MTIwNDE0MDEwOFowMDEuMCwGA1UEAxMlQURGUyBTaWduaW5nIC0gZHN0Y2ltYWFkMXAuZHN0LWl0cy5kZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALsFVa7OTiUSni1JNB+Tf2+CW733968ZYN1iHjjvs0kj0wuaBTnOf7XtKy4FaDSSQeKzTOAOwRFyPPOcS643y95dyx6miXrb0dLXSwJ5L21ClIhsZ7hp5jrIIePdT\/eaRzyAJ4stqeuBUKAW0ydGunBeS0bjzxylV4I1408nb5iq7fyFWllA4CAkbMOQW79vdEmoZsXHTt2pEoH7GbUZEfEpmjMwD2CDYtv\/D9nr2igJzEC04FFZHLWhGpjv+hyeX+bFoXle7ay3pFN+cT1ZD8EoQ3pXn54ciXDuciAl6l87nsARJ4vS68QBCWDdq2H4hUFicvqQ3uQz1AbPiBU6XLUCAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAPHCisZyPf\/fuuQEW5LyzZSYMwBRYVR6kk\/M2ZNx6TrUEwmOb10RQ3G97bLAshN44g5lWdPYz4EOt6d2o71etIjf79f+IR0MAjEgBB2HThaHcMU9KG229Ftcauie9XeurngMawTRu60YqH7+go8EMf6a1Kdnx37DMy\/1LRlsYJVfEoOCab3GgcIdXrRSYWqsY4SVJZiTPYdqz9vmNPSXXiDSOTl6qXHV\/f53WTS2V5aIQbuJJziXlceusuVNny0o5h+j6ovZ1HhEGAu3lpD+8kY8KUqA4kXMH3VNZqzHBYazJx\/QBB3bG45cZSOvV3gUOnGBgiv9NBWjhvmY0fC3J6Q=="]}]}`, - Iat: int64(1513165889) - 1, - Err: errors.New("Token used before issued"), - }, - { - Name: "Valid Token with RS256 signature verified against correct JWKS document but outdated", - TokenString: "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6IllEQlVocWRXa3NLWGRHdVgwc3l0amFVdXhoQSIsImtpZCI6IllEQlVocWRXa3NLWGRHdVgwc3l0amFVdXhoQSJ9.eyJhdWQiOiJjaHJvbm9ncmFmIiwiaXNzIjoiaHR0cHM6Ly9kc3RjaW1hYWQxcC5kc3QtaXRzLmRlL2FkZnMiLCJpYXQiOjE1MTMxNjU4ODksImV4cCI6MTUxMzE2OTQ4OSwiYXV0aF90aW1lIjoxNTEzMTY1ODg4LCJzdWIiOiJlWVYzamRsZE55RlkxcUZGSDRvQWRCdkRGZmJWZm51RzI5SGlIa1N1andrPSIsInVwbiI6ImJzY0Bkc3QtaXRzLmRlIiwidW5pcXVlX25hbWUiOiJEU1RcXGJzYyIsInNpZCI6IlMtMS01LTIxLTI1MDUxNTEzOTgtMjY2MTAyODEwOS0zNzU0MjY1ODIwLTExMDQifQ.nK51Ui4XN45SVul9igNaKFQd-F63BNstBzW-T5LBVm_ANHCEHyP3_88C3ffkkQIi3PxYacRJGtfswP35ws7YJUcNp-GoGZARqz62NpMtbQyhos6mCaVXwPoxPbrZx4AkMQgxkZwJcOzceX7mpjcT3kCth30chN3lkhzSjGrXe4ZDOAV25liS-dsdBiqDiaTB91sS534GM76qJQxFUs51oSbYTRdCN1VJ0XopMcasfVDzFrtSbyvEIVXlpKK2HplnhheqF4QHrM_3cjV_NGRr3tYLe-AGTdDXKWlJD1GDz1ECXeMGQHPoz3U8cqNsFLYBstIlCgfnBWgWsPZSvJPJUg", - JwksDocument: 
`{"keys":[{"kty":"RSA","use":"sig","alg":"RS256","kid":"YDBUhqdWksKXdGuX0sytjaUuxhA","x5t":"YDBUhqdWksKXdGuX0sytjaUuxhA","n":"uwVVrs5OJRKeLUk0H5N_b4Jbvff3rxlg3WIeOO-zSSPTC5oFOc5_te0rLgVoNJJB4rNM4A7BEXI885xLrjfL3l3LHqaJetvR0tdLAnkvbUKUiGxnuGnmOsgh491P95pHPIAniy2p64FQoBbTJ0a6cF5LRuPPHKVXgjXjTydvmKrt_IVaWUDgICRsw5Bbv290SahmxcdO3akSgfsZtRkR8SmaMzAPYINi2_8P2evaKAnMQLTgUVkctaEamO_6HJ5f5sWheV7trLekU35xPVkPwShDelefnhyJcO5yICXqXzuewBEni9LrxAEJYN2rYfiFQWJy-pDe5DPUBs-IFTpctQ","e":"AQAB","x5c":["MIIC6DCCAdCgAwIBAgIQPszqLhbrpZlE+jEJTyJg7jANBgkqhkiG9w0BAQsFADAwMS4wLAYDVQQDEyVBREZTIFNpZ25pbmcgLSBkc3RjaW1hYWQxcC5kc3QtaXRzLmRlMB4XDTE3MTIwNDE0MDEwOFoXDTE4MTIwNDE0MDEwOFowMDEuMCwGA1UEAxMlQURGUyBTaWduaW5nIC0gZHN0Y2ltYWFkMXAuZHN0LWl0cy5kZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALsFVa7OTiUSni1JNB+Tf2+CW733968ZYN1iHjjvs0kj0wuaBTnOf7XtKy4FaDSSQeKzTOAOwRFyPPOcS643y95dyx6miXrb0dLXSwJ5L21ClIhsZ7hp5jrIIePdT\/eaRzyAJ4stqeuBUKAW0ydGunBeS0bjzxylV4I1408nb5iq7fyFWllA4CAkbMOQW79vdEmoZsXHTt2pEoH7GbUZEfEpmjMwD2CDYtv\/D9nr2igJzEC04FFZHLWhGpjv+hyeX+bFoXle7ay3pFN+cT1ZD8EoQ3pXn54ciXDuciAl6l87nsARJ4vS68QBCWDdq2H4hUFicvqQ3uQz1AbPiBU6XLUCAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAPHCisZyPf\/fuuQEW5LyzZSYMwBRYVR6kk\/M2ZNx6TrUEwmOb10RQ3G97bLAshN44g5lWdPYz4EOt6d2o71etIjf79f+IR0MAjEgBB2HThaHcMU9KG229Ftcauie9XeurngMawTRu60YqH7+go8EMf6a1Kdnx37DMy\/1LRlsYJVfEoOCab3GgcIdXrRSYWqsY4SVJZiTPYdqz9vmNPSXXiDSOTl6qXHV\/f53WTS2V5aIQbuJJziXlceusuVNny0o5h+j6ovZ1HhEGAu3lpD+8kY8KUqA4kXMH3VNZqzHBYazJx\/QBB3bG45cZSOvV3gUOnGBgiv9NBWjhvmY0fC3J6Q=="]}]}`, - Iat: int64(1513165889) + 3601, - Err: errors.New("Token is expired"), - }, - { - Name: "Valid Token with RS256 signature verified against empty JWKS document", - TokenString: "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6IllEQlVocWRXa3NLWGRHdVgwc3l0amFVdXhoQSIsImtpZCI6IllEQlVocWRXa3NLWGRHdVgwc3l0amFVdXhoQSJ9.eyJhdWQiOiJjaHJvbm9ncmFmIiwiaXNzIjoiaHR0cHM6Ly9kc3RjaW1hYWQxcC5kc3QtaXRzLmRlL2FkZnMiLCJpYXQiOjE1MTMxNjU4ODksImV4cCI6MTUxMzE2OTQ4OSwiYXV0aF90aW1lIjoxNTEzMTY1ODg4LCJzdWIiOiJlWVYzamRsZE55RlkxcUZGSDRvQWRCdkRGZmJWZm51RzI5SGlIa1N1andrPSIsInVwbiI6ImJzY0Bkc3QtaXRzLmRlIiwidW5pcXVlX25hbWUiOiJEU1RcXGJzYyIsInNpZCI6IlMtMS01LTIxLTI1MDUxNTEzOTgtMjY2MTAyODEwOS0zNzU0MjY1ODIwLTExMDQifQ.nK51Ui4XN45SVul9igNaKFQd-F63BNstBzW-T5LBVm_ANHCEHyP3_88C3ffkkQIi3PxYacRJGtfswP35ws7YJUcNp-GoGZARqz62NpMtbQyhos6mCaVXwPoxPbrZx4AkMQgxkZwJcOzceX7mpjcT3kCth30chN3lkhzSjGrXe4ZDOAV25liS-dsdBiqDiaTB91sS534GM76qJQxFUs51oSbYTRdCN1VJ0XopMcasfVDzFrtSbyvEIVXlpKK2HplnhheqF4QHrM_3cjV_NGRr3tYLe-AGTdDXKWlJD1GDz1ECXeMGQHPoz3U8cqNsFLYBstIlCgfnBWgWsPZSvJPJUg", - JwksDocument: "", - Iat: int64(1513165889), - Err: errors.New("unexpected end of JSON input"), - }, - { - Name: "Invalid Token", - Err: errors.New("token contains an invalid number of segments"), - }, - } - - for _, tt := range tests { - t.Run(tt.Name, func(t *testing.T) { - // mock JWKS server - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - io.WriteString(w, tt.JwksDocument) - })) - defer ts.Close() - - j := oauth2.JWT{ - Jwksurl: ts.URL, - Now: func() time.Time { - return time.Unix(tt.Iat, 0) - }, - } - _, err := j.GetClaims(tt.TokenString) - if tt.Err != nil { - if err != nil { - if tt.Err.Error() != err.Error() { - t.Errorf("Error in test %s expected error: %v actual: %v", tt.Name, tt.Err, err) - } // else: that's what we expect - } else { - t.Errorf("Error in test %s expected error: %v actual: none", tt.Name, tt.Err) - } - } else { - if err != nil { - t.Errorf("Error in tt %s: %v", tt.Name, err) - } // else: that's what we 
expect - } - }) - } -} - -func TestJWT_ExtendedPrincipal(t *testing.T) { - history := time.Unix(-446774400, 0) - type fields struct { - Now func() time.Time - } - type args struct { - ctx context.Context - principal oauth2.Principal - extension time.Duration - } - tests := []struct { - name string - fields fields - args args - want oauth2.Principal - wantErr bool - }{ - { - name: "Extend principal by one hour", - fields: fields{ - Now: func() time.Time { - return history - }, - }, - args: args{ - ctx: context.Background(), - principal: oauth2.Principal{ - ExpiresAt: history, - }, - extension: time.Hour, - }, - want: oauth2.Principal{ - ExpiresAt: history.Add(time.Hour), - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - j := &oauth2.JWT{ - Now: tt.fields.Now, - } - got, err := j.ExtendedPrincipal(tt.args.ctx, tt.args.principal, tt.args.extension) - if (err != nil) != tt.wantErr { - t.Errorf("JWT.ExtendedPrincipal() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("JWT.ExtendedPrincipal() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/chronograf/oauth2/mux.go b/chronograf/oauth2/mux.go deleted file mode 100644 index bc0c4132f40..00000000000 --- a/chronograf/oauth2/mux.go +++ /dev/null @@ -1,201 +0,0 @@ -package oauth2 - -import ( - "net/http" - "path" - "time" - - "github.com/influxdata/influxdb/v2/chronograf" - "golang.org/x/oauth2" -) - -// Check to ensure AuthMux is an oauth2.Mux -var _ Mux = &AuthMux{} - -// TenMinutes is the default length of time to get a response back from the OAuth provider -const TenMinutes = 10 * time.Minute - -// NewAuthMux constructs a Mux handler that checks a cookie against the authenticator -func NewAuthMux(p Provider, a Authenticator, t Tokenizer, basepath string, l chronograf.Logger, UseIDToken bool) *AuthMux { - return &AuthMux{ - Provider: p, - Auth: a, - Tokens: t, - SuccessURL: path.Join(basepath, "/"), - FailureURL: path.Join(basepath, "/login"), - Now: DefaultNowTime, - Logger: l, - UseIDToken: UseIDToken, - } -} - -// AuthMux services an Oauth2 interaction with a provider and browser and -// stores the resultant token in the user's browser as a cookie. The benefit of -// this is that the cookie's authenticity can be verified independently by any -// Chronograf instance as long as the Authenticator has no external -// dependencies (e.g. on a Database). -type AuthMux struct { - Provider Provider // Provider is the OAuth2 service - Auth Authenticator // Auth is used to Authorize after successful OAuth2 callback and Expire on Logout - Tokens Tokenizer // Tokens is used to create and validate OAuth2 "state" - Logger chronograf.Logger // Logger is used to give some more information about the OAuth2 process - SuccessURL string // SuccessURL is redirect location after successful authorization - FailureURL string // FailureURL is redirect location after authorization failure - Now func() time.Time // Now returns the current time (for testing) - UseIDToken bool // UseIDToken enables OpenID id_token support -} - -// Login uses a Cookie with a random string as the state validation method. JWTs are -// a good choice here for encoding because they can be validated without -// storing state. Login returns a handler that redirects to the providers OAuth login. 
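// In effect, the OAuth2 "state" parameter round-trips as a short-lived JWT.
// A rough sketch of what Login produces, using the names defined in this file:
//
//	p := Principal{Subject: csrf, IssuedAt: now, ExpiresAt: now.Add(TenMinutes)}
//	state, _ := j.Tokens.Create(ctx, p)
//	url := conf.AuthCodeURL(string(state), oauth2.AccessTypeOnline)
//	// the browser is redirected to url and, on success, back to Callback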
-func (j *AuthMux) Login() http.Handler { - conf := j.Provider.Config() - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // We are creating a token with an encoded random string to prevent CSRF attacks - // This token will be validated during the OAuth callback. - // We'll give our users 10 minutes from this point to type in their - // oauth2 provider's password. - // If the callback is not received within 10 minutes, then authorization will fail. - csrf := randomString(32) // 32 is not important... just long - now := j.Now() - - // This token will be valid for 10 minutes. Any chronograf server will - // be able to validate this token. - p := Principal{ - Subject: csrf, - IssuedAt: now, - ExpiresAt: now.Add(TenMinutes), - } - token, err := j.Tokens.Create(r.Context(), p) - - // This is likely an internal server error - if err != nil { - j.Logger. - WithField("component", "auth"). - WithField("remote_addr", r.RemoteAddr). - WithField("method", r.Method). - WithField("url", r.URL). - Error("Internal authentication error: ", err.Error()) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - url := conf.AuthCodeURL(string(token), oauth2.AccessTypeOnline) - http.Redirect(w, r, url, http.StatusTemporaryRedirect) - }) -} - -// Callback is used by OAuth2 provider after authorization is granted. If -// granted, Callback will set a cookie with a month-long expiration. It is -// recommended that the value of the cookie be encoded as a JWT because the JWT -// can be validated without the need for saving state. The JWT contains the -// principal's identifier (e.g. email address). -func (j *AuthMux) Callback() http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - log := j.Logger. - WithField("component", "auth"). - WithField("remote_addr", r.RemoteAddr). - WithField("method", r.Method). - WithField("url", r.URL) - - state := r.FormValue("state") - // Check if the OAuth state token is valid to prevent CSRF - // The state variable we set is actually a token. We'll check - // if the token is valid. We don't need to know anything - // about the contents of the principal only that it hasn't expired. 
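// Note that the TenMinutes lifespan below must cover the expiry that Login()
// stamped into the state token; an expired or forged state fails validation
// and redirects the browser to FailureURL.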
- if _, err := j.Tokens.ValidPrincipal(r.Context(), Token(state), TenMinutes); err != nil { - log.Error("Invalid OAuth state received: ", err.Error()) - http.Redirect(w, r, j.FailureURL, http.StatusTemporaryRedirect) - return - } - - // Exchange the code with the provider to get the token - conf := j.Provider.Config() - code := r.FormValue("code") - token, err := conf.Exchange(r.Context(), code) - if err != nil { - log.Error("Unable to exchange code for token ", err.Error()) - http.Redirect(w, r, j.FailureURL, http.StatusTemporaryRedirect) - return - } - - if token.Extra("id_token") != nil && !j.UseIDToken { - log.Info("Found an extra id_token, but option --useidtoken is not set") - } - - // if we received an extra id_token, inspect it - var id string - var group string - if j.UseIDToken && token.Extra("id_token") != nil && token.Extra("id_token") != "" { - log.Debug("Found an extra id_token") - if provider, ok := j.Provider.(ExtendedProvider); ok { - log.Debug("Provider implements PrincipalIDFromClaims()") - tokenString, ok := token.Extra("id_token").(string) - if !ok { - log.Error("Cannot cast id_token as string") - http.Redirect(w, r, j.FailureURL, http.StatusTemporaryRedirect) - return - } - claims, err := j.Tokens.GetClaims(tokenString) - if err != nil { - log.Error("Parsing extra id_token failed:", err) - http.Redirect(w, r, j.FailureURL, http.StatusTemporaryRedirect) - return - } - log.Debug("Found claims: ", claims) - id, err = provider.PrincipalIDFromClaims(claims) - if err != nil { - log.Error("Requested claim not found in id_token:", err) - http.Redirect(w, r, j.FailureURL, http.StatusTemporaryRedirect) - return - } - group, err = provider.GroupFromClaims(claims) - if err != nil { - log.Error("Requested claim not found in id_token:", err) - http.Redirect(w, r, j.FailureURL, http.StatusTemporaryRedirect) - return - } - } else { - log.Debug("Provider does not implement PrincipalIDFromClaims()") - } - } else { - // otherwise perform an additional lookup - oauthClient := conf.Client(r.Context(), token) - // Using the token, get the principal identifier from the provider - id, err = j.Provider.PrincipalID(oauthClient) - if err != nil { - log.Error("Unable to get principal identifier ", err.Error()) - http.Redirect(w, r, j.FailureURL, http.StatusTemporaryRedirect) - return - } - group, err = j.Provider.Group(oauthClient) - if err != nil { - log.Error("Unable to get OAuth Group ", err.Error()) - http.Redirect(w, r, j.FailureURL, http.StatusTemporaryRedirect) - return - } - } - - p := Principal{ - Subject: id, - Issuer: j.Provider.Name(), - Group: group, - } - ctx := r.Context() - err = j.Auth.Authorize(ctx, w, p) - if err != nil { - log.Error("Unable to add session to response ", err.Error()) - http.Redirect(w, r, j.FailureURL, http.StatusTemporaryRedirect) - return - } - log.Info("User ", id, " is authenticated") - http.Redirect(w, r, j.SuccessURL, http.StatusTemporaryRedirect) - }) -} - -// Logout handler will expire our authentication cookie and redirect to the successURL -func (j *AuthMux) Logout() http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - j.Auth.Expire(w) - http.Redirect(w, r, j.SuccessURL, http.StatusTemporaryRedirect) - }) -} diff --git a/chronograf/oauth2/mux_test.go b/chronograf/oauth2/mux_test.go deleted file mode 100644 index 964cb08afdc..00000000000 --- a/chronograf/oauth2/mux_test.go +++ /dev/null @@ -1,221 +0,0 @@ -package oauth2 - -import ( - "encoding/json" - "net/http" - "net/http/cookiejar" - "net/http/httptest" - 
"net/url" - "testing" - "time" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -var testTime = time.Date(1985, time.October, 25, 18, 0, 0, 0, time.UTC) - -type mockCallbackResponse struct { - AccessToken string `json:"access_token"` -} - -// setupMuxTest produces an http.Client and an httptest.Server configured to -// use a particular http.Handler selected from a AuthMux. As this selection is -// done during the setup process, this configuration is performed by providing -// a function, and returning the desired handler. Cleanup is still the -// responsibility of the test writer, so the httptest.Server's Close() method -// should be deferred. -func setupMuxTest(response interface{}, selector func(*AuthMux) http.Handler) (*http.Client, *httptest.Server, *httptest.Server) { - provider := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - rw.Header().Set("content-type", "application/json") - rw.WriteHeader(http.StatusOK) - - body, _ := json.Marshal(response) - - rw.Write(body) - })) - - now := func() time.Time { - return testTime - } - mp := &MockProvider{ - Email: "biff@example.com", - ProviderURL: provider.URL, - Orgs: "", - } - mt := &YesManTokenizer{} - auth := &cookie{ - Name: DefaultCookieName, - Lifespan: 1 * time.Hour, - Inactivity: DefaultInactivityDuration, - Now: now, - Tokens: mt, - } - - useidtoken := false - - jm := NewAuthMux(mp, auth, mt, "", &chronograf.NoopLogger{}, useidtoken) - ts := httptest.NewServer(selector(jm)) - jar, _ := cookiejar.New(nil) - hc := http.Client{ - Jar: jar, - CheckRedirect: func(r *http.Request, via []*http.Request) error { - return http.ErrUseLastResponse - }, - } - return &hc, ts, provider -} - -// teardownMuxTest cleans up any resources created by setupMuxTest. This should -// be deferred in your test after setupMuxTest is called -func teardownMuxTest(hc *http.Client, backend *httptest.Server, provider *httptest.Server) { - provider.Close() - backend.Close() -} - -func Test_AuthMux_Logout_DeletesSessionCookie(t *testing.T) { - t.Parallel() - - var response interface{} - - hc, ts, prov := setupMuxTest(response, func(j *AuthMux) http.Handler { - return j.Logout() - }) - defer teardownMuxTest(hc, ts, prov) - - tsURL, _ := url.Parse(ts.URL) - - hc.Jar.SetCookies(tsURL, []*http.Cookie{ - &http.Cookie{ - Name: DefaultCookieName, - Value: "", - }, - }) - - resp, err := hc.Get(ts.URL) - if err != nil { - t.Fatal("Error communicating with Logout() handler: err:", err) - } - - if resp.StatusCode < 300 || resp.StatusCode >= 400 { - t.Fatal("Expected to be redirected, but received status code", resp.StatusCode) - } - - cookies := resp.Cookies() - if len(cookies) != 1 { - t.Fatal("Expected that cookie would be present but wasn't") - } - - c := cookies[0] - if c.Name != DefaultCookieName || c.Expires != testTime.Add(-1*time.Hour) { - t.Fatal("Expected cookie to be expired but wasn't") - } -} - -func Test_AuthMux_Login_RedirectsToCorrectURL(t *testing.T) { - t.Parallel() - - var response interface{} - - hc, ts, prov := setupMuxTest(response, func(j *AuthMux) http.Handler { - return j.Login() // Use Login handler for httptest server. 
- }) - defer teardownMuxTest(hc, ts, prov) - - resp, err := hc.Get(ts.URL) - if err != nil { - t.Fatal("Error communicating with Login() handler: err:", err) - } - - // Ensure we were redirected - if resp.StatusCode < 300 || resp.StatusCode >= 400 { - t.Fatal("Expected to be redirected, but received status code", resp.StatusCode) - } - - loc, err := resp.Location() - if err != nil { - t.Fatal("Expected a location to be redirected to, but wasn't present") - } - - if state := loc.Query().Get("state"); state != "HELLO?!MCFLY?!ANYONEINTHERE?!" { - t.Fatalf("Expected state to be %s set but was %s", "HELLO?!MCFLY?!ANYONEINTHERE?!", state) - } -} - -func Test_AuthMux_Callback_SetsCookie(t *testing.T) { - response := mockCallbackResponse{AccessToken: "123"} - hc, ts, prov := setupMuxTest(response, func(j *AuthMux) http.Handler { - return j.Callback() - }) - defer teardownMuxTest(hc, ts, prov) - - tsURL, _ := url.Parse(ts.URL) - - v := url.Values{ - "code": {"4815162342"}, - "state": {"foobar"}, - } - - tsURL.RawQuery = v.Encode() - - resp, err := hc.Get(tsURL.String()) - if err != nil { - t.Fatal("Error communicating with Callback() handler: err", err) - } - - // Ensure we were redirected - if resp.StatusCode < 300 || resp.StatusCode >= 400 { - t.Fatal("Expected to be redirected, but received status code", resp.StatusCode) - } - - // Check that cookie was set - cookies := resp.Cookies() - if count := len(cookies); count != 1 { - t.Fatal("Expected exactly one cookie to be set but found", count) - } - - c := cookies[0] - - if c.Name != DefaultCookieName { - t.Fatal("Expected cookie to be named", DefaultCookieName, "but was", c.Name) - } -} - -func Test_AuthMux_Callback_HandlesIdToken(t *testing.T) { - // body taken from ADFS4 - response := mockCallbackResponse{AccessToken: `eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6IllEQlVocWRXa3NLWGRHdVgwc3l0amFVdXhoQSJ9.eyJhdWQiOiJ1cm46bWljcm9zb2Z0OnVzZXJpbmZvIiwiaXNzIjoiaHR0cDovL2RzdGNpbWFhZDFwLmRzdC1pdHMuZGUvYWRmcy9zZXJ2aWNlcy90cnVzdCIsImlhdCI6MTUxNTcwMDU2NSwiZXhwIjoxNTE1NzA0MTY1LCJhcHB0eXBlIjoiQ29uZmlkZW50aWFsIiwiYXBwaWQiOiJjaHJvbm9ncmFmIiwiYXV0aG1ldGhvZCI6InVybjpvYXNpczpuYW1lczp0YzpTQU1MOjIuMDphYzpjbGFzc2VzOlBhc3N3b3JkUHJvdGVjdGVkVHJhbnNwb3J0IiwiYXV0aF90aW1lIjoiMjAxOC0wMS0xMVQxOTo1MToyNS44MDZaIiwidmVyIjoiMS4wIiwic2NwIjoib3BlbmlkIiwic3ViIjoiZVlWM2pkbGROeUZZMXFGRkg0b0FkQnZERmZiVmZudUcyOUhpSGtTdWp3az0ifQ.sf1qJys9LMUp2S232IRK2aTXiPCE93O-cUdYQQz7kg2woyD46KLwwKIYJVqMaqLspTn3OmaIhKtgx5ZXyAEtihODB1GOBK7DBNRBYCS1iqY_v2-Qwjf7hgaNaCqBjs0DZJspfp5G9MTykvD1FOtQNjPOcBW-i2bblG9L9jlmMbOZ3F7wrZMrroTSkiSn_gRiw2SnN8K7w8WrMEXNK2_jg9ZJ7aSHeUSBwkRNFRds2QNho3HWHg-zcsZFdZ4UGSt-6Az_0LY3yENMLj5us5Rl6Qzk_Re2dhFrlnlXlY1v1DEp3icCvvjkv6AeZWjTfW4qETZaCXUKtSyZ7d5_V1CRDQ", "token_type": "bearer", "expires_in": 3600, "resource": "urn:microsoft:userinfo", "refresh_token": 
"X9ZGO4H1bMk2bFeOfpv18BzAuFBzUPKQNfOEfdp60FkAAQAALPEBfj23FPEzajle-hm4DrDXp8-Kj53OqoVGalyZeuR-lfJzxpQXQhRAXZOUTuuQ8AQycByh9AylQYDA0jdMFW4FL4WL_6JhNh2JrtXCv2HQ9ozbUq9F7u_O0cY7u0P2pfNujQfk3ckYn-CMVjXbuwJTve6bXUR0JDp5c195bAVA5eFWyI-2uh432t7viyaIjAVbWxQF4fvimcpF1Et9cGodZHVsrZzGxKRnzwjYkWHsqm9go4KOeSKN6MlcWbjvS1UdMjQXSvoqSI00JnSMC3hxJZFn5JcmAPB1AMnJf4VvXZ5b-aOnwdX09YT8KayWkWekAsuZqTAsFwhZPVCRGWAFAADy0e2fTe6l-U6Cj_2bWsq6Snm1QEpWHXuwOJKWZJH-9yQn8KK3KzRowSzRuACzEIpZS5skrqXs_-2aOaZibNpjCEVyw8fF8GTw3VRLufsSrMQ5pD0KL7TppTGFpaqgwIH1yq6T8aRY4DeyoJkNpnO9cw1wuqnY7oGF-J25sfZ4XNWhk6o5e9A45PXhTilClyDKDLqTfdoIsG1Koc2ywqTIb-XI_EbWR3e4ijy8Kmlehw1kU9_xAG0MmmD2HTyGHZCBRgrskYCcHd-UNgCMrNAb5dZQ8NwpKtEL46qIq4R0lheTRRK8sOWzzuJXmvDEoJiIxqSR3Ma4MOISi-vsIsAuiEL9G1aMOkDRj-kDVmqrdKRAwYnN78AWY5EFfkQJyVBbiG882wBh9S0q3HUUCxzFerOvl4eDlVn6m18rRMz7CVZYBBltGtHRhEOQ4gumICR5JRrXAC50aBmUlhDiiMdbEIwJrvWrkhKE0oAJznqC7gleP0E4EOEh9r6CEGZ7Oj8X9Cdzjbuq2G1JGBm_yUvkhAcV61DjOiIQl35BpOfshveNZf_caUtNMa2i07BBmezve17-2kWGzRunr1BD1vMTz41z-H62fy4McR47WJjdDJnuy4DH5AZYQ6ooVxWCtEqeqRPYpzO0XdOdJGXFqXs9JzDKVXTgnHU443hZBC5H-BJkZDuuJ_ZWNKXf03JhouWkxXcdaMbuaQYOZJsUySVyJ5X4usrBFjW4udZAzy7mua-nJncbvcwoyVXiFlRfZiySXolQ9865N7XUnEk_2PijMLoVDATDbA09XuRySvngNsdsQ27p21dPxChXdtpD5ofNqKJ2FBzFKmxCkuX7L01N1nDpWQTuxhHF0JfxSKG5m3jcTx8Bd7Un94mTuAB7RuglDqkdQB9o4X9NHNGSdqGQaK-xeKoNCFWevk3VZoDoY9w2NqSNV2VIuqhy7SxtDSMjZKC5kiQi5EfGeTYZAvTwMYwaXb7K4WWtscy_ZE15EOCVeYi0hM1Ma8iFFTANkSRyX83Ju4SRphxRKnpKcJ2pPYH784I5HOm5sclhUL3aLeAA161QgxRBSa9YVIZfyXHyWQTcbNucNdhmdUZnKfRv1xtXcS9VAx2yAkoKFehZivEINX0Y500-WZ1eT_RXp0BfCKmJQ8Fu50oTaI-c5h2Q3Gp_LTSODNnMrjJiJxCLD_LD1fd1e8jTYDV3NroGlpWTuTdjMUm-Z1SMXaaJzQGEnNT6F8b6un9228L6YrDC_3MJ5J80VAHL5EO1GesdEWblugCL7AQDtFjNXq0lK8Aoo8X9_hlvDwgfdR16l8QALPT1HJVzlHPG8G3dRe50TKZnl3obU0WXN1KYG1EC4Qa3LyaVCIuGJYOeFqjMINrf7PoM368nS9yhrY08nnoHZbQ7IeA1KsNq2kANeH1doCNfWrXDwn8KxjYxZPEnzvlQ5M1RIzArOqzWL8NbftW1q2yCZZ4RVg0vOTVXsqWFnQIvWK-mkELa7bvByFzbtVHOJpc_2EKBKBNv6IYUENRCu2TOf6w7u42yvng7ccoXRTiUFUlKgVmswf9FzISxFd-YKgrzp3bMhC3gReGqcJuqEwnXPvOAY_BAkVMSd_ZaCFuyclRjFvUxrAg1T_cqOvRIlJ2Qq7z4u7W3BAo9BtFdj8QNLKJXtvvzXTprglRPDNP_QEPAkwZ_Uxa13vdYFcG18WCx4GbWQXchl5B7DnISobcdCH34M-I0xDZN98VWQVmLAfPniDUD30C8pfiYF7tW_EVy958Eg_JWVy0SstYEhV-y-adrJ1Oimjv0ptsWv-yErKBUD14aex9A_QqdnTXZUg.tqMb72eWAkAIvInuLp57NDyGxfYvms3NnhN-mllkYb7Xpd8gVbQFc2mYdzOOhtnfGuakyXYF4rZdJonQwzBO6C9KYuARciUU1Ms4bWPC-aeNO5t-aO_bDZbwC9qMPmq5ZuxG633BARGaw26fr0Z7qhcJMiou_EuaIehYTKkPB-mxtRAhxxyX91qqe0-PJnCHWoxizC4hDCUwp9Jb54tNf34BG3vtkXFX-kUARNfGucgKUkh6RYkhWiMBsMVoyWmkFXB5fYxmCAH5c5wDW6srKdyIDEWZInliuKbYR0p66vg1FfoSi4bBfrsm5NtCtLKG9V6Q0FEIA6tRRgHmKUGpkw", "refresh_token_expires_in": 28519, "scope": "openid", "id_token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6IllEQlVocWRXa3NLWGRHdVgwc3l0amFVdXhoQSIsImtpZCI6IllEQlVocWRXa3NLWGRHdVgwc3l0amFVdXhoQSJ9.eyJhdWQiOiJjaHJvbm9ncmFmIiwiaXNzIjoiaHR0cHM6Ly9kc3RjaW1hYWQxcC5kc3QtaXRzLmRlL2FkZnMiLCJpYXQiOjE1MTU3MDA1NjUsImV4cCI6MTUxNTcwNDE2NSwiYXV0aF90aW1lIjoxNTE1NzAwMjg1LCJzdWIiOiJlWVYzamRsZE55RlkxcUZGSDRvQWRCdkRGZmJWZm51RzI5SGlIa1N1andrPSIsInVwbiI6ImJzY0Bkc3QtaXRzLmRlIiwidW5pcXVlX25hbWUiOiJEU1RcXGJzYyIsInNpZCI6IlMtMS01LTIxLTI1MDUxNTEzOTgtMjY2MTAyODEwOS0zNzU0MjY1ODIwLTExMDQifQ.XD873K6NVRTJY1700NsflLJGZKFHJfNBjB81SlADVdAHbhnq7wkAZbGEEm8wFqvTKKysUl9EALzmDa2tR9nzohVvmHftIYBO0E-wPBzdzWWX0coEgpVAc-SysP-eIQWLsj8EaodaMkCgKO0FbTWOf4GaGIBZGklrr9EEk8VRSdbXbm6Sv9WVphezEzxq6JJBRBlCVibCnZjR5OYh1Vw_7E7P38ESPbpLY3hYYl2hz4y6dQJqCwGr7YP8KrDlYtbosZYgT7ayxokEJI1udEbX5PbAq5G6mj5rLfSOl85rMg-psZiivoM8dn9lEl2P7oT8rAvMWvQp-FIRQQHwqf9cxw`} - hc, ts, prov := setupMuxTest(response, func(j *AuthMux) 
http.Handler {
-		return j.Callback()
-	})
-	defer teardownMuxTest(hc, ts, prov)
-
-	tsURL, _ := url.Parse(ts.URL)
-
-	v := url.Values{
-		"code":  {"4815162342"},
-		"state": {"foobar"},
-	}
-
-	tsURL.RawQuery = v.Encode()
-
-	resp, err := hc.Get(tsURL.String())
-	if err != nil {
-		t.Fatal("Error communicating with Callback() handler: err", err)
-	}
-
-	// Ensure we were redirected
-	if resp.StatusCode < 300 || resp.StatusCode >= 400 {
-		t.Fatal("Expected to be redirected, but received status code", resp.StatusCode)
-	}
-
-	// Check that the cookie was set
-	cookies := resp.Cookies()
-	if count := len(cookies); count != 1 {
-		t.Fatal("Expected exactly one cookie to be set but found", count)
-	}
-
-	c := cookies[0]
-
-	if c.Name != DefaultCookieName {
-		t.Fatal("Expected cookie to be named", DefaultCookieName, "but was", c.Name)
-	}
-}
diff --git a/chronograf/oauth2/oauth2.go b/chronograf/oauth2/oauth2.go
deleted file mode 100644
index 5788a882fd9..00000000000
--- a/chronograf/oauth2/oauth2.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package oauth2
-
-import (
-	"context"
-	"errors"
-	"net/http"
-	"time"
-
-	gojwt "github.com/dgrijalva/jwt-go"
-	"golang.org/x/oauth2"
-)
-
-type principalKey string
-
-func (p principalKey) String() string {
-	return string(p)
-}
-
-var (
-	// PrincipalKey is used to pass the principal
-	// via context.Context to request-scoped
-	// functions.
-	PrincipalKey = principalKey("principal")
-	// ErrAuthentication means that the oauth2 exchange failed
-	ErrAuthentication = errors.New("user not authenticated")
-	// ErrOrgMembership means that the user is not in the OAuth2 filtered group
-	ErrOrgMembership = errors.New("not a member of the required organization")
-)
-
-/* Types */
-
-// Principal is any entity that can be authenticated
-type Principal struct {
-	Subject      string
-	Issuer       string
-	Organization string
-	Group        string
-	ExpiresAt    time.Time
-	IssuedAt     time.Time
-}
-
-/* Interfaces */
-
-// Provider defines the common parameters for all providers (RFC 6749)
-type Provider interface {
-	// ID is issued to the registered client by the authorization server (RFC 6749 Section 2.2)
-	ID() string
-	// Secret is associated with the ID (Section 2.2)
-	Secret() string
-	// Scopes is used by the authorization server to "scope" responses (Section 3.3)
-	Scopes() []string
-	// Config is the OAuth2 configuration settings for this provider
-	Config() *oauth2.Config
-	// PrincipalID will fetch the identifier to be associated with the principal.
-	PrincipalID(provider *http.Client) (string, error)
-	// Name is the name of the Provider
-	Name() string
-	// Group is a comma-delimited list of groups and organizations for a provider
-	// TODO: This will break if there are any group names that contain commas.
-	// I think this is okay, but I'm not 100% certain.
-	Group(provider *http.Client) (string, error)
-}
-
-// Mux is a collection of handlers responsible for servicing an OAuth2 interaction between a browser and a provider
-type Mux interface {
-	Login() http.Handler
-	Logout() http.Handler
-	Callback() http.Handler
-}
-
-// Authenticator represents a service for authenticating users.
-type Authenticator interface {
-	// Validate returns the Principal associated with an authenticated and authorized
-	// entity if successful.
-	Validate(context.Context, *http.Request) (Principal, error)
-	// Authorize will grant privileges to a Principal
-	Authorize(context.Context, http.ResponseWriter, Principal) error
-	// Extend will extend the lifetime of an already validated Principal
-	Extend(context.Context, http.ResponseWriter, Principal) (Principal, error)
-	// Expire revokes privileges from a Principal
-	Expire(http.ResponseWriter)
-}
-
-// Token represents a time-dependent reference (i.e. an identifier) that maps back
-// to the sensitive data through a tokenization system
-type Token string
-
-// Tokenizer substitutes a sensitive data element (Principal) with a
-// non-sensitive equivalent, referred to as a token, that has no extrinsic
-// or exploitable meaning or value.
-type Tokenizer interface {
-	// Create issues a token at the Principal's IssuedAt that lasts until the Principal's ExpiresAt
-	Create(context.Context, Principal) (Token, error)
-	// ValidPrincipal checks if the token has a valid Principal and requires
-	// a lifespan duration to ensure it complies with possible server runtime arguments.
-	ValidPrincipal(ctx context.Context, token Token, lifespan time.Duration) (Principal, error)
-	// ExtendedPrincipal adds the extension to the principal's lifespan.
-	ExtendedPrincipal(ctx context.Context, principal Principal, extension time.Duration) (Principal, error)
-	// GetClaims returns a map with verified claims
-	GetClaims(tokenString string) (gojwt.MapClaims, error)
-}
diff --git a/chronograf/oauth2/oauth2_test.go b/chronograf/oauth2/oauth2_test.go
deleted file mode 100644
index 13034892980..00000000000
--- a/chronograf/oauth2/oauth2_test.go
+++ /dev/null
@@ -1,128 +0,0 @@
-package oauth2
-
-import (
-	"context"
-	"net/http"
-	"net/http/httptest"
-	"net/url"
-	"strings"
-	"time"
-
-	goauth "golang.org/x/oauth2"
-
-	gojwt "github.com/dgrijalva/jwt-go"
-	"github.com/influxdata/influxdb/v2/chronograf"
-)
-
-var _ Provider = &MockProvider{}
-
-type MockProvider struct {
-	Email string
-	Orgs  string
-
-	ProviderURL string
-}
-
-func (mp *MockProvider) Config() *goauth.Config {
-	return &goauth.Config{
-		RedirectURL:  "http://www.example.com",
-		ClientID:     "4815162342",
-		ClientSecret: "8675309",
-		Endpoint: goauth.Endpoint{
-			AuthURL:  mp.ProviderURL + "/oauth/auth",
-			TokenURL: mp.ProviderURL + "/oauth/token",
-		},
-	}
-}
-
-func (mp *MockProvider) ID() string {
-	return "8675309"
-}
-
-func (mp *MockProvider) Name() string {
-	return "mockly"
-}
-
-func (mp *MockProvider) PrincipalID(provider *http.Client) (string, error) {
-	return mp.Email, nil
-}
-
-func (mp *MockProvider) PrincipalIDFromClaims(claims gojwt.MapClaims) (string, error) {
-	return mp.Email, nil
-}
-
-func (mp *MockProvider) GroupFromClaims(claims gojwt.MapClaims) (string, error) {
-	email := strings.Split(mp.Email, "@")
-	if len(email) != 2 {
-		//g.Logger.Error("malformed email address, expected %q to contain @ symbol", id)
-		return "DEFAULT", nil
-	}
-
-	return email[1], nil
-}
-
-func (mp *MockProvider) Group(provider *http.Client) (string, error) {
-	return mp.Orgs, nil
-}
-
-func (mp *MockProvider) Scopes() []string {
-	return []string{}
-}
-
-func (mp *MockProvider) Secret() string {
-	return "4815162342"
-}
-
-var _ Tokenizer = &YesManTokenizer{}
-
-type YesManTokenizer struct{}
-
-func (y *YesManTokenizer) ValidPrincipal(ctx context.Context, token Token, duration time.Duration) (Principal, error) {
-	return Principal{
-		Subject: "biff@example.com",
-		Issuer:  "Biff Tannen's Pleasure Paradise",
-	}, nil
-}
-
-func (y *YesManTokenizer)
Create(ctx context.Context, p Principal) (Token, error) { - return Token("HELLO?!MCFLY?!ANYONEINTHERE?!"), nil -} - -func (y *YesManTokenizer) ExtendedPrincipal(ctx context.Context, p Principal, ext time.Duration) (Principal, error) { - return p, nil -} - -func (y *YesManTokenizer) GetClaims(tokenString string) (gojwt.MapClaims, error) { - return gojwt.MapClaims{}, nil -} - -func NewTestTripper(log chronograf.Logger, ts *httptest.Server, rt http.RoundTripper) (*TestTripper, error) { - url, err := url.Parse(ts.URL) - if err != nil { - return nil, err - } - return &TestTripper{log, rt, url}, nil -} - -type TestTripper struct { - Log chronograf.Logger - - rt http.RoundTripper - tsURL *url.URL -} - -// RoundTrip modifies the Hostname of the incoming request to be directed to the -// test server. -func (tt *TestTripper) RoundTrip(r *http.Request) (*http.Response, error) { - tt.Log. - WithField("component", "test"). - WithField("remote_addr", r.RemoteAddr). - WithField("method", r.Method). - WithField("url", r.URL). - Info("Request") - - r.URL.Host = tt.tsURL.Host - r.URL.Scheme = tt.tsURL.Scheme - - return tt.rt.RoundTrip(r) -} diff --git a/chronograf/oauth2/time.go b/chronograf/oauth2/time.go deleted file mode 100644 index 529e1c4b70d..00000000000 --- a/chronograf/oauth2/time.go +++ /dev/null @@ -1,6 +0,0 @@ -package oauth2 - -import "time" - -// DefaultNowTime returns UTC time at the present moment -var DefaultNowTime = func() time.Time { return time.Now().UTC() } diff --git a/chronograf/organizations/dashboards.go b/chronograf/organizations/dashboards.go deleted file mode 100644 index 1b79b30a119..00000000000 --- a/chronograf/organizations/dashboards.go +++ /dev/null @@ -1,112 +0,0 @@ -package organizations - -import ( - "context" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -// ensure that DashboardsStore implements chronograf.DashboardStore -var _ chronograf.DashboardsStore = &DashboardsStore{} - -// DashboardsStore facade on a DashboardStore that filters dashboards -// by organization. -type DashboardsStore struct { - store chronograf.DashboardsStore - organization string -} - -// NewDashboardsStore creates a new DashboardsStore from an existing -// chronograf.DashboardStore and an organization string -func NewDashboardsStore(s chronograf.DashboardsStore, org string) *DashboardsStore { - return &DashboardsStore{ - store: s, - organization: org, - } -} - -// All retrieves all dashboards from the underlying DashboardStore and filters them -// by organization. -func (s *DashboardsStore) All(ctx context.Context) ([]chronograf.Dashboard, error) { - err := validOrganization(ctx) - if err != nil { - return nil, err - } - - ds, err := s.store.All(ctx) - if err != nil { - return nil, err - } - - // This filters dashboards without allocating - // https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating - dashboards := ds[:0] - for _, d := range ds { - if d.Organization == s.organization { - dashboards = append(dashboards, d) - } - } - - return dashboards, nil -} - -// Add creates a new Dashboard in the DashboardsStore with dashboard.Organization set to be the -// organization from the dashboard store. 
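Editor's note: All() above uses the SliceTricks "filtering without allocating" idiom linked in its comment, and the sibling stores for servers and sources later in this diff repeat it verbatim. A standalone sketch of the idiom and its main caveat, with a hypothetical Dashboard type standing in for chronograf.Dashboard:

```go
package main

import "fmt"

// Dashboard is a hypothetical stand-in for chronograf.Dashboard.
type Dashboard struct {
	Name         string
	Organization string
}

// filterByOrg keeps only the dashboards in org, reusing the backing array of
// ds instead of allocating a new slice.
func filterByOrg(ds []Dashboard, org string) []Dashboard {
	out := ds[:0] // zero-length slice sharing ds's backing array
	for _, d := range ds {
		if d.Organization == org {
			out = append(out, d)
		}
	}
	return out
}

func main() {
	ds := []Dashboard{
		{Name: "howdy", Organization: "1337"},
		{Name: "doody", Organization: "1338"},
	}
	kept := filterByOrg(ds, "1338")
	fmt.Println(kept)  // [{doody 1338}]
	fmt.Println(ds[0]) // {doody 1338}: the input's backing array was overwritten
}
```

The caveat shown in main() is why the idiom is safe in these stores: the slice returned by the underlying All() is discarded immediately after filtering, so clobbering its backing array cannot surprise any other caller.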
-func (s *DashboardsStore) Add(ctx context.Context, d chronograf.Dashboard) (chronograf.Dashboard, error) { - err := validOrganization(ctx) - if err != nil { - return chronograf.Dashboard{}, err - } - - d.Organization = s.organization - return s.store.Add(ctx, d) -} - -// Delete the dashboard from DashboardsStore -func (s *DashboardsStore) Delete(ctx context.Context, d chronograf.Dashboard) error { - err := validOrganization(ctx) - if err != nil { - return err - } - - d, err = s.store.Get(ctx, d.ID) - if err != nil { - return err - } - - return s.store.Delete(ctx, d) -} - -// Get returns a Dashboard if the id exists and belongs to the organization that is set. -func (s *DashboardsStore) Get(ctx context.Context, id chronograf.DashboardID) (chronograf.Dashboard, error) { - err := validOrganization(ctx) - if err != nil { - return chronograf.Dashboard{}, err - } - - d, err := s.store.Get(ctx, id) - if err != nil { - return chronograf.Dashboard{}, err - } - - if d.Organization != s.organization { - return chronograf.Dashboard{}, chronograf.ErrDashboardNotFound - } - - return d, nil -} - -// Update the dashboard in DashboardsStore. -func (s *DashboardsStore) Update(ctx context.Context, d chronograf.Dashboard) error { - err := validOrganization(ctx) - if err != nil { - return err - } - - _, err = s.store.Get(ctx, d.ID) - if err != nil { - return err - } - - return s.store.Update(ctx, d) -} diff --git a/chronograf/organizations/dashboards_test.go b/chronograf/organizations/dashboards_test.go deleted file mode 100644 index 8b929bd8d0a..00000000000 --- a/chronograf/organizations/dashboards_test.go +++ /dev/null @@ -1,342 +0,0 @@ -package organizations_test - -import ( - "context" - "fmt" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/mocks" - "github.com/influxdata/influxdb/v2/chronograf/organizations" -) - -// IgnoreFields is used because ID cannot be predicted reliably -// EquateEmpty is used because we want nil slices, arrays, and maps to be equal to the empty map -var dashboardCmpOptions = cmp.Options{ - cmpopts.EquateEmpty(), - cmpopts.IgnoreFields(chronograf.Dashboard{}, "ID"), -} - -func TestDashboards_All(t *testing.T) { - type fields struct { - DashboardsStore chronograf.DashboardsStore - } - type args struct { - organization string - } - tests := []struct { - name string - args args - fields fields - want []chronograf.Dashboard - wantErr bool - }{ - { - name: "No Dashboards", - fields: fields{ - DashboardsStore: &mocks.DashboardsStore{ - AllF: func(ctx context.Context) ([]chronograf.Dashboard, error) { - return nil, fmt.Errorf("no Dashboards") - }, - }, - }, - wantErr: true, - }, - { - name: "All Dashboards", - fields: fields{ - DashboardsStore: &mocks.DashboardsStore{ - AllF: func(ctx context.Context) ([]chronograf.Dashboard, error) { - return []chronograf.Dashboard{ - { - Name: "howdy", - Organization: "1337", - }, - { - Name: "doody", - Organization: "1338", - }, - }, nil - }, - }, - }, - args: args{ - organization: "1337", - }, - want: []chronograf.Dashboard{ - { - Name: "howdy", - Organization: "1337", - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := organizations.NewDashboardsStore(tt.fields.DashboardsStore, tt.args.organization) - ctx := context.WithValue(context.Background(), organizations.ContextKey, tt.args.organization) - gots, err := s.All(ctx) - if (err != 
nil) != tt.wantErr { - t.Errorf("%q. DashboardsStore.All() error = %v, wantErr %v", tt.name, err, tt.wantErr) - return - } - for i, got := range gots { - if diff := cmp.Diff(got, tt.want[i], dashboardCmpOptions...); diff != "" { - t.Errorf("%q. DashboardsStore.All():\n-got/+want\ndiff %s", tt.name, diff) - } - } - }) - } -} - -func TestDashboards_Add(t *testing.T) { - type fields struct { - DashboardsStore chronograf.DashboardsStore - } - type args struct { - organization string - ctx context.Context - dashboard chronograf.Dashboard - } - tests := []struct { - name string - args args - fields fields - want chronograf.Dashboard - wantErr bool - }{ - { - name: "Add Dashboard", - fields: fields{ - DashboardsStore: &mocks.DashboardsStore{ - AddF: func(ctx context.Context, s chronograf.Dashboard) (chronograf.Dashboard, error) { - return s, nil - }, - GetF: func(ctx context.Context, id chronograf.DashboardID) (chronograf.Dashboard, error) { - return chronograf.Dashboard{ - ID: 1229, - Name: "howdy", - Organization: "1337", - }, nil - }, - }, - }, - args: args{ - organization: "1337", - ctx: context.Background(), - dashboard: chronograf.Dashboard{ - ID: 1229, - Name: "howdy", - }, - }, - want: chronograf.Dashboard{ - Name: "howdy", - Organization: "1337", - }, - }, - } - for _, tt := range tests { - s := organizations.NewDashboardsStore(tt.fields.DashboardsStore, tt.args.organization) - tt.args.ctx = context.WithValue(tt.args.ctx, organizations.ContextKey, tt.args.organization) - d, err := s.Add(tt.args.ctx, tt.args.dashboard) - if (err != nil) != tt.wantErr { - t.Errorf("%q. DashboardsStore.Add() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - got, err := s.Get(tt.args.ctx, d.ID) - if err != nil { - t.Fatal(err) - } - if diff := cmp.Diff(got, tt.want, dashboardCmpOptions...); diff != "" { - t.Errorf("%q. DashboardsStore.Add():\n-got/+want\ndiff %s", tt.name, diff) - } - } -} - -func TestDashboards_Delete(t *testing.T) { - type fields struct { - DashboardsStore chronograf.DashboardsStore - } - type args struct { - organization string - ctx context.Context - dashboard chronograf.Dashboard - } - tests := []struct { - name string - fields fields - args args - addFirst bool - wantErr bool - }{ - { - name: "Delete dashboard", - fields: fields{ - DashboardsStore: &mocks.DashboardsStore{ - DeleteF: func(ctx context.Context, s chronograf.Dashboard) error { - return nil - }, - GetF: func(ctx context.Context, id chronograf.DashboardID) (chronograf.Dashboard, error) { - return chronograf.Dashboard{ - ID: 1229, - Name: "howdy", - Organization: "1337", - }, nil - }, - }, - }, - args: args{ - organization: "1337", - ctx: context.Background(), - dashboard: chronograf.Dashboard{ - ID: 1229, - Name: "howdy", - Organization: "1337", - }, - }, - addFirst: true, - }, - } - for _, tt := range tests { - s := organizations.NewDashboardsStore(tt.fields.DashboardsStore, tt.args.organization) - tt.args.ctx = context.WithValue(tt.args.ctx, organizations.ContextKey, tt.args.organization) - err := s.Delete(tt.args.ctx, tt.args.dashboard) - if (err != nil) != tt.wantErr { - t.Errorf("%q. 
DashboardsStore.All() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - } -} - -func TestDashboards_Get(t *testing.T) { - type fields struct { - DashboardsStore chronograf.DashboardsStore - } - type args struct { - organization string - ctx context.Context - dashboard chronograf.Dashboard - } - tests := []struct { - name string - fields fields - args args - want chronograf.Dashboard - wantErr bool - }{ - { - name: "Get Dashboard", - fields: fields{ - DashboardsStore: &mocks.DashboardsStore{ - GetF: func(ctx context.Context, id chronograf.DashboardID) (chronograf.Dashboard, error) { - return chronograf.Dashboard{ - ID: 1229, - Name: "howdy", - Organization: "1337", - }, nil - }, - }, - }, - args: args{ - organization: "1337", - ctx: context.Background(), - dashboard: chronograf.Dashboard{ - ID: 1229, - Name: "howdy", - Organization: "1337", - }, - }, - want: chronograf.Dashboard{ - ID: 1229, - Name: "howdy", - Organization: "1337", - }, - }, - } - for _, tt := range tests { - s := organizations.NewDashboardsStore(tt.fields.DashboardsStore, tt.args.organization) - tt.args.ctx = context.WithValue(tt.args.ctx, organizations.ContextKey, tt.args.organization) - got, err := s.Get(tt.args.ctx, tt.args.dashboard.ID) - if (err != nil) != tt.wantErr { - t.Errorf("%q. DashboardsStore.Get() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - if diff := cmp.Diff(got, tt.want, dashboardCmpOptions...); diff != "" { - t.Errorf("%q. DashboardsStore.Get():\n-got/+want\ndiff %s", tt.name, diff) - } - } -} - -func TestDashboards_Update(t *testing.T) { - type fields struct { - DashboardsStore chronograf.DashboardsStore - } - type args struct { - organization string - ctx context.Context - dashboard chronograf.Dashboard - name string - } - tests := []struct { - name string - fields fields - args args - want chronograf.Dashboard - addFirst bool - wantErr bool - }{ - { - name: "Update Dashboard Name", - fields: fields{ - DashboardsStore: &mocks.DashboardsStore{ - UpdateF: func(ctx context.Context, s chronograf.Dashboard) error { - return nil - }, - GetF: func(ctx context.Context, id chronograf.DashboardID) (chronograf.Dashboard, error) { - return chronograf.Dashboard{ - ID: 1229, - Name: "doody", - Organization: "1337", - }, nil - }, - }, - }, - args: args{ - organization: "1337", - ctx: context.Background(), - dashboard: chronograf.Dashboard{ - ID: 1229, - Name: "howdy", - Organization: "1337", - }, - name: "doody", - }, - want: chronograf.Dashboard{ - Name: "doody", - Organization: "1337", - }, - addFirst: true, - }, - } - for _, tt := range tests { - if tt.args.name != "" { - tt.args.dashboard.Name = tt.args.name - } - s := organizations.NewDashboardsStore(tt.fields.DashboardsStore, tt.args.organization) - tt.args.ctx = context.WithValue(tt.args.ctx, organizations.ContextKey, tt.args.organization) - err := s.Update(tt.args.ctx, tt.args.dashboard) - if (err != nil) != tt.wantErr { - t.Errorf("%q. DashboardsStore.Update() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - got, err := s.Get(tt.args.ctx, tt.args.dashboard.ID) - if err != nil { - t.Fatal(err) - } - if diff := cmp.Diff(got, tt.want, dashboardCmpOptions...); diff != "" { - t.Errorf("%q. 
DashboardsStore.Update():\n-got/+want\ndiff %s", tt.name, diff) - } - } -} diff --git a/chronograf/organizations/org_config.go b/chronograf/organizations/org_config.go deleted file mode 100644 index 1378a1e6eb3..00000000000 --- a/chronograf/organizations/org_config.go +++ /dev/null @@ -1,51 +0,0 @@ -package organizations - -import ( - "context" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -// ensure that OrganizationConfig implements chronograf.OrganizationConfigStore -var _ chronograf.OrganizationConfigStore = &OrganizationConfigStore{} - -// OrganizationConfigStore facade on a OrganizationConfig that filters OrganizationConfigs by organization. -type OrganizationConfigStore struct { - store chronograf.OrganizationConfigStore - organization string -} - -// NewOrganizationConfigStore creates a new OrganizationConfigStore from an existing -// chronograf.OrganizationConfigStore and an organization string -func NewOrganizationConfigStore(s chronograf.OrganizationConfigStore, orgID string) *OrganizationConfigStore { - return &OrganizationConfigStore{ - store: s, - organization: orgID, - } -} - -// FindOrCreate gets an organization's config or creates one if none exists -func (s *OrganizationConfigStore) FindOrCreate(ctx context.Context, orgID string) (*chronograf.OrganizationConfig, error) { - var err = validOrganization(ctx) - if err != nil { - return nil, err - } - - oc, err := s.store.FindOrCreate(ctx, orgID) - if err != nil { - return nil, err - } - - return oc, nil - -} - -// Put the OrganizationConfig in OrganizationConfigStore. -func (s *OrganizationConfigStore) Put(ctx context.Context, c *chronograf.OrganizationConfig) error { - err := validOrganization(ctx) - if err != nil { - return err - } - - return s.store.Put(ctx, c) -} diff --git a/chronograf/organizations/organizations.go b/chronograf/organizations/organizations.go deleted file mode 100644 index 57a7ee1b7f2..00000000000 --- a/chronograf/organizations/organizations.go +++ /dev/null @@ -1,158 +0,0 @@ -package organizations - -import ( - "context" - "fmt" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -type contextKey string - -// ContextKey is the key used to specify the -// organization via context -const ContextKey = contextKey("organization") - -func validOrganization(ctx context.Context) error { - // prevents panic in case of nil context - if ctx == nil { - return fmt.Errorf("expect non nil context") - } - orgID, ok := ctx.Value(ContextKey).(string) - // should never happen - if !ok { - return fmt.Errorf("expected organization key to be a string") - } - if orgID == "" { - return fmt.Errorf("expected organization key to be set") - } - return nil -} - -// ensure that OrganizationsStore implements chronograf.OrganizationStore -var _ chronograf.OrganizationsStore = &OrganizationsStore{} - -// OrganizationsStore facade on a OrganizationStore that filters organizations -// by organization. -type OrganizationsStore struct { - store chronograf.OrganizationsStore - organization string -} - -// NewOrganizationsStore creates a new OrganizationsStore from an existing -// chronograf.OrganizationStore and an organization string -func NewOrganizationsStore(s chronograf.OrganizationsStore, org string) *OrganizationsStore { - return &OrganizationsStore{ - store: s, - organization: org, - } -} - -// All retrieves all organizations from the underlying OrganizationStore and filters them -// by organization. 
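Editor's note: validOrganization above depends on the unexported contextKey type. Because ContextKey has its own named Go type, a value stored under it can only be read back with that exact key, never with a plain string that happens to spell "organization". A small self-contained demonstration of that property (key names here are illustrative):

```go
package main

import (
	"context"
	"fmt"
)

// contextKey mirrors the unexported key type above: a distinct named type
// means values stored under it cannot collide with, or be read by, code that
// uses a plain string key of the same spelling.
type contextKey string

const orgKey = contextKey("organization")

func main() {
	ctx := context.WithValue(context.Background(), orgKey, "1337")

	// Lookup with the typed key succeeds...
	org, ok := ctx.Value(orgKey).(string)
	fmt.Println(org, ok) // 1337 true

	// ...but a bare string key does not, even with the same spelling,
	// because the dynamic types of the two keys differ.
	_, ok = ctx.Value("organization").(string)
	fmt.Println(ok) // false
}
```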
-func (s *OrganizationsStore) All(ctx context.Context) ([]chronograf.Organization, error) { - err := validOrganization(ctx) - if err != nil { - return nil, err - } - - ds, err := s.store.All(ctx) - if err != nil { - return nil, err - } - - defaultOrg, err := s.store.DefaultOrganization(ctx) - if err != nil { - return nil, err - } - - defaultOrgID := defaultOrg.ID - - // This filters organizations without allocating - // https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating - organizations := ds[:0] - for _, d := range ds { - id := d.ID - switch id { - case s.organization, defaultOrgID: - organizations = append(organizations, d) - default: - continue - } - } - - return organizations, nil -} - -// Add creates a new Organization in the OrganizationsStore with organization.Organization set to be the -// organization from the organization store. -func (s *OrganizationsStore) Add(ctx context.Context, o *chronograf.Organization) (*chronograf.Organization, error) { - return nil, fmt.Errorf("cannot create organization") -} - -// Delete the organization from OrganizationsStore -func (s *OrganizationsStore) Delete(ctx context.Context, o *chronograf.Organization) error { - err := validOrganization(ctx) - if err != nil { - return err - } - - o, err = s.store.Get(ctx, chronograf.OrganizationQuery{ID: &o.ID}) - if err != nil { - return err - } - - return s.store.Delete(ctx, o) -} - -// Get returns a Organization if the id exists and belongs to the organization that is set. -func (s *OrganizationsStore) Get(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - err := validOrganization(ctx) - if err != nil { - return nil, err - } - - d, err := s.store.Get(ctx, q) - if err != nil { - return nil, err - } - - if d.ID != s.organization { - return nil, chronograf.ErrOrganizationNotFound - } - - return d, nil -} - -// Update the organization in OrganizationsStore. 
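Editor's note: Get above has a deliberate shape worth calling out: a record that exists but belongs to a different organization is reported as ErrOrganizationNotFound rather than as a permission error, so a caller cannot probe for IDs owned by other tenants. A toy sketch of that guard, assuming a map-backed store and stand-in types:

```go
package main

import (
	"errors"
	"fmt"
)

var errOrganizationNotFound = errors.New("organization not found")

// Org is a hypothetical stand-in for chronograf.Organization.
type Org struct{ ID, Name string }

// get mimics OrganizationsStore.Get above: a record that exists but is not
// the current organization is reported as not found, so callers learn
// nothing about other tenants' IDs.
func get(store map[string]Org, current, id string) (Org, error) {
	o, ok := store[id]
	if !ok || o.ID != current {
		return Org{}, errOrganizationNotFound
	}
	return o, nil
}

func main() {
	store := map[string]Org{
		"1337": {ID: "1337", Name: "howdy"},
		"1338": {ID: "1338", Name: "doody"},
	}
	fmt.Println(get(store, "1337", "1337")) // {1337 howdy} <nil>
	fmt.Println(get(store, "1337", "1338")) // same error as a missing ID
}
```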
-func (s *OrganizationsStore) Update(ctx context.Context, o *chronograf.Organization) error { - err := validOrganization(ctx) - if err != nil { - return err - } - - _, err = s.store.Get(ctx, chronograf.OrganizationQuery{ID: &o.ID}) - if err != nil { - return err - } - - return s.store.Update(ctx, o) -} - -func (s *OrganizationsStore) CreateDefault(ctx context.Context) error { - err := validOrganization(ctx) - if err != nil { - return err - } - - return s.store.CreateDefault(ctx) -} - -func (s *OrganizationsStore) DefaultOrganization(ctx context.Context) (*chronograf.Organization, error) { - err := validOrganization(ctx) - if err != nil { - return nil, err - } - - return s.store.DefaultOrganization(ctx) -} diff --git a/chronograf/organizations/organizations_test.go b/chronograf/organizations/organizations_test.go deleted file mode 100644 index cb28b1803a6..00000000000 --- a/chronograf/organizations/organizations_test.go +++ /dev/null @@ -1,346 +0,0 @@ -package organizations_test - -import ( - "context" - "fmt" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/mocks" - "github.com/influxdata/influxdb/v2/chronograf/organizations" -) - -// IgnoreFields is used because ID cannot be predicted reliably -// EquateEmpty is used because we want nil slices, arrays, and maps to be equal to the empty map -var organizationCmpOptions = cmp.Options{ - cmpopts.EquateEmpty(), - cmpopts.IgnoreFields(chronograf.Organization{}, "ID"), -} - -func TestOrganizations_All(t *testing.T) { - type fields struct { - OrganizationsStore chronograf.OrganizationsStore - } - type args struct { - organization string - } - tests := []struct { - name string - args args - fields fields - want []chronograf.Organization - wantErr bool - }{ - { - name: "No Organizations", - fields: fields{ - OrganizationsStore: &mocks.OrganizationsStore{ - AllF: func(ctx context.Context) ([]chronograf.Organization, error) { - return nil, fmt.Errorf("no Organizations") - }, - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - Name: "Default", - }, nil - }, - }, - }, - wantErr: true, - }, - { - name: "All Organizations", - fields: fields{ - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - Name: "Default", - }, nil - }, - AllF: func(ctx context.Context) ([]chronograf.Organization, error) { - return []chronograf.Organization{ - { - Name: "howdy", - ID: "1337", - }, - { - Name: "doody", - ID: "1447", - }, - }, nil - }, - }, - }, - args: args{ - organization: "1337", - }, - want: []chronograf.Organization{ - { - Name: "howdy", - ID: "1337", - }, - { - Name: "Default", - ID: "0", - }, - }, - }, - } - for _, tt := range tests { - s := organizations.NewOrganizationsStore(tt.fields.OrganizationsStore, tt.args.organization) - ctx := context.WithValue(context.Background(), organizations.ContextKey, tt.args.organization) - gots, err := s.All(ctx) - if (err != nil) != tt.wantErr { - t.Errorf("%q. OrganizationsStore.All() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - for i, got := range gots { - if diff := cmp.Diff(got, tt.want[i], organizationCmpOptions...); diff != "" { - t.Errorf("%q. 
OrganizationsStore.All():\n-got/+want\ndiff %s", tt.name, diff) - } - } - } -} - -func TestOrganizations_Add(t *testing.T) { - type fields struct { - OrganizationsStore chronograf.OrganizationsStore - } - type args struct { - organizationID string - ctx context.Context - organization *chronograf.Organization - } - tests := []struct { - name string - args args - fields fields - want *chronograf.Organization - wantErr bool - }{ - { - name: "Add Organization", - fields: fields{ - OrganizationsStore: &mocks.OrganizationsStore{ - AddF: func(ctx context.Context, s *chronograf.Organization) (*chronograf.Organization, error) { - return s, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "1229", - Name: "howdy", - }, nil - }, - }, - }, - args: args{ - organizationID: "1229", - ctx: context.Background(), - organization: &chronograf.Organization{ - Name: "howdy", - }, - }, - wantErr: true, - }, - } - for _, tt := range tests { - s := organizations.NewOrganizationsStore(tt.fields.OrganizationsStore, tt.args.organizationID) - tt.args.ctx = context.WithValue(tt.args.ctx, organizations.ContextKey, tt.args.organizationID) - d, err := s.Add(tt.args.ctx, tt.args.organization) - if (err != nil) != tt.wantErr { - t.Errorf("%q. OrganizationsStore.Add() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - if tt.wantErr { - continue - } - got, err := s.Get(tt.args.ctx, chronograf.OrganizationQuery{ID: &d.ID}) - if err != nil { - t.Fatal(err) - } - if diff := cmp.Diff(got, tt.want, organizationCmpOptions...); diff != "" { - t.Errorf("%q. OrganizationsStore.Add():\n-got/+want\ndiff %s", tt.name, diff) - } - } -} - -func TestOrganizations_Delete(t *testing.T) { - type fields struct { - OrganizationsStore chronograf.OrganizationsStore - } - type args struct { - organizationID string - ctx context.Context - organization *chronograf.Organization - } - tests := []struct { - name string - fields fields - args args - addFirst bool - wantErr bool - }{ - { - name: "Delete organization", - fields: fields{ - OrganizationsStore: &mocks.OrganizationsStore{ - DeleteF: func(ctx context.Context, s *chronograf.Organization) error { - return nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "1229", - Name: "howdy", - }, nil - }, - }, - }, - args: args{ - organizationID: "1229", - ctx: context.Background(), - organization: &chronograf.Organization{ - ID: "1229", - Name: "howdy", - }, - }, - addFirst: true, - }, - } - for _, tt := range tests { - s := organizations.NewOrganizationsStore(tt.fields.OrganizationsStore, tt.args.organizationID) - tt.args.ctx = context.WithValue(tt.args.ctx, organizations.ContextKey, tt.args.organizationID) - err := s.Delete(tt.args.ctx, tt.args.organization) - if (err != nil) != tt.wantErr { - t.Errorf("%q. 
OrganizationsStore.All() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - } -} - -func TestOrganizations_Get(t *testing.T) { - type fields struct { - OrganizationsStore chronograf.OrganizationsStore - } - type args struct { - organizationID string - ctx context.Context - organization *chronograf.Organization - } - tests := []struct { - name string - fields fields - args args - want *chronograf.Organization - wantErr bool - }{ - { - name: "Get Organization", - fields: fields{ - OrganizationsStore: &mocks.OrganizationsStore{ - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "1337", - Name: "howdy", - }, nil - }, - }, - }, - args: args{ - organizationID: "1337", - ctx: context.Background(), - organization: &chronograf.Organization{ - ID: "1337", - Name: "howdy", - }, - }, - want: &chronograf.Organization{ - ID: "1337", - Name: "howdy", - }, - }, - } - for _, tt := range tests { - s := organizations.NewOrganizationsStore(tt.fields.OrganizationsStore, tt.args.organizationID) - tt.args.ctx = context.WithValue(tt.args.ctx, organizations.ContextKey, tt.args.organizationID) - got, err := s.Get(tt.args.ctx, chronograf.OrganizationQuery{ID: &tt.args.organization.ID}) - if (err != nil) != tt.wantErr { - t.Errorf("%q. OrganizationsStore.Get() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - if diff := cmp.Diff(got, tt.want, organizationCmpOptions...); diff != "" { - t.Errorf("%q. OrganizationsStore.Get():\n-got/+want\ndiff %s", tt.name, diff) - } - } -} - -func TestOrganizations_Update(t *testing.T) { - type fields struct { - OrganizationsStore chronograf.OrganizationsStore - } - type args struct { - organizationID string - ctx context.Context - organization *chronograf.Organization - name string - } - tests := []struct { - name string - fields fields - args args - want *chronograf.Organization - addFirst bool - wantErr bool - }{ - { - name: "Update Organization Name", - fields: fields{ - OrganizationsStore: &mocks.OrganizationsStore{ - UpdateF: func(ctx context.Context, s *chronograf.Organization) error { - return nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "1229", - Name: "doody", - }, nil - }, - }, - }, - args: args{ - organizationID: "1229", - ctx: context.Background(), - organization: &chronograf.Organization{ - ID: "1229", - Name: "howdy", - }, - name: "doody", - }, - want: &chronograf.Organization{ - Name: "doody", - }, - addFirst: true, - }, - } - for _, tt := range tests { - if tt.args.name != "" { - tt.args.organization.Name = tt.args.name - } - s := organizations.NewOrganizationsStore(tt.fields.OrganizationsStore, tt.args.organizationID) - tt.args.ctx = context.WithValue(tt.args.ctx, organizations.ContextKey, tt.args.organizationID) - err := s.Update(tt.args.ctx, tt.args.organization) - if (err != nil) != tt.wantErr { - t.Errorf("%q. OrganizationsStore.Update() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - got, err := s.Get(tt.args.ctx, chronograf.OrganizationQuery{ID: &tt.args.organization.ID}) - if err != nil { - t.Fatal(err) - } - if diff := cmp.Diff(got, tt.want, organizationCmpOptions...); diff != "" { - t.Errorf("%q. 
OrganizationsStore.Update():\n-got/+want\ndiff %s", tt.name, diff) - } - } -} diff --git a/chronograf/organizations/servers.go b/chronograf/organizations/servers.go deleted file mode 100644 index 89bf1a8ed3d..00000000000 --- a/chronograf/organizations/servers.go +++ /dev/null @@ -1,111 +0,0 @@ -package organizations - -import ( - "context" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -// ensure that ServersStore implements chronograf.ServerStore -var _ chronograf.ServersStore = &ServersStore{} - -// ServersStore facade on a ServerStore that filters servers -// by organization. -type ServersStore struct { - store chronograf.ServersStore - organization string -} - -// NewServersStore creates a new ServersStore from an existing -// chronograf.ServerStore and an organization string -func NewServersStore(s chronograf.ServersStore, org string) *ServersStore { - return &ServersStore{ - store: s, - organization: org, - } -} - -// All retrieves all servers from the underlying ServerStore and filters them -// by organization. -func (s *ServersStore) All(ctx context.Context) ([]chronograf.Server, error) { - err := validOrganization(ctx) - if err != nil { - return nil, err - } - ds, err := s.store.All(ctx) - if err != nil { - return nil, err - } - - // This filters servers without allocating - // https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating - servers := ds[:0] - for _, d := range ds { - if d.Organization == s.organization { - servers = append(servers, d) - } - } - - return servers, nil -} - -// Add creates a new Server in the ServersStore with server.Organization set to be the -// organization from the server store. -func (s *ServersStore) Add(ctx context.Context, d chronograf.Server) (chronograf.Server, error) { - err := validOrganization(ctx) - if err != nil { - return chronograf.Server{}, err - } - - d.Organization = s.organization - return s.store.Add(ctx, d) -} - -// Delete the server from ServersStore -func (s *ServersStore) Delete(ctx context.Context, d chronograf.Server) error { - err := validOrganization(ctx) - if err != nil { - return err - } - - d, err = s.store.Get(ctx, d.ID) - if err != nil { - return err - } - - return s.store.Delete(ctx, d) -} - -// Get returns a Server if the id exists and belongs to the organization that is set. -func (s *ServersStore) Get(ctx context.Context, id int) (chronograf.Server, error) { - err := validOrganization(ctx) - if err != nil { - return chronograf.Server{}, err - } - - d, err := s.store.Get(ctx, id) - if err != nil { - return chronograf.Server{}, err - } - - if d.Organization != s.organization { - return chronograf.Server{}, chronograf.ErrServerNotFound - } - - return d, nil -} - -// Update the server in ServersStore. 
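Editor's note: dashboards, organizations, servers, and sources each get a near-identical organization-scoped facade in this diff. That duplication was idiomatic for its time; with type parameters (Go 1.18+) the shared filtering core could be written once. The following is purely an illustrative sketch, not code from the repository, and all names in it are hypothetical:

```go
package main

import "fmt"

// OrgScoped abstracts the one thing every filtered resource shares: an
// owning organization.
type OrgScoped interface {
	Org() string
}

type Server struct{ Name, Organization string }

func (s Server) Org() string { return s.Organization }

type Source struct{ Name, Organization string }

func (s Source) Org() string { return s.Organization }

// keepOrg is the shared core of every All() method in these files, written
// once over any OrgScoped element type.
func keepOrg[T OrgScoped](items []T, org string) []T {
	out := items[:0]
	for _, it := range items {
		if it.Org() == org {
			out = append(out, it)
		}
	}
	return out
}

func main() {
	servers := []Server{{"howdy", "1337"}, {"doody", "1338"}}
	sources := []Source{{"a", "1337"}, {"b", "1337"}}
	fmt.Println(len(keepOrg(servers, "1337")), len(keepOrg(sources, "1337"))) // 1 2
}
```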
-func (s *ServersStore) Update(ctx context.Context, d chronograf.Server) error { - err := validOrganization(ctx) - if err != nil { - return err - } - - _, err = s.store.Get(ctx, d.ID) - if err != nil { - return err - } - - return s.store.Update(ctx, d) -} diff --git a/chronograf/organizations/servers_test.go b/chronograf/organizations/servers_test.go deleted file mode 100644 index 30f91e2c178..00000000000 --- a/chronograf/organizations/servers_test.go +++ /dev/null @@ -1,341 +0,0 @@ -package organizations_test - -import ( - "context" - "fmt" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/mocks" - "github.com/influxdata/influxdb/v2/chronograf/organizations" -) - -// IgnoreFields is used because ID cannot be predicted reliably -// EquateEmpty is used because we want nil slices, arrays, and maps to be equal to the empty map -var serverCmpOptions = cmp.Options{ - cmpopts.EquateEmpty(), - cmpopts.IgnoreFields(chronograf.Server{}, "ID"), - cmpopts.IgnoreFields(chronograf.Server{}, "Active"), -} - -func TestServers_All(t *testing.T) { - type fields struct { - ServersStore chronograf.ServersStore - } - type args struct { - organization string - } - tests := []struct { - name string - args args - fields fields - want []chronograf.Server - wantErr bool - }{ - { - name: "No Servers", - fields: fields{ - ServersStore: &mocks.ServersStore{ - AllF: func(ctx context.Context) ([]chronograf.Server, error) { - return nil, fmt.Errorf("no Servers") - }, - }, - }, - wantErr: true, - }, - { - name: "All Servers", - fields: fields{ - ServersStore: &mocks.ServersStore{ - AllF: func(ctx context.Context) ([]chronograf.Server, error) { - return []chronograf.Server{ - { - Name: "howdy", - Organization: "1337", - }, - { - Name: "doody", - Organization: "1338", - }, - }, nil - }, - }, - }, - args: args{ - organization: "1337", - }, - want: []chronograf.Server{ - { - Name: "howdy", - Organization: "1337", - }, - }, - }, - } - for _, tt := range tests { - s := organizations.NewServersStore(tt.fields.ServersStore, tt.args.organization) - ctx := context.WithValue(context.Background(), organizations.ContextKey, tt.args.organization) - gots, err := s.All(ctx) - if (err != nil) != tt.wantErr { - t.Errorf("%q. ServersStore.All() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - for i, got := range gots { - if diff := cmp.Diff(got, tt.want[i], serverCmpOptions...); diff != "" { - t.Errorf("%q. 
ServersStore.All():\n-got/+want\ndiff %s", tt.name, diff) - } - } - } -} - -func TestServers_Add(t *testing.T) { - type fields struct { - ServersStore chronograf.ServersStore - } - type args struct { - organization string - ctx context.Context - server chronograf.Server - } - tests := []struct { - name string - args args - fields fields - want chronograf.Server - wantErr bool - }{ - { - name: "Add Server", - fields: fields{ - ServersStore: &mocks.ServersStore{ - AddF: func(ctx context.Context, s chronograf.Server) (chronograf.Server, error) { - return s, nil - }, - GetF: func(ctx context.Context, id int) (chronograf.Server, error) { - return chronograf.Server{ - ID: 1229, - Name: "howdy", - Organization: "1337", - }, nil - }, - }, - }, - args: args{ - organization: "1337", - ctx: context.Background(), - server: chronograf.Server{ - ID: 1229, - Name: "howdy", - }, - }, - want: chronograf.Server{ - Name: "howdy", - Organization: "1337", - }, - }, - } - for _, tt := range tests { - s := organizations.NewServersStore(tt.fields.ServersStore, tt.args.organization) - tt.args.ctx = context.WithValue(tt.args.ctx, organizations.ContextKey, tt.args.organization) - d, err := s.Add(tt.args.ctx, tt.args.server) - if (err != nil) != tt.wantErr { - t.Errorf("%q. ServersStore.Add() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - got, err := s.Get(tt.args.ctx, d.ID) - if err != nil { - t.Fatal(err) - } - if diff := cmp.Diff(got, tt.want, serverCmpOptions...); diff != "" { - t.Errorf("%q. ServersStore.Add():\n-got/+want\ndiff %s", tt.name, diff) - } - } -} - -func TestServers_Delete(t *testing.T) { - type fields struct { - ServersStore chronograf.ServersStore - } - type args struct { - organization string - ctx context.Context - server chronograf.Server - } - tests := []struct { - name string - fields fields - args args - addFirst bool - wantErr bool - }{ - { - name: "Delete server", - fields: fields{ - ServersStore: &mocks.ServersStore{ - DeleteF: func(ctx context.Context, s chronograf.Server) error { - return nil - }, - GetF: func(ctx context.Context, id int) (chronograf.Server, error) { - return chronograf.Server{ - ID: 1229, - Name: "howdy", - Organization: "1337", - }, nil - }, - }, - }, - args: args{ - organization: "1337", - ctx: context.Background(), - server: chronograf.Server{ - ID: 1229, - Name: "howdy", - Organization: "1337", - }, - }, - addFirst: true, - }, - } - for _, tt := range tests { - s := organizations.NewServersStore(tt.fields.ServersStore, tt.args.organization) - tt.args.ctx = context.WithValue(tt.args.ctx, organizations.ContextKey, tt.args.organization) - err := s.Delete(tt.args.ctx, tt.args.server) - if (err != nil) != tt.wantErr { - t.Errorf("%q. 
ServersStore.All() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - } -} - -func TestServers_Get(t *testing.T) { - type fields struct { - ServersStore chronograf.ServersStore - } - type args struct { - organization string - ctx context.Context - server chronograf.Server - } - tests := []struct { - name string - fields fields - args args - want chronograf.Server - wantErr bool - }{ - { - name: "Get Server", - fields: fields{ - ServersStore: &mocks.ServersStore{ - GetF: func(ctx context.Context, id int) (chronograf.Server, error) { - return chronograf.Server{ - ID: 1229, - Name: "howdy", - Organization: "1337", - }, nil - }, - }, - }, - args: args{ - organization: "1337", - ctx: context.Background(), - server: chronograf.Server{ - ID: 1229, - Name: "howdy", - Organization: "1337", - }, - }, - want: chronograf.Server{ - ID: 1229, - Name: "howdy", - Organization: "1337", - }, - }, - } - for _, tt := range tests { - s := organizations.NewServersStore(tt.fields.ServersStore, tt.args.organization) - tt.args.ctx = context.WithValue(tt.args.ctx, organizations.ContextKey, tt.args.organization) - got, err := s.Get(tt.args.ctx, tt.args.server.ID) - if (err != nil) != tt.wantErr { - t.Errorf("%q. ServersStore.Get() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - if diff := cmp.Diff(got, tt.want, serverCmpOptions...); diff != "" { - t.Errorf("%q. ServersStore.Get():\n-got/+want\ndiff %s", tt.name, diff) - } - } -} - -func TestServers_Update(t *testing.T) { - type fields struct { - ServersStore chronograf.ServersStore - } - type args struct { - organization string - ctx context.Context - server chronograf.Server - name string - } - tests := []struct { - name string - fields fields - args args - want chronograf.Server - addFirst bool - wantErr bool - }{ - { - name: "Update Server Name", - fields: fields{ - ServersStore: &mocks.ServersStore{ - UpdateF: func(ctx context.Context, s chronograf.Server) error { - return nil - }, - GetF: func(ctx context.Context, id int) (chronograf.Server, error) { - return chronograf.Server{ - ID: 1229, - Name: "doody", - Organization: "1337", - }, nil - }, - }, - }, - args: args{ - organization: "1337", - ctx: context.Background(), - server: chronograf.Server{ - ID: 1229, - Name: "howdy", - Organization: "1337", - }, - name: "doody", - }, - want: chronograf.Server{ - Name: "doody", - Organization: "1337", - }, - addFirst: true, - }, - } - for _, tt := range tests { - if tt.args.name != "" { - tt.args.server.Name = tt.args.name - } - s := organizations.NewServersStore(tt.fields.ServersStore, tt.args.organization) - tt.args.ctx = context.WithValue(tt.args.ctx, organizations.ContextKey, tt.args.organization) - err := s.Update(tt.args.ctx, tt.args.server) - if (err != nil) != tt.wantErr { - t.Errorf("%q. ServersStore.Update() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - got, err := s.Get(tt.args.ctx, tt.args.server.ID) - if err != nil { - t.Fatal(err) - } - if diff := cmp.Diff(got, tt.want, serverCmpOptions...); diff != "" { - t.Errorf("%q. 
ServersStore.Update():\n-got/+want\ndiff %s", tt.name, diff) - } - } -} diff --git a/chronograf/organizations/sources.go b/chronograf/organizations/sources.go deleted file mode 100644 index eadc761146e..00000000000 --- a/chronograf/organizations/sources.go +++ /dev/null @@ -1,112 +0,0 @@ -package organizations - -import ( - "context" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -// ensure that SourcesStore implements chronograf.SourceStore -var _ chronograf.SourcesStore = &SourcesStore{} - -// SourcesStore facade on a SourceStore that filters sources -// by organization. -type SourcesStore struct { - store chronograf.SourcesStore - organization string -} - -// NewSourcesStore creates a new SourcesStore from an existing -// chronograf.SourceStore and an organization string -func NewSourcesStore(s chronograf.SourcesStore, org string) *SourcesStore { - return &SourcesStore{ - store: s, - organization: org, - } -} - -// All retrieves all sources from the underlying SourceStore and filters them -// by organization. -func (s *SourcesStore) All(ctx context.Context) ([]chronograf.Source, error) { - err := validOrganization(ctx) - if err != nil { - return nil, err - } - - ds, err := s.store.All(ctx) - if err != nil { - return nil, err - } - - // This filters sources without allocating - // https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating - sources := ds[:0] - for _, d := range ds { - if d.Organization == s.organization { - sources = append(sources, d) - } - } - - return sources, nil -} - -// Add creates a new Source in the SourcesStore with source.Organization set to be the -// organization from the source store. -func (s *SourcesStore) Add(ctx context.Context, d chronograf.Source) (chronograf.Source, error) { - err := validOrganization(ctx) - if err != nil { - return chronograf.Source{}, err - } - - d.Organization = s.organization - return s.store.Add(ctx, d) -} - -// Delete the source from SourcesStore -func (s *SourcesStore) Delete(ctx context.Context, d chronograf.Source) error { - err := validOrganization(ctx) - if err != nil { - return err - } - - d, err = s.store.Get(ctx, d.ID) - if err != nil { - return err - } - - return s.store.Delete(ctx, d) -} - -// Get returns a Source if the id exists and belongs to the organization that is set. -func (s *SourcesStore) Get(ctx context.Context, id int) (chronograf.Source, error) { - err := validOrganization(ctx) - if err != nil { - return chronograf.Source{}, err - } - - d, err := s.store.Get(ctx, id) - if err != nil { - return chronograf.Source{}, err - } - - if d.Organization != s.organization { - return chronograf.Source{}, chronograf.ErrSourceNotFound - } - - return d, nil -} - -// Update the source in SourcesStore. 
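Editor's note: the table-driven tests in these deleted files compare results with go-cmp rather than reflect.DeepEqual, using cmpopts.IgnoreFields for store-assigned IDs and cmpopts.EquateEmpty so nil and empty slices compare equal. A condensed, hypothetical example of that comparison setup (Server here is a local stand-in, not chronograf.Server):

```go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

// Server is a local stand-in for chronograf.Server.
type Server struct {
	ID           int
	Name         string
	Organization string
	Tags         []string
}

func main() {
	opts := cmp.Options{
		cmpopts.EquateEmpty(),                // nil and empty slices/maps compare equal
		cmpopts.IgnoreFields(Server{}, "ID"), // IDs are assigned by the store
	}

	got := Server{ID: 99, Name: "howdy", Organization: "1337", Tags: nil}
	want := Server{Name: "howdy", Organization: "1337", Tags: []string{}}

	if diff := cmp.Diff(got, want, opts...); diff != "" {
		fmt.Printf("-got/+want\n%s", diff)
	} else {
		fmt.Println("equal") // prints "equal": ID ignored, nil == empty
	}
}
```

When the values differ, cmp.Diff returns a readable field-by-field diff, which is why the tests print it with a "-got/+want" legend instead of dumping both structs.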
-func (s *SourcesStore) Update(ctx context.Context, d chronograf.Source) error { - err := validOrganization(ctx) - if err != nil { - return err - } - - _, err = s.store.Get(ctx, d.ID) - if err != nil { - return err - } - - return s.store.Update(ctx, d) -} diff --git a/chronograf/organizations/sources_test.go b/chronograf/organizations/sources_test.go deleted file mode 100644 index 505104fa0ab..00000000000 --- a/chronograf/organizations/sources_test.go +++ /dev/null @@ -1,341 +0,0 @@ -package organizations_test - -import ( - "context" - "fmt" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/mocks" - "github.com/influxdata/influxdb/v2/chronograf/organizations" -) - -// IgnoreFields is used because ID cannot be predicted reliably -// EquateEmpty is used because we want nil slices, arrays, and maps to be equal to the empty map -var sourceCmpOptions = cmp.Options{ - cmpopts.EquateEmpty(), - cmpopts.IgnoreFields(chronograf.Source{}, "ID"), - cmpopts.IgnoreFields(chronograf.Source{}, "Default"), -} - -func TestSources_All(t *testing.T) { - type fields struct { - SourcesStore chronograf.SourcesStore - } - type args struct { - organization string - } - tests := []struct { - name string - args args - fields fields - want []chronograf.Source - wantErr bool - }{ - { - name: "No Sources", - fields: fields{ - SourcesStore: &mocks.SourcesStore{ - AllF: func(ctx context.Context) ([]chronograf.Source, error) { - return nil, fmt.Errorf("no Sources") - }, - }, - }, - wantErr: true, - }, - { - name: "All Sources", - fields: fields{ - SourcesStore: &mocks.SourcesStore{ - AllF: func(ctx context.Context) ([]chronograf.Source, error) { - return []chronograf.Source{ - { - Name: "howdy", - Organization: "1337", - }, - { - Name: "doody", - Organization: "1338", - }, - }, nil - }, - }, - }, - args: args{ - organization: "1337", - }, - want: []chronograf.Source{ - { - Name: "howdy", - Organization: "1337", - }, - }, - }, - } - for _, tt := range tests { - s := organizations.NewSourcesStore(tt.fields.SourcesStore, tt.args.organization) - ctx := context.WithValue(context.Background(), organizations.ContextKey, tt.args.organization) - gots, err := s.All(ctx) - if (err != nil) != tt.wantErr { - t.Errorf("%q. SourcesStore.All() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - for i, got := range gots { - if diff := cmp.Diff(got, tt.want[i], sourceCmpOptions...); diff != "" { - t.Errorf("%q. 
SourcesStore.All():\n-got/+want\ndiff %s", tt.name, diff) - } - } - } -} - -func TestSources_Add(t *testing.T) { - type fields struct { - SourcesStore chronograf.SourcesStore - } - type args struct { - organization string - ctx context.Context - source chronograf.Source - } - tests := []struct { - name string - args args - fields fields - want chronograf.Source - wantErr bool - }{ - { - name: "Add Source", - fields: fields{ - SourcesStore: &mocks.SourcesStore{ - AddF: func(ctx context.Context, s chronograf.Source) (chronograf.Source, error) { - return s, nil - }, - GetF: func(ctx context.Context, id int) (chronograf.Source, error) { - return chronograf.Source{ - ID: 1229, - Name: "howdy", - Organization: "1337", - }, nil - }, - }, - }, - args: args{ - organization: "1337", - ctx: context.Background(), - source: chronograf.Source{ - ID: 1229, - Name: "howdy", - }, - }, - want: chronograf.Source{ - Name: "howdy", - Organization: "1337", - }, - }, - } - for _, tt := range tests { - s := organizations.NewSourcesStore(tt.fields.SourcesStore, tt.args.organization) - tt.args.ctx = context.WithValue(tt.args.ctx, organizations.ContextKey, tt.args.organization) - d, err := s.Add(tt.args.ctx, tt.args.source) - if (err != nil) != tt.wantErr { - t.Errorf("%q. SourcesStore.Add() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - got, err := s.Get(tt.args.ctx, d.ID) - if err != nil { - t.Fatal(err) - } - if diff := cmp.Diff(got, tt.want, sourceCmpOptions...); diff != "" { - t.Errorf("%q. SourcesStore.Add():\n-got/+want\ndiff %s", tt.name, diff) - } - } -} - -func TestSources_Delete(t *testing.T) { - type fields struct { - SourcesStore chronograf.SourcesStore - } - type args struct { - organization string - ctx context.Context - source chronograf.Source - } - tests := []struct { - name string - fields fields - args args - addFirst bool - wantErr bool - }{ - { - name: "Delete source", - fields: fields{ - SourcesStore: &mocks.SourcesStore{ - DeleteF: func(ctx context.Context, s chronograf.Source) error { - return nil - }, - GetF: func(ctx context.Context, id int) (chronograf.Source, error) { - return chronograf.Source{ - ID: 1229, - Name: "howdy", - Organization: "1337", - }, nil - }, - }, - }, - args: args{ - organization: "1337", - ctx: context.Background(), - source: chronograf.Source{ - ID: 1229, - Name: "howdy", - Organization: "1337", - }, - }, - addFirst: true, - }, - } - for _, tt := range tests { - s := organizations.NewSourcesStore(tt.fields.SourcesStore, tt.args.organization) - tt.args.ctx = context.WithValue(tt.args.ctx, organizations.ContextKey, tt.args.organization) - err := s.Delete(tt.args.ctx, tt.args.source) - if (err != nil) != tt.wantErr { - t.Errorf("%q. 
SourcesStore.Delete() error = %v, wantErr %v", tt.name, err, tt.wantErr)
-			continue
-		}
-	}
-}
-
-func TestSources_Get(t *testing.T) {
-	type fields struct {
-		SourcesStore chronograf.SourcesStore
-	}
-	type args struct {
-		organization string
-		ctx          context.Context
-		source       chronograf.Source
-	}
-	tests := []struct {
-		name    string
-		fields  fields
-		args    args
-		want    chronograf.Source
-		wantErr bool
-	}{
-		{
-			name: "Get Source",
-			fields: fields{
-				SourcesStore: &mocks.SourcesStore{
-					GetF: func(ctx context.Context, id int) (chronograf.Source, error) {
-						return chronograf.Source{
-							ID:           1229,
-							Name:         "howdy",
-							Organization: "1337",
-						}, nil
-					},
-				},
-			},
-			args: args{
-				organization: "1337",
-				ctx:          context.Background(),
-				source: chronograf.Source{
-					ID:           1229,
-					Name:         "howdy",
-					Organization: "1337",
-				},
-			},
-			want: chronograf.Source{
-				ID:           1229,
-				Name:         "howdy",
-				Organization: "1337",
-			},
-		},
-	}
-	for _, tt := range tests {
-		s := organizations.NewSourcesStore(tt.fields.SourcesStore, tt.args.organization)
-		tt.args.ctx = context.WithValue(tt.args.ctx, organizations.ContextKey, tt.args.organization)
-		got, err := s.Get(tt.args.ctx, tt.args.source.ID)
-		if (err != nil) != tt.wantErr {
-			t.Errorf("%q. SourcesStore.Get() error = %v, wantErr %v", tt.name, err, tt.wantErr)
-			continue
-		}
-		if diff := cmp.Diff(got, tt.want, sourceCmpOptions...); diff != "" {
-			t.Errorf("%q. SourcesStore.Get():\n-got/+want\ndiff %s", tt.name, diff)
-		}
-	}
-}
-
-func TestSources_Update(t *testing.T) {
-	type fields struct {
-		SourcesStore chronograf.SourcesStore
-	}
-	type args struct {
-		organization string
-		ctx          context.Context
-		source       chronograf.Source
-		name         string
-	}
-	tests := []struct {
-		name     string
-		fields   fields
-		args     args
-		want     chronograf.Source
-		addFirst bool
-		wantErr  bool
-	}{
-		{
-			name: "Update Source Name",
-			fields: fields{
-				SourcesStore: &mocks.SourcesStore{
-					UpdateF: func(ctx context.Context, s chronograf.Source) error {
-						return nil
-					},
-					GetF: func(ctx context.Context, id int) (chronograf.Source, error) {
-						return chronograf.Source{
-							ID:           1229,
-							Name:         "doody",
-							Organization: "1337",
-						}, nil
-					},
-				},
-			},
-			args: args{
-				organization: "1337",
-				ctx:          context.Background(),
-				source: chronograf.Source{
-					ID:           1229,
-					Name:         "howdy",
-					Organization: "1337",
-				},
-				name: "doody",
-			},
-			want: chronograf.Source{
-				Name:         "doody",
-				Organization: "1337",
-			},
-			addFirst: true,
-		},
-	}
-	for _, tt := range tests {
-		if tt.args.name != "" {
-			tt.args.source.Name = tt.args.name
-		}
-		s := organizations.NewSourcesStore(tt.fields.SourcesStore, tt.args.organization)
-		tt.args.ctx = context.WithValue(tt.args.ctx, organizations.ContextKey, tt.args.organization)
-		err := s.Update(tt.args.ctx, tt.args.source)
-		if (err != nil) != tt.wantErr {
-			t.Errorf("%q. SourcesStore.Update() error = %v, wantErr %v", tt.name, err, tt.wantErr)
-			continue
-		}
-		got, err := s.Get(tt.args.ctx, tt.args.source.ID)
-		if err != nil {
-			t.Fatal(err)
-		}
-		if diff := cmp.Diff(got, tt.want, sourceCmpOptions...); diff != "" {
-			t.Errorf("%q. SourcesStore.Update():\n-got/+want\ndiff %s", tt.name, diff)
-		}
-	}
-}
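The tests above exercise the organization-scoped facade pattern that the next deleted file, users.go, implements for users. To make the pattern concrete, here is a minimal, self-contained sketch; the types and names are stand-ins invented for illustration, not the chronograf API:

```go
package main

import (
	"context"
	"fmt"
)

type contextKey string

const orgContextKey = contextKey("organization")

// Source is a stand-in for chronograf.Source.
type Source struct {
	Name         string
	Organization string
}

// OrgSourcesStore mimics the facade idea: it wraps a flat collection of
// sources and exposes only those belonging to its organization.
type OrgSourcesStore struct {
	organization string
	all          []Source
}

// All returns the sources visible to the store's organization. Like the
// real facades, it requires the organization to be present on the context.
func (s *OrgSourcesStore) All(ctx context.Context) ([]Source, error) {
	org, ok := ctx.Value(orgContextKey).(string)
	if !ok || org == "" {
		return nil, fmt.Errorf("organization not set on context")
	}
	var out []Source
	for _, src := range s.all {
		if src.Organization == s.organization {
			out = append(out, src)
		}
	}
	return out, nil
}

func main() {
	store := &OrgSourcesStore{
		organization: "1337",
		all: []Source{
			{Name: "howdy", Organization: "1337"},
			{Name: "doody", Organization: "1338"},
		},
	}
	ctx := context.WithValue(context.Background(), orgContextKey, "1337")
	srcs, _ := store.All(ctx)
	fmt.Println(srcs) // [{howdy 1337}]
}
```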
diff --git a/chronograf/organizations/users.go b/chronograf/organizations/users.go
deleted file mode 100644
index bb2a849e888..00000000000
--- a/chronograf/organizations/users.go
+++ /dev/null
@@ -1,284 +0,0 @@
-package organizations
-
-import (
-	"context"
-	"fmt"
-
-	"github.com/influxdata/influxdb/v2/chronograf"
-)
-
-// Ensure UsersStore implements chronograf.UsersStore.
-var _ chronograf.UsersStore = &UsersStore{}
-
-// UsersStore is a facade on a chronograf.UsersStore that filters a user's
-// roles by organization.
-//
-// The high-level idea here is to use the same underlying store for all users.
-// In particular, this is done by having each user's Roles field be the set of
-// all of that user's roles in all organizations. Each CRUD method here takes care
-// to ensure that the only roles that are modified are the roles for the organization
-// that was provided on the UsersStore.
-type UsersStore struct {
-	organization string
-	store        chronograf.UsersStore
-}
-
-// NewUsersStore creates a new UsersStore from an existing
-// chronograf.UsersStore and an organization string.
-func NewUsersStore(s chronograf.UsersStore, org string) *UsersStore {
-	return &UsersStore{
-		store:        s,
-		organization: org,
-	}
-}
-
-// validOrganizationRoles ensures that each user role has both an associated Organization and a Name.
-func validOrganizationRoles(orgID string, u *chronograf.User) error {
-	if u == nil || u.Roles == nil {
-		return nil
-	}
-	for _, r := range u.Roles {
-		if r.Organization == "" {
-			return fmt.Errorf("user role must have an Organization")
-		}
-		if r.Organization != orgID {
-			return fmt.Errorf("organizationID %s does not match %s", r.Organization, orgID)
-		}
-		if r.Name == "" {
-			return fmt.Errorf("user role must have a Name")
-		}
-	}
-	return nil
-}
-
-// Get searches the UsersStore using the query.
-// The roles returned on the user are filtered to only contain roles that are for the organization
-// specified on the organization store.
-func (s *UsersStore) Get(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) {
-	err := validOrganization(ctx)
-	if err != nil {
-		return nil, err
-	}
-
-	usr, err := s.store.Get(ctx, q)
-	if err != nil {
-		return nil, err
-	}
-
-	// This filters a user's roles so that the resulting struct only contains roles
-	// from the organization on the UsersStore.
-	roles := usr.Roles[:0]
-	for _, r := range usr.Roles {
-		if r.Organization == s.organization {
-			roles = append(roles, r)
-		}
-	}
-
-	if len(roles) == 0 {
-		// This means that the user does not belong to the organization
-		// and therefore is not found.
-		return nil, chronograf.ErrUserNotFound
-	}
-
-	usr.Roles = roles
-	return usr, nil
-}
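Get's filtering loop uses the SliceTricks "filtering without allocating" idiom (`roles := usr.Roles[:0]`) that recurs throughout this package. A standalone sketch of the idiom follows; the Role type and function name are illustrative only:

```go
package main

import "fmt"

// Role is a stand-in for chronograf.Role.
type Role struct {
	Organization string
	Name         string
}

// keepOrgRoles keeps only the roles belonging to org. It reuses the
// backing array of roles (roles[:0] shares storage with roles), so it
// avoids an allocation but mutates the input slice as a side effect.
func keepOrgRoles(roles []Role, org string) []Role {
	kept := roles[:0]
	for _, r := range roles {
		if r.Organization == org {
			kept = append(kept, r)
		}
	}
	return kept
}

func main() {
	roles := []Role{
		{Organization: "1337", Name: "admin"},
		{Organization: "1338", Name: "viewer"},
	}
	fmt.Println(keepOrgRoles(roles, "1337")) // [{1337 admin}]
}
```

The mutation is harmless in Get, which has just fetched the user from the store; the same aliasing is why Update below copies the user before appending.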
-
-// Add creates a new User in the UsersStore. It validates that the user provided
-// only has roles for the organization set on the UsersStore.
-// If the user is not found in the underlying store, it calls the underlying
-// UsersStore Add method.
-// If the user is found, it removes any existing roles the user has for the
-// organization, appends the roles specified on the provided user, and calls
-// the underlying UsersStore Update method.
-func (s *UsersStore) Add(ctx context.Context, u *chronograf.User) (*chronograf.User, error) {
-	err := validOrganization(ctx)
-	if err != nil {
-		return nil, err
-	}
-
-	// Validate that the user's roles are only for the current organization.
-	if err := validOrganizationRoles(s.organization, u); err != nil {
-		return nil, err
-	}
-
-	// retrieve the user from the underlying store
-	usr, err := s.store.Get(ctx, chronograf.UserQuery{
-		Name:     &u.Name,
-		Provider: &u.Provider,
-		Scheme:   &u.Scheme,
-	})
-
-	switch err {
-	case nil:
-		// If there is no error, continue to the rest of the code.
-		break
-	case chronograf.ErrUserNotFound:
-		// If the user is not found in the backing store, attempt to add the user.
-		return s.store.Add(ctx, u)
-	default:
-		// return the error
-		return nil, err
-	}
-
-	// Filter the retrieved user's roles so that the resulting struct only contains
-	// roles that are not from the organization on the UsersStore.
-	roles := usr.Roles[:0]
-	for _, r := range usr.Roles {
-		if r.Organization != s.organization {
-			roles = append(roles, r)
-		}
-	}
-
-	// If the user already has a role in the organization then the user
-	// cannot be "created".
-	// This can be thought of as:
-	// (total # of roles a user has) - (# of roles not in the organization) = (# of roles in the organization)
-	// if this value is greater than 0 the user cannot be "added".
-	numRolesInOrganization := len(usr.Roles) - len(roles)
-	if numRolesInOrganization > 0 {
-		return nil, chronograf.ErrUserAlreadyExists
-	}
-
-	// Set the user's roles to be the union of the roles set on the provided user
-	// and the user that was found in the underlying store.
-	usr.Roles = append(roles, u.Roles...)
-
-	// TODO(desa): this should go away with https://github.com/influxdata/influxdb/chronograf/issues/2207
-	// I do not like checking super admin here. The organization users store should only be
-	// concerned about organizations.
-	//
-	// If the user being added already existed in a previous organization and was already a SuperAdmin,
-	// then this ensures that they retain their SuperAdmin status. And if they weren't a SuperAdmin, and
-	// the user being added has been granted SuperAdmin status, they will be promoted.
-	if u.SuperAdmin {
-		usr.SuperAdmin = true
-	}
-
-	// Update the user in the underlying store.
-	if err := s.store.Update(ctx, usr); err != nil {
-		return nil, err
-	}
-
-	// Return the provided user with the ID set.
-	u.ID = usr.ID
-	return u, nil
-}
-
-// Delete a user from the UsersStore. This is done by stripping a user of
-// any roles it has in the organization specified on the UsersStore.
-func (s *UsersStore) Delete(ctx context.Context, usr *chronograf.User) error {
-	err := validOrganization(ctx)
-	if err != nil {
-		return err
-	}
-
-	// retrieve the user from the underlying store
-	u, err := s.store.Get(ctx, chronograf.UserQuery{ID: &usr.ID})
-	if err != nil {
-		return err
-	}
-
-	// Filter the retrieved user's roles so that the resulting slice contains
-	// roles that are not scoped to the organization provided.
-	roles := u.Roles[:0]
-	for _, r := range u.Roles {
-		if r.Organization != s.organization {
-			roles = append(roles, r)
-		}
-	}
-	u.Roles = roles
-	return s.store.Update(ctx, u)
-}
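Add's "already exists" check is a small piece of arithmetic that is easy to misread: it counts roles inside the organization as (total roles) − (roles outside the organization). A self-contained sketch of that calculation, with names invented for illustration:

```go
package main

import "fmt"

// Role is a stand-in for chronograf.Role.
type Role struct{ Organization, Name string }

// rolesOutside returns the roles that do not belong to org; the number of
// roles inside org is then len(all) - len(outside), which is exactly the
// quantity Add tests against zero to decide whether the user already
// "exists" in the organization.
func rolesOutside(all []Role, org string) []Role {
	out := make([]Role, 0, len(all))
	for _, r := range all {
		if r.Organization != org {
			out = append(out, r)
		}
	}
	return out
}

func main() {
	stored := []Role{
		{Organization: "1337", Name: "editor"},
		{Organization: "1338", Name: "viewer"},
	}
	outside := rolesOutside(stored, "1338")
	numInOrg := len(stored) - len(outside)
	fmt.Println(numInOrg > 0) // true: the user already has a role in org 1338
}
```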
-
-// Update a user in the UsersStore.
-func (s *UsersStore) Update(ctx context.Context, usr *chronograf.User) error {
-	err := validOrganization(ctx)
-	if err != nil {
-		return err
-	}
-
-	// Validate that the user's roles are only for the current organization.
-	if err := validOrganizationRoles(s.organization, usr); err != nil {
-		return err
-	}
-
-	// retrieve the user from the underlying store
-	u, err := s.store.Get(ctx, chronograf.UserQuery{ID: &usr.ID})
-	if err != nil {
-		return err
-	}
-
-	// Filter the retrieved user's roles so that the resulting slice contains
-	// roles that are not scoped to the organization provided.
-	roles := u.Roles[:0]
-	for _, r := range u.Roles {
-		if r.Organization != s.organization {
-			roles = append(roles, r)
-		}
-	}
-
-	// Make a copy of usr so that appending roles does not modify the user
-	// that was passed in.
-	user := *usr
-
-	// Set the user's roles to be the union of the roles set on the provided user
-	// and the user that was found in the underlying store.
-	user.Roles = append(roles, usr.Roles...)
-
-	return s.store.Update(ctx, &user)
-}
-
-// All returns all users whose roles have been filtered to be exclusively for
-// the organization provided on the UsersStore.
-func (s *UsersStore) All(ctx context.Context) ([]chronograf.User, error) {
-	err := validOrganization(ctx)
-	if err != nil {
-		return nil, err
-	}
-
-	// retrieve all users from the underlying UsersStore
-	usrs, err := s.store.All(ctx)
-	if err != nil {
-		return nil, err
-	}
-
-	// Filter users to only contain users that have at least one role
-	// in the provided organization.
-	us := usrs[:0]
-	for _, usr := range usrs {
-		roles := usr.Roles[:0]
-		// This filters a user's roles so that the resulting struct only contains roles
-		// from the organization on the UsersStore.
-		for _, r := range usr.Roles {
-			if r.Organization == s.organization {
-				roles = append(roles, r)
-			}
-		}
-		if len(roles) != 0 {
-			// Only add users if they have a role in the associated organization.
-			usr.Roles = roles
-			us = append(us, usr)
-		}
-	}
-
-	return us, nil
-}
-
-// Num returns the number of users in the UsersStore.
-// This is inefficient, but it should rarely be used.
-func (s *UsersStore) Num(ctx context.Context) (int, error) { - err := validOrganization(ctx) - if err != nil { - return 0, err - } - - // retrieve all users from the underlying UsersStore - usrs, err := s.All(ctx) - if err != nil { - return 0, err - } - - return len(usrs), nil -} diff --git a/chronograf/organizations/users_test.go b/chronograf/organizations/users_test.go deleted file mode 100644 index c8494c6b55c..00000000000 --- a/chronograf/organizations/users_test.go +++ /dev/null @@ -1,1101 +0,0 @@ -package organizations_test - -import ( - "context" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/mocks" - "github.com/influxdata/influxdb/v2/chronograf/organizations" -) - -// IgnoreFields is used because ID cannot be predicted reliably -// EquateEmpty is used because we want nil slices, arrays, and maps to be equal to the empty map -var userCmpOptions = cmp.Options{ - cmpopts.IgnoreFields(chronograf.User{}, "ID"), - cmpopts.EquateEmpty(), -} - -func TestUsersStore_Get(t *testing.T) { - type fields struct { - UsersStore chronograf.UsersStore - } - type args struct { - ctx context.Context - userID uint64 - orgID string - } - tests := []struct { - name string - fields fields - args args - want *chronograf.User - wantErr bool - }{ - { - name: "Get user with no role in organization", - fields: fields{ - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - return &chronograf.User{ - ID: 1234, - Name: "billietta", - Provider: "google", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Organization: "1338", - Name: "The HillBilliettas", - }, - }, - }, nil - }, - }, - }, - args: args{ - ctx: context.Background(), - userID: 1234, - orgID: "1336", - }, - wantErr: true, - }, - { - name: "Get user no organization set", - fields: fields{ - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - return &chronograf.User{ - ID: 1234, - Name: "billietta", - Provider: "google", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Organization: "1338", - Name: "The HillBilliettas", - }, - }, - }, nil - }, - }, - }, - args: args{ - userID: 1234, - ctx: context.Background(), - }, - wantErr: true, - }, - { - name: "Get user scoped to an organization", - fields: fields{ - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - return &chronograf.User{ - ID: 1234, - Name: "billietta", - Provider: "google", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Organization: "1338", - Name: "The HillBilliettas", - }, - { - Organization: "1336", - Name: "The BillHilliettos", - }, - }, - }, nil - }, - }, - }, - args: args{ - ctx: context.Background(), - userID: 1234, - orgID: "1336", - }, - want: &chronograf.User{ - Name: "billietta", - Provider: "google", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Organization: "1336", - Name: "The BillHilliettos", - }, - }, - }, - }, - } - for _, tt := range tests { - s := organizations.NewUsersStore(tt.fields.UsersStore, tt.args.orgID) - tt.args.ctx = context.WithValue(tt.args.ctx, organizations.ContextKey, tt.args.orgID) - got, err := s.Get(tt.args.ctx, chronograf.UserQuery{ID: &tt.args.userID}) - if (err != nil) != tt.wantErr { - t.Errorf("%q. 
UsersStore.Get() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - if diff := cmp.Diff(got, tt.want, userCmpOptions...); diff != "" { - t.Errorf("%q. UsersStore.Get():\n-got/+want\ndiff %s", tt.name, diff) - } - } -} - -func TestUsersStore_Add(t *testing.T) { - type fields struct { - UsersStore chronograf.UsersStore - } - type args struct { - ctx context.Context - u *chronograf.User - orgID string - } - tests := []struct { - name string - fields fields - args args - want *chronograf.User - wantErr bool - }{ - { - name: "Add new user - no org", - fields: fields{ - UsersStore: &mocks.UsersStore{ - AddF: func(ctx context.Context, u *chronograf.User) (*chronograf.User, error) { - return u, nil - }, - }, - }, - args: args{ - ctx: context.Background(), - u: &chronograf.User{ - ID: 1234, - Name: "docbrown", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Organization: "1336", - Name: "editor", - }, - }, - }, - }, - wantErr: true, - }, - { - name: "Add new user", - fields: fields{ - UsersStore: &mocks.UsersStore{ - AddF: func(ctx context.Context, u *chronograf.User) (*chronograf.User, error) { - return u, nil - }, - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - return nil, chronograf.ErrUserNotFound - }, - }, - }, - args: args{ - ctx: context.Background(), - u: &chronograf.User{ - ID: 1234, - Name: "docbrown", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Organization: "1336", - Name: "editor", - }, - }, - }, - orgID: "1336", - }, - want: &chronograf.User{ - ID: 1234, - Name: "docbrown", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Organization: "1336", - Name: "editor", - }, - }, - }, - }, - { - name: "Add non-new user without Role", - fields: fields{ - UsersStore: &mocks.UsersStore{ - AddF: func(ctx context.Context, u *chronograf.User) (*chronograf.User, error) { - return u, nil - }, - UpdateF: func(ctx context.Context, u *chronograf.User) error { - return nil - }, - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - return &chronograf.User{ - ID: 1234, - Name: "docbrown", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{}, - }, nil - }, - }, - }, - args: args{ - ctx: context.Background(), - u: &chronograf.User{ - ID: 1234, - Name: "docbrown", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{}, - }, - orgID: "1336", - }, - want: &chronograf.User{ - Name: "docbrown", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{}, - }, - }, - { - name: "Add non-new user with Role", - fields: fields{ - UsersStore: &mocks.UsersStore{ - AddF: func(ctx context.Context, u *chronograf.User) (*chronograf.User, error) { - return u, nil - }, - UpdateF: func(ctx context.Context, u *chronograf.User) error { - return nil - }, - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - return &chronograf.User{ - ID: 1234, - Name: "docbrown", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Organization: "1337", - Name: "editor", - }, - }, - }, nil - }, - }, - }, - args: args{ - ctx: context.Background(), - u: &chronograf.User{ - ID: 1234, - Name: "docbrown", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Organization: "1336", - Name: "admin", - }, - }, - }, - orgID: "1336", - }, - want: &chronograf.User{ - Name: "docbrown", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - 
Organization: "1336", - Name: "admin", - }, - }, - }, - }, - { - name: "Add non-new user with Role. Stored user is not super admin. Provided user is super admin", - fields: fields{ - UsersStore: &mocks.UsersStore{ - AddF: func(ctx context.Context, u *chronograf.User) (*chronograf.User, error) { - return u, nil - }, - UpdateF: func(ctx context.Context, u *chronograf.User) error { - return nil - }, - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - return &chronograf.User{ - ID: 1234, - Name: "docbrown", - Provider: "github", - Scheme: "oauth2", - SuperAdmin: false, - Roles: []chronograf.Role{ - { - Organization: "1337", - Name: "editor", - }, - }, - }, nil - }, - }, - }, - args: args{ - ctx: context.Background(), - u: &chronograf.User{ - ID: 1234, - Name: "docbrown", - Provider: "github", - Scheme: "oauth2", - SuperAdmin: true, - Roles: []chronograf.Role{ - { - Organization: "1336", - Name: "admin", - }, - }, - }, - orgID: "1336", - }, - want: &chronograf.User{ - Name: "docbrown", - Provider: "github", - Scheme: "oauth2", - SuperAdmin: true, - Roles: []chronograf.Role{ - { - Organization: "1336", - Name: "admin", - }, - }, - }, - }, - { - name: "Add user that already exists", - fields: fields{ - UsersStore: &mocks.UsersStore{ - AddF: func(ctx context.Context, u *chronograf.User) (*chronograf.User, error) { - return u, nil - }, - UpdateF: func(ctx context.Context, u *chronograf.User) error { - return nil - }, - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - return &chronograf.User{ - ID: 1234, - Name: "docbrown", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Organization: "1337", - Name: "editor", - }, - }, - }, nil - }, - }, - }, - args: args{ - ctx: context.Background(), - u: &chronograf.User{ - ID: 1234, - Name: "docbrown", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Organization: "1337", - Name: "admin", - }, - }, - }, - orgID: "1337", - }, - wantErr: true, - }, - { - name: "Has invalid Role: missing Organization", - fields: fields{ - UsersStore: &mocks.UsersStore{ - AddF: func(ctx context.Context, u *chronograf.User) (*chronograf.User, error) { - return u, nil - }, - UpdateF: func(ctx context.Context, u *chronograf.User) error { - return nil - }, - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - return nil, nil - }, - }, - }, - args: args{ - ctx: context.Background(), - orgID: "1338", - u: &chronograf.User{ - Name: "henrietta", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: "editor", - }, - }, - }, - }, - wantErr: true, - }, - { - name: "Has invalid Role: missing Name", - fields: fields{ - UsersStore: &mocks.UsersStore{ - AddF: func(ctx context.Context, u *chronograf.User) (*chronograf.User, error) { - return u, nil - }, - UpdateF: func(ctx context.Context, u *chronograf.User) error { - return nil - }, - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - return nil, nil - }, - }, - }, - args: args{ - ctx: context.Background(), - orgID: "1337", - u: &chronograf.User{ - Name: "henrietta", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Organization: "1337", - }, - }, - }, - }, - wantErr: true, - }, - { - name: "Has invalid Organization", - fields: fields{ - UsersStore: &mocks.UsersStore{}, - }, - args: args{ - ctx: context.Background(), - u: &chronograf.User{ - Name: "henrietta", - Provider: "github", - Scheme: 
"oauth2", - Roles: []chronograf.Role{ - chronograf.Role{}, - }, - }, - orgID: "1337", - }, - wantErr: true, - }, - { - name: "Organization does not match orgID", - fields: fields{ - UsersStore: &mocks.UsersStore{}, - }, - args: args{ - ctx: context.Background(), - u: &chronograf.User{ - Name: "henrietta", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Organization: "1338", - Name: "editor", - }, - }, - }, - orgID: "1337", - }, - wantErr: true, - }, - { - name: "Role Name not specified", - args: args{ - ctx: context.Background(), - u: &chronograf.User{ - Name: "henrietta", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Organization: "1337", - }, - }, - }, - orgID: "1337", - }, - wantErr: true, - }, - } - for _, tt := range tests { - tt.args.ctx = context.WithValue(tt.args.ctx, organizations.ContextKey, tt.args.orgID) - s := organizations.NewUsersStore(tt.fields.UsersStore, tt.args.orgID) - - got, err := s.Add(tt.args.ctx, tt.args.u) - if (err != nil) != tt.wantErr { - t.Errorf("%q. UsersStore.Add() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - if got == nil && tt.want == nil { - continue - } - if diff := cmp.Diff(got, tt.want, userCmpOptions...); diff != "" { - t.Errorf("%q. UsersStore.Add():\n-got/+want\ndiff %s", tt.name, diff) - } - } -} - -func TestUsersStore_Delete(t *testing.T) { - type fields struct { - UsersStore chronograf.UsersStore - } - type args struct { - ctx context.Context - user *chronograf.User - orgID string - } - tests := []struct { - name string - fields fields - args args - wantErr bool - }{ - { - name: "No such user", - fields: fields{ - UsersStore: &mocks.UsersStore{ - //AddF: func(ctx context.Context, u *chronograf.User) (*chronograf.User, error) { - // return u, nil - //}, - //UpdateF: func(ctx context.Context, u *chronograf.User) error { - // return nil - //}, - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - return nil, chronograf.ErrUserNotFound - }, - }, - }, - args: args{ - ctx: context.Background(), - user: &chronograf.User{ - ID: 10, - }, - orgID: "1336", - }, - wantErr: true, - }, - { - name: "Derlete user", - fields: fields{ - UsersStore: &mocks.UsersStore{ - UpdateF: func(ctx context.Context, u *chronograf.User) error { - return nil - }, - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - return &chronograf.User{ - ID: 1234, - Name: "noone", - Roles: []chronograf.Role{ - { - Organization: "1338", - Name: "The BillHilliettas", - }, - { - Organization: "1336", - Name: "The HillBilliettas", - }, - }, - }, nil - }, - }, - }, - args: args{ - ctx: context.Background(), - user: &chronograf.User{ - ID: 1234, - Name: "noone", - Roles: []chronograf.Role{ - { - Organization: "1338", - Name: "The BillHilliettas", - }, - { - Organization: "1336", - Name: "The HillBilliettas", - }, - }, - }, - orgID: "1336", - }, - }, - } - for _, tt := range tests { - tt.args.ctx = context.WithValue(tt.args.ctx, organizations.ContextKey, tt.args.orgID) - s := organizations.NewUsersStore(tt.fields.UsersStore, tt.args.orgID) - if err := s.Delete(tt.args.ctx, tt.args.user); (err != nil) != tt.wantErr { - t.Errorf("%q. 
UsersStore.Delete() error = %v, wantErr %v", tt.name, err, tt.wantErr) - } - } -} - -func TestUsersStore_Update(t *testing.T) { - type fields struct { - UsersStore chronograf.UsersStore - } - type args struct { - ctx context.Context - usr *chronograf.User - roles []chronograf.Role - superAdmin bool - orgID string - } - tests := []struct { - name string - fields fields - args args - want *chronograf.User - wantErr bool - }{ - { - name: "No such user", - fields: fields{ - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - return nil, chronograf.ErrUserNotFound - }, - }, - }, - args: args{ - ctx: context.Background(), - usr: &chronograf.User{ - ID: 10, - }, - orgID: "1338", - }, - wantErr: true, - }, - { - name: "Update user role", - fields: fields{ - UsersStore: &mocks.UsersStore{ - UpdateF: func(ctx context.Context, u *chronograf.User) error { - return nil - }, - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - return &chronograf.User{ - Name: "bobetta", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Organization: "1337", - Name: "viewer", - }, - { - Organization: "1338", - Name: "editor", - }, - }, - }, nil - }, - }, - }, - args: args{ - ctx: context.Background(), - usr: &chronograf.User{ - Name: "bobetta", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{}, - }, - roles: []chronograf.Role{ - { - Organization: "1338", - Name: "editor", - }, - }, - orgID: "1338", - }, - want: &chronograf.User{ - Name: "bobetta", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Organization: "1338", - Name: "editor", - }, - }, - }, - }, - { - name: "Update user super admin", - fields: fields{ - UsersStore: &mocks.UsersStore{ - UpdateF: func(ctx context.Context, u *chronograf.User) error { - return nil - }, - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - return &chronograf.User{ - Name: "bobetta", - Provider: "github", - Scheme: "oauth2", - SuperAdmin: false, - Roles: []chronograf.Role{ - { - Organization: "1337", - Name: "viewer", - }, - { - Organization: "1338", - Name: "editor", - }, - }, - }, nil - }, - }, - }, - args: args{ - ctx: context.Background(), - usr: &chronograf.User{ - Name: "bobetta", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{}, - }, - superAdmin: true, - orgID: "1338", - }, - want: &chronograf.User{ - Name: "bobetta", - Provider: "github", - Scheme: "oauth2", - SuperAdmin: true, - }, - }, - } - for _, tt := range tests { - tt.args.ctx = context.WithValue(tt.args.ctx, organizations.ContextKey, tt.args.orgID) - s := organizations.NewUsersStore(tt.fields.UsersStore, tt.args.orgID) - - if tt.args.roles != nil { - tt.args.usr.Roles = tt.args.roles - } - - if tt.args.superAdmin { - tt.args.usr.SuperAdmin = tt.args.superAdmin - } - - if err := s.Update(tt.args.ctx, tt.args.usr); (err != nil) != tt.wantErr { - t.Errorf("%q. UsersStore.Update() error = %v, wantErr %v", tt.name, err, tt.wantErr) - } - - // for the empty test - if tt.want == nil { - continue - } - - if diff := cmp.Diff(tt.args.usr, tt.want, userCmpOptions...); diff != "" { - t.Errorf("%q. 
UsersStore.Update():\n-got/+want\ndiff %s", tt.name, diff) - } - - } -} - -func TestUsersStore_All(t *testing.T) { - type fields struct { - UsersStore chronograf.UsersStore - } - tests := []struct { - name string - fields fields - ctx context.Context - want []chronograf.User - wantRaw []chronograf.User - orgID string - wantErr bool - }{ - { - name: "No users", - fields: fields{ - UsersStore: &mocks.UsersStore{ - AllF: func(ctx context.Context) ([]chronograf.User, error) { - return []chronograf.User{ - { - Name: "howdy", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Organization: "1338", - Name: "viewer", - }, - { - Organization: "1336", - Name: "viewer", - }, - }, - }, - { - Name: "doody2", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Organization: "1337", - Name: "editor", - }, - }, - }, - { - Name: "doody", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Organization: "1338", - Name: "editor", - }, - }, - }, - }, nil - }, - }, - }, - ctx: context.Background(), - orgID: "2330", - }, - { - name: "get all users", - orgID: "1338", - fields: fields{ - UsersStore: &mocks.UsersStore{ - AllF: func(ctx context.Context) ([]chronograf.User, error) { - return []chronograf.User{ - { - Name: "howdy", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Organization: "1338", - Name: "viewer", - }, - { - Organization: "1336", - Name: "viewer", - }, - }, - }, - { - Name: "doody2", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Organization: "1337", - Name: "editor", - }, - }, - }, - { - Name: "doody", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Organization: "1338", - Name: "editor", - }, - }, - }, - }, nil - }, - }, - }, - ctx: context.Background(), - want: []chronograf.User{ - { - Name: "howdy", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Organization: "1338", - Name: "viewer", - }, - }, - }, - { - Name: "doody", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Organization: "1338", - Name: "editor", - }, - }, - }, - }, - }, - } - for _, tt := range tests { - tt.ctx = context.WithValue(tt.ctx, organizations.ContextKey, tt.orgID) - for _, u := range tt.wantRaw { - tt.fields.UsersStore.Add(tt.ctx, &u) - } - s := organizations.NewUsersStore(tt.fields.UsersStore, tt.orgID) - gots, err := s.All(tt.ctx) - if (err != nil) != tt.wantErr { - t.Errorf("%q. UsersStore.All() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - if diff := cmp.Diff(gots, tt.want, userCmpOptions...); diff != "" { - t.Errorf("%q. 
UsersStore.All():\n-got/+want\ndiff %s", tt.name, diff)
-		}
-	}
-}
-
-func TestUsersStore_Num(t *testing.T) {
-	type fields struct {
-		UsersStore chronograf.UsersStore
-	}
-	tests := []struct {
-		name    string
-		fields  fields
-		ctx     context.Context
-		orgID   string
-		want    int
-		wantErr bool
-	}{
-		{
-			name: "No users",
-			fields: fields{
-				UsersStore: &mocks.UsersStore{
-					AllF: func(ctx context.Context) ([]chronograf.User, error) {
-						return []chronograf.User{
-							{
-								Name:     "howdy",
-								Provider: "github",
-								Scheme:   "oauth2",
-								Roles: []chronograf.Role{
-									{
-										Organization: "1338",
-										Name:         "viewer",
-									},
-									{
-										Organization: "1336",
-										Name:         "viewer",
-									},
-								},
-							},
-							{
-								Name:     "doody2",
-								Provider: "github",
-								Scheme:   "oauth2",
-								Roles: []chronograf.Role{
-									{
-										Organization: "1337",
-										Name:         "editor",
-									},
-								},
-							},
-							{
-								Name:     "doody",
-								Provider: "github",
-								Scheme:   "oauth2",
-								Roles: []chronograf.Role{
-									{
-										Organization: "1338",
-										Name:         "editor",
-									},
-								},
-							},
-						}, nil
-					},
-				},
-			},
-			ctx:   context.Background(),
-			orgID: "2330",
-		},
-		{
-			name:  "get all users",
-			orgID: "1338",
-			fields: fields{
-				UsersStore: &mocks.UsersStore{
-					AllF: func(ctx context.Context) ([]chronograf.User, error) {
-						return []chronograf.User{
-							{
-								Name:     "howdy",
-								Provider: "github",
-								Scheme:   "oauth2",
-								Roles: []chronograf.Role{
-									{
-										Organization: "1338",
-										Name:         "viewer",
-									},
-									{
-										Organization: "1336",
-										Name:         "viewer",
-									},
-								},
-							},
-							{
-								Name:     "doody2",
-								Provider: "github",
-								Scheme:   "oauth2",
-								Roles: []chronograf.Role{
-									{
-										Organization: "1337",
-										Name:         "editor",
-									},
-								},
-							},
-							{
-								Name:     "doody",
-								Provider: "github",
-								Scheme:   "oauth2",
-								Roles: []chronograf.Role{
-									{
-										Organization: "1338",
-										Name:         "editor",
-									},
-								},
-							},
-						}, nil
-					},
-				},
-			},
-			ctx:  context.Background(),
-			want: 2,
-		},
-	}
-	for _, tt := range tests {
-		tt.ctx = context.WithValue(tt.ctx, organizations.ContextKey, tt.orgID)
-		s := organizations.NewUsersStore(tt.fields.UsersStore, tt.orgID)
-		got, err := s.Num(tt.ctx)
-		if (err != nil) != tt.wantErr {
-			t.Errorf("%q. UsersStore.Num() error = %v, wantErr %v", tt.name, err, tt.wantErr)
-			continue
-		}
-		if got != tt.want {
-			t.Errorf("%q. UsersStore.Num() = %d. want %d", tt.name, got, tt.want)
-		}
-	}
-}
diff --git a/chronograf/roles/roles.go b/chronograf/roles/roles.go
deleted file mode 100644
index a5e63ad04ca..00000000000
--- a/chronograf/roles/roles.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package roles
-
-import (
-	"context"
-	"fmt"
-
-	"github.com/influxdata/influxdb/v2/chronograf"
-)
-
-type contextKey string
-
-// ContextKey is the key used to specify the
-// role via context.
-const ContextKey = contextKey("role")
-
-func validRole(ctx context.Context) error {
-	// prevents a panic in case of a nil context
-	if ctx == nil {
-		return fmt.Errorf("expect non-nil context")
-	}
-	role, ok := ctx.Value(ContextKey).(string)
-	// should never happen
-	if !ok {
-		return fmt.Errorf("expected role key to be a string")
-	}
-	switch role {
-	case MemberRoleName, ViewerRoleName, EditorRoleName, AdminRoleName:
-		return nil
-	default:
-		return fmt.Errorf("expected role key to be set")
-	}
-}
-
-// Chronograf user roles.
-const (
-	MemberRoleName   = "member"
-	ViewerRoleName   = "viewer"
-	EditorRoleName   = "editor"
-	AdminRoleName    = "admin"
-	SuperAdminStatus = "superadmin"
-
-	// Indicator that the server should retrieve the default role for the organization.
-	WildcardRoleName = "*"
-)
-
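The role names above form a strict ordering: member < viewer < editor < admin. The removed hasAuthorizedRole in roles/sources.go (further down) encodes this with nested switches; purely as an illustration, the same check can be phrased as an ordinal comparison. One deliberate divergence is noted in the comments: the original never authorizes access to a source whose minimum role is member, since that case is absent from its switch.

```go
package main

import "fmt"

// rank orders the role names; unknown roles get the zero value, which
// ranks below every named role.
var rank = map[string]int{
	"member": 1,
	"viewer": 2,
	"editor": 3,
	"admin":  4,
}

// hasAtLeast reports whether providedRole meets or exceeds the minimum
// role required by a source. For viewer/editor/admin source roles this
// matches the removed hasAuthorizedRole; it additionally treats member
// as a valid minimum, which the original's switch does not.
func hasAtLeast(sourceRole, providedRole string) bool {
	min, ok := rank[sourceRole]
	if !ok {
		return false // unknown minimum role: deny access
	}
	return rank[providedRole] >= min
}

func main() {
	fmt.Println(hasAtLeast("viewer", "admin")) // true
	fmt.Println(hasAtLeast("admin", "editor")) // false
}
```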
-var (
-	// MemberRole is the role for a user who cannot perform any operations.
-	MemberRole = chronograf.Role{
-		Name: MemberRoleName,
-	}
-
-	// ViewerRole is the role for a user who can only perform READ operations on Dashboards, Rules, Sources, and Servers.
-	ViewerRole = chronograf.Role{
-		Name: ViewerRoleName,
-	}
-
-	// EditorRole is the role for a user who can perform READ and WRITE operations on Dashboards, Rules, Sources, and Servers.
-	EditorRole = chronograf.Role{
-		Name: EditorRoleName,
-	}
-
-	// AdminRole is the role for a user who can perform READ and WRITE operations on Dashboards, Rules, Sources, Servers, and Users.
-	AdminRole = chronograf.Role{
-		Name: AdminRoleName,
-	}
-)
diff --git a/chronograf/roles/sources.go b/chronograf/roles/sources.go
deleted file mode 100644
index e45c74865ce..00000000000
--- a/chronograf/roles/sources.go
+++ /dev/null
@@ -1,143 +0,0 @@
-package roles
-
-import (
-	"context"
-
-	"github.com/influxdata/influxdb/v2/chronograf"
-)
-
-// NOTE:
-// This code is currently unused. However, it has been left in place because we anticipate
-// that it may be used in the future. It was originally developed as a misunderstanding of
-// https://github.com/influxdata/influxdb/chronograf/issues/1915
-
-// ensure that SourcesStore implements chronograf.SourcesStore
-var _ chronograf.SourcesStore = &SourcesStore{}
-
-// SourcesStore is a facade on a chronograf.SourcesStore that filters sources
-// by the minimum role required to access the source.
-//
-// The role is passed around on the context and set when the
-// SourcesStore is instantiated.
-type SourcesStore struct {
-	store chronograf.SourcesStore
-	role  string
-}
-
-// NewSourcesStore creates a new SourcesStore from an existing
-// chronograf.SourcesStore and a role string.
-func NewSourcesStore(s chronograf.SourcesStore, role string) *SourcesStore {
-	return &SourcesStore{
-		store: s,
-		role:  role,
-	}
-}
-
-// All retrieves all sources from the underlying SourcesStore and filters them
-// by role.
-func (s *SourcesStore) All(ctx context.Context) ([]chronograf.Source, error) {
-	err := validRole(ctx)
-	if err != nil {
-		return nil, err
-	}
-
-	ds, err := s.store.All(ctx)
-	if err != nil {
-		return nil, err
-	}
-
-	// This filters sources without allocating
-	// https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating
-	sources := ds[:0]
-	for _, d := range ds {
-		if hasAuthorizedRole(d.Role, s.role) {
-			sources = append(sources, d)
-		}
-	}
-
-	return sources, nil
-}
-
-// Add creates a new Source in the SourcesStore with source.Role set to be the
-// role from the source store.
-func (s *SourcesStore) Add(ctx context.Context, d chronograf.Source) (chronograf.Source, error) {
-	err := validRole(ctx)
-	if err != nil {
-		return chronograf.Source{}, err
-	}
-
-	return s.store.Add(ctx, d)
-}
-
-// Delete the source from the SourcesStore.
-func (s *SourcesStore) Delete(ctx context.Context, d chronograf.Source) error {
-	err := validRole(ctx)
-	if err != nil {
-		return err
-	}
-
-	d, err = s.store.Get(ctx, d.ID)
-	if err != nil {
-		return err
-	}
-
-	return s.store.Delete(ctx, d)
-}
-
-// Get returns a Source if the id exists and belongs to the role that is set.
-func (s *SourcesStore) Get(ctx context.Context, id int) (chronograf.Source, error) { - err := validRole(ctx) - if err != nil { - return chronograf.Source{}, err - } - - d, err := s.store.Get(ctx, id) - if err != nil { - return chronograf.Source{}, err - } - - if !hasAuthorizedRole(d.Role, s.role) { - return chronograf.Source{}, chronograf.ErrSourceNotFound - } - - return d, nil -} - -// Update the source in SourcesStore. -func (s *SourcesStore) Update(ctx context.Context, d chronograf.Source) error { - err := validRole(ctx) - if err != nil { - return err - } - - _, err = s.store.Get(ctx, d.ID) - if err != nil { - return err - } - - return s.store.Update(ctx, d) -} - -// hasAuthorizedRole checks that the role provided has at least -// the minimum role required. -func hasAuthorizedRole(sourceRole, providedRole string) bool { - switch sourceRole { - case ViewerRoleName: - switch providedRole { - case ViewerRoleName, EditorRoleName, AdminRoleName: - return true - } - case EditorRoleName: - switch providedRole { - case EditorRoleName, AdminRoleName: - return true - } - case AdminRoleName: - switch providedRole { - case AdminRoleName: - return true - } - } - - return false -} diff --git a/chronograf/roles/sources_test.go b/chronograf/roles/sources_test.go deleted file mode 100644 index 9a69718b3f2..00000000000 --- a/chronograf/roles/sources_test.go +++ /dev/null @@ -1,489 +0,0 @@ -package roles - -import ( - "context" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/mocks" -) - -func TestSources_Get(t *testing.T) { - type fields struct { - SourcesStore chronograf.SourcesStore - } - type args struct { - role string - id int - } - type wants struct { - source chronograf.Source - err bool - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "Get viewer source as viewer", - fields: fields{ - SourcesStore: &mocks.SourcesStore{ - GetF: func(ctx context.Context, id int) (chronograf.Source, error) { - return chronograf.Source{ - ID: 1, - Name: "my sweet name", - Organization: "0", - Role: "viewer", - }, nil - }, - }, - }, - args: args{ - role: "viewer", - id: 1, - }, - wants: wants{ - source: chronograf.Source{ - ID: 1, - Name: "my sweet name", - Organization: "0", - Role: "viewer", - }, - }, - }, - { - name: "Get viewer source as editor", - fields: fields{ - SourcesStore: &mocks.SourcesStore{ - GetF: func(ctx context.Context, id int) (chronograf.Source, error) { - return chronograf.Source{ - ID: 1, - Name: "my sweet name", - Organization: "0", - Role: "viewer", - }, nil - }, - }, - }, - args: args{ - role: "editor", - id: 1, - }, - wants: wants{ - source: chronograf.Source{ - ID: 1, - Name: "my sweet name", - Organization: "0", - Role: "viewer", - }, - }, - }, - { - name: "Get viewer source as admin", - fields: fields{ - SourcesStore: &mocks.SourcesStore{ - GetF: func(ctx context.Context, id int) (chronograf.Source, error) { - return chronograf.Source{ - ID: 1, - Name: "my sweet name", - Organization: "0", - Role: "viewer", - }, nil - }, - }, - }, - args: args{ - role: "admin", - id: 1, - }, - wants: wants{ - source: chronograf.Source{ - ID: 1, - Name: "my sweet name", - Organization: "0", - Role: "viewer", - }, - }, - }, - { - name: "Get editor source as editor", - fields: fields{ - SourcesStore: &mocks.SourcesStore{ - GetF: func(ctx context.Context, id int) (chronograf.Source, error) { - return chronograf.Source{ - ID: 1, - Name: "my 
sweet name", - Organization: "0", - Role: "editor", - }, nil - }, - }, - }, - args: args{ - role: "editor", - id: 1, - }, - wants: wants{ - source: chronograf.Source{ - ID: 1, - Name: "my sweet name", - Organization: "0", - Role: "editor", - }, - }, - }, - { - name: "Get editor source as admin", - fields: fields{ - SourcesStore: &mocks.SourcesStore{ - GetF: func(ctx context.Context, id int) (chronograf.Source, error) { - return chronograf.Source{ - ID: 1, - Name: "my sweet name", - Organization: "0", - Role: "editor", - }, nil - }, - }, - }, - args: args{ - role: "admin", - id: 1, - }, - wants: wants{ - source: chronograf.Source{ - ID: 1, - Name: "my sweet name", - Organization: "0", - Role: "editor", - }, - }, - }, - { - name: "Get editor source as viewer - want error", - fields: fields{ - SourcesStore: &mocks.SourcesStore{ - GetF: func(ctx context.Context, id int) (chronograf.Source, error) { - return chronograf.Source{ - ID: 1, - Name: "my sweet name", - Organization: "0", - Role: "editor", - }, nil - }, - }, - }, - args: args{ - role: "viewer", - id: 1, - }, - wants: wants{ - err: true, - }, - }, - { - name: "Get admin source as admin", - fields: fields{ - SourcesStore: &mocks.SourcesStore{ - GetF: func(ctx context.Context, id int) (chronograf.Source, error) { - return chronograf.Source{ - ID: 1, - Name: "my sweet name", - Organization: "0", - Role: "admin", - }, nil - }, - }, - }, - args: args{ - role: "admin", - id: 1, - }, - wants: wants{ - source: chronograf.Source{ - ID: 1, - Name: "my sweet name", - Organization: "0", - Role: "admin", - }, - }, - }, - { - name: "Get admin source as viewer - want error", - fields: fields{ - SourcesStore: &mocks.SourcesStore{ - GetF: func(ctx context.Context, id int) (chronograf.Source, error) { - return chronograf.Source{ - ID: 1, - Name: "my sweet name", - Organization: "0", - Role: "admin", - }, nil - }, - }, - }, - args: args{ - role: "viewer", - id: 1, - }, - wants: wants{ - err: true, - }, - }, - { - name: "Get admin source as editor - want error", - fields: fields{ - SourcesStore: &mocks.SourcesStore{ - GetF: func(ctx context.Context, id int) (chronograf.Source, error) { - return chronograf.Source{ - ID: 1, - Name: "my sweet name", - Organization: "0", - Role: "admin", - }, nil - }, - }, - }, - args: args{ - role: "editor", - id: 1, - }, - wants: wants{ - err: true, - }, - }, - { - name: "Get source bad context", - fields: fields{ - SourcesStore: &mocks.SourcesStore{ - GetF: func(ctx context.Context, id int) (chronograf.Source, error) { - return chronograf.Source{ - ID: 1, - Name: "my sweet name", - Organization: "0", - Role: "admin", - }, nil - }, - }, - }, - args: args{ - role: "random role", - id: 1, - }, - wants: wants{ - err: true, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - store := NewSourcesStore(tt.fields.SourcesStore, tt.args.role) - - ctx := context.Background() - - if tt.args.role != "" { - ctx = context.WithValue(ctx, ContextKey, tt.args.role) - } - - source, err := store.Get(ctx, tt.args.id) - if (err != nil) != tt.wants.err { - t.Errorf("%q. Store.Sources().Get() error = %v, wantErr %v", tt.name, err, tt.wants.err) - return - } - if diff := cmp.Diff(source, tt.wants.source); diff != "" { - t.Errorf("%q. 
Store.Sources().Get():\n-got/+want\ndiff %s", tt.name, diff) - } - }) - } -} - -func TestSources_All(t *testing.T) { - type fields struct { - SourcesStore chronograf.SourcesStore - } - type args struct { - role string - } - type wants struct { - sources []chronograf.Source - err bool - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "Get viewer sources as viewer", - fields: fields{ - SourcesStore: &mocks.SourcesStore{ - AllF: func(ctx context.Context) ([]chronograf.Source, error) { - return []chronograf.Source{ - { - ID: 1, - Name: "my sweet name", - Organization: "0", - Role: "viewer", - }, - { - ID: 2, - Name: "my sweet name", - Organization: "0", - Role: "editor", - }, - { - ID: 3, - Name: "my sweet name", - Organization: "0", - Role: "admin", - }, - }, nil - }, - }, - }, - args: args{ - role: "viewer", - }, - wants: wants{ - sources: []chronograf.Source{ - { - ID: 1, - Name: "my sweet name", - Organization: "0", - Role: "viewer", - }, - }, - }, - }, - { - name: "Get editor sources as editor", - fields: fields{ - SourcesStore: &mocks.SourcesStore{ - AllF: func(ctx context.Context) ([]chronograf.Source, error) { - return []chronograf.Source{ - { - ID: 1, - Name: "my sweet name", - Organization: "0", - Role: "viewer", - }, - { - ID: 2, - Name: "my sweet name", - Organization: "0", - Role: "editor", - }, - { - ID: 3, - Name: "my sweet name", - Organization: "0", - Role: "admin", - }, - }, nil - }, - }, - }, - args: args{ - role: "editor", - }, - wants: wants{ - sources: []chronograf.Source{ - { - ID: 1, - Name: "my sweet name", - Organization: "0", - Role: "viewer", - }, - { - ID: 2, - Name: "my sweet name", - Organization: "0", - Role: "editor", - }, - }, - }, - }, - { - name: "Get admin sources as admin", - fields: fields{ - SourcesStore: &mocks.SourcesStore{ - AllF: func(ctx context.Context) ([]chronograf.Source, error) { - return []chronograf.Source{ - { - ID: 1, - Name: "my sweet name", - Organization: "0", - Role: "viewer", - }, - { - ID: 2, - Name: "my sweet name", - Organization: "0", - Role: "editor", - }, - { - ID: 3, - Name: "my sweet name", - Organization: "0", - Role: "admin", - }, - }, nil - }, - }, - }, - args: args{ - role: "admin", - }, - wants: wants{ - sources: []chronograf.Source{ - { - ID: 1, - Name: "my sweet name", - Organization: "0", - Role: "viewer", - }, - { - ID: 2, - Name: "my sweet name", - Organization: "0", - Role: "editor", - }, - { - ID: 3, - Name: "my sweet name", - Organization: "0", - Role: "admin", - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - store := NewSourcesStore(tt.fields.SourcesStore, tt.args.role) - - ctx := context.Background() - - if tt.args.role != "" { - ctx = context.WithValue(ctx, ContextKey, tt.args.role) - } - - sources, err := store.All(ctx) - if (err != nil) != tt.wants.err { - t.Errorf("%q. Store.Sources().Get() error = %v, wantErr %v", tt.name, err, tt.wants.err) - return - } - if diff := cmp.Diff(sources, tt.wants.sources); diff != "" { - t.Errorf("%q. 
Store.Sources().Get():\n-got/+want\ndiff %s", tt.name, diff) - } - }) - } -} diff --git a/chronograf/server/Makefile b/chronograf/server/Makefile deleted file mode 100644 index 1ebe969917a..00000000000 --- a/chronograf/server/Makefile +++ /dev/null @@ -1,26 +0,0 @@ -# List any generated files here -TARGETS = swagger_gen.go -# List any source files used to generate the targets here -SOURCES = swagger.json swagger.go -# List any directories that have their own Makefile here -SUBDIRS = - -# Default target -all: $(SUBDIRS) $(TARGETS) - -# Recurse into subdirs for same make goal -$(SUBDIRS): - $(MAKE) -C $@ $(MAKECMDGOALS) - -# Clean all targets recursively -clean: $(SUBDIRS) - rm -f $(TARGETS) - -# Define go generate if not already defined -GO_GENERATE := go generate - -# Run go generate for the targets -$(TARGETS): $(SOURCES) - $(GO_GENERATE) -x - -.PHONY: all clean $(SUBDIRS) diff --git a/chronograf/server/TODO.go b/chronograf/server/TODO.go deleted file mode 100644 index 242f0f8e479..00000000000 --- a/chronograf/server/TODO.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !assets - -package server - -import ( - "errors" -) - -// The functions defined in this file are placeholders when the binary is compiled -// without assets. - -// Asset returns an error stating no assets were included in the binary. -func Asset(string) ([]byte, error) { - return nil, errors.New("no assets included in binary") -} diff --git a/chronograf/server/annotations.go b/chronograf/server/annotations.go deleted file mode 100644 index 262ff80fd1c..00000000000 --- a/chronograf/server/annotations.go +++ /dev/null @@ -1,452 +0,0 @@ -package server - -import ( - "encoding/json" - "fmt" - "net/http" - "net/url" - "time" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/influx" -) - -const ( - since = "since" - until = "until" - timeMilliFormat = "2006-01-02T15:04:05.999Z07:00" -) - -type annotationLinks struct { - Self string `json:"self"` // Self link mapping to this resource -} - -type annotationResponse struct { - ID string `json:"id"` // ID is the unique annotation identifier - StartTime string `json:"startTime"` // StartTime in RFC3339 of the start of the annotation - EndTime string `json:"endTime"` // EndTime in RFC3339 of the end of the annotation - Text string `json:"text"` // Text is the associated user-facing text describing the annotation - Type string `json:"type"` // Type describes the kind of annotation - Links annotationLinks `json:"links"` -} - -func newAnnotationResponse(src chronograf.Source, a *chronograf.Annotation) annotationResponse { - base := "/chronograf/v1/sources" - res := annotationResponse{ - ID: a.ID, - StartTime: a.StartTime.UTC().Format(timeMilliFormat), - EndTime: a.EndTime.UTC().Format(timeMilliFormat), - Text: a.Text, - Type: a.Type, - Links: annotationLinks{ - Self: fmt.Sprintf("%s/%d/annotations/%s", base, src.ID, a.ID), - }, - } - - if a.EndTime.IsZero() { - res.EndTime = "" - } - - return res -} - -type annotationsResponse struct { - Annotations []annotationResponse `json:"annotations"` -} - -func newAnnotationsResponse(src chronograf.Source, as []chronograf.Annotation) annotationsResponse { - annotations := make([]annotationResponse, len(as)) - for i, a := range as { - annotations[i] = newAnnotationResponse(src, &a) - } - return annotationsResponse{ - Annotations: annotations, - } -} - -func validAnnotationQuery(query url.Values) (startTime, stopTime time.Time, err error) { - start := query.Get(since) - if start == "" { - 
return time.Time{}, time.Time{}, fmt.Errorf("since parameter is required") - } - - startTime, err = time.Parse(timeMilliFormat, start) - if err != nil { - return - } - - // if until isn't stated, the default time is now - stopTime = time.Now() - stop := query.Get(until) - if stop != "" { - stopTime, err = time.Parse(timeMilliFormat, stop) - if err != nil { - return time.Time{}, time.Time{}, err - } - } - if startTime.After(stopTime) { - startTime, stopTime = stopTime, startTime - } - return startTime, stopTime, nil -} - -// Annotations returns all annotations within the annotations store -func (s *Service) Annotations(w http.ResponseWriter, r *http.Request) { - id, err := paramID("id", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) - return - } - - start, stop, err := validAnnotationQuery(r.URL.Query()) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) - return - } - - ctx := r.Context() - src, err := s.Store.Sources(ctx).Get(ctx, id) - if err != nil { - notFound(w, id, s.Logger) - return - } - - ts, err := s.TimeSeries(src) - if err != nil { - msg := fmt.Sprintf("unable to connect to source %d: %v", id, err) - Error(w, http.StatusBadRequest, msg, s.Logger) - return - } - - if err = ts.Connect(ctx, &src); err != nil { - msg := fmt.Sprintf("unable to connect to source %d: %v", id, err) - Error(w, http.StatusBadRequest, msg, s.Logger) - return - } - - store := influx.NewAnnotationStore(ts) - annotations, err := store.All(ctx, start, stop) - if err != nil { - msg := fmt.Errorf("error loading annotations: %v", err) - unknownErrorWithMessage(w, msg, s.Logger) - return - } - - res := newAnnotationsResponse(src, annotations) - encodeJSON(w, http.StatusOK, res, s.Logger) -} - -// Annotation returns a specified annotation id within the annotations store -func (s *Service) Annotation(w http.ResponseWriter, r *http.Request) { - id, err := paramID("id", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) - return - } - annoID, err := paramStr("aid", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) - return - } - - ctx := r.Context() - src, err := s.Store.Sources(ctx).Get(ctx, id) - if err != nil { - notFound(w, id, s.Logger) - return - } - - ts, err := s.TimeSeries(src) - if err != nil { - msg := fmt.Sprintf("unable to connect to source %d: %v", id, err) - Error(w, http.StatusBadRequest, msg, s.Logger) - return - } - - if err = ts.Connect(ctx, &src); err != nil { - msg := fmt.Sprintf("unable to connect to source %d: %v", id, err) - Error(w, http.StatusBadRequest, msg, s.Logger) - return - } - - store := influx.NewAnnotationStore(ts) - anno, err := store.Get(ctx, annoID) - if err != nil { - if err != chronograf.ErrAnnotationNotFound { - msg := fmt.Errorf("error loading annotation: %v", err) - unknownErrorWithMessage(w, msg, s.Logger) - return - } - Error(w, http.StatusBadRequest, err.Error(), s.Logger) - return - } - - res := newAnnotationResponse(src, anno) - encodeJSON(w, http.StatusOK, res, s.Logger) -} - -type newAnnotationRequest struct { - StartTime time.Time - EndTime time.Time - Text string `json:"text,omitempty"` // Text is the associated user-facing text describing the annotation - Type string `json:"type,omitempty"` // Type describes the kind of annotation -} - -func (ar *newAnnotationRequest) UnmarshalJSON(data []byte) error { - type Alias newAnnotationRequest - aux := &struct { - StartTime string `json:"startTime"` // StartTime is the time in 
rfc3339 milliseconds - EndTime string `json:"endTime"` // EndTime is the time in rfc3339 milliseconds - *Alias - }{ - Alias: (*Alias)(ar), - } - if err := json.Unmarshal(data, &aux); err != nil { - return err - } - - var err error - ar.StartTime, err = time.Parse(timeMilliFormat, aux.StartTime) - if err != nil { - return err - } - - ar.EndTime, err = time.Parse(timeMilliFormat, aux.EndTime) - if err != nil { - return err - } - - if ar.StartTime.After(ar.EndTime) { - ar.StartTime, ar.EndTime = ar.EndTime, ar.StartTime - } - - return nil -} - -func (ar *newAnnotationRequest) Annotation() *chronograf.Annotation { - return &chronograf.Annotation{ - StartTime: ar.StartTime, - EndTime: ar.EndTime, - Text: ar.Text, - Type: ar.Type, - } -} - -// NewAnnotation adds the annotation from a POST body to the annotations store -func (s *Service) NewAnnotation(w http.ResponseWriter, r *http.Request) { - id, err := paramID("id", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) - return - } - - ctx := r.Context() - src, err := s.Store.Sources(ctx).Get(ctx, id) - if err != nil { - notFound(w, id, s.Logger) - return - } - - ts, err := s.TimeSeries(src) - if err != nil { - msg := fmt.Sprintf("unable to connect to source %d: %v", id, err) - Error(w, http.StatusBadRequest, msg, s.Logger) - return - } - - if err = ts.Connect(ctx, &src); err != nil { - msg := fmt.Sprintf("unable to connect to source %d: %v", id, err) - Error(w, http.StatusBadRequest, msg, s.Logger) - return - } - - var req newAnnotationRequest - if err = json.NewDecoder(r.Body).Decode(&req); err != nil { - invalidJSON(w, s.Logger) - return - } - - store := influx.NewAnnotationStore(ts) - anno, err := store.Add(ctx, req.Annotation()) - if err != nil { - if err == chronograf.ErrUpstreamTimeout { - msg := "Timeout waiting for response" - Error(w, http.StatusRequestTimeout, msg, s.Logger) - return - } - Error(w, http.StatusBadRequest, err.Error(), s.Logger) - return - } - - res := newAnnotationResponse(src, anno) - location(w, res.Links.Self) - encodeJSON(w, http.StatusCreated, res, s.Logger) -} - -// RemoveAnnotation removes the annotation from the time series source -func (s *Service) RemoveAnnotation(w http.ResponseWriter, r *http.Request) { - id, err := paramID("id", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) - return - } - - annoID, err := paramStr("aid", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) - return - } - - ctx := r.Context() - src, err := s.Store.Sources(ctx).Get(ctx, id) - if err != nil { - notFound(w, id, s.Logger) - return - } - - ts, err := s.TimeSeries(src) - if err != nil { - msg := fmt.Sprintf("unable to connect to source %d: %v", id, err) - Error(w, http.StatusBadRequest, msg, s.Logger) - return - } - - if err = ts.Connect(ctx, &src); err != nil { - msg := fmt.Sprintf("unable to connect to source %d: %v", id, err) - Error(w, http.StatusBadRequest, msg, s.Logger) - return - } - - store := influx.NewAnnotationStore(ts) - if err = store.Delete(ctx, annoID); err != nil { - if err == chronograf.ErrUpstreamTimeout { - msg := "Timeout waiting for response" - Error(w, http.StatusRequestTimeout, msg, s.Logger) - return - } - Error(w, http.StatusBadRequest, err.Error(), s.Logger) - return - } - - w.WriteHeader(http.StatusNoContent) -} - -type updateAnnotationRequest struct { - StartTime *time.Time `json:"startTime,omitempty"` // StartTime is the time in rfc3339 milliseconds - EndTime *time.Time 
`json:"endTime,omitempty"` // EndTime is the time in rfc3339 milliseconds - Text *string `json:"text,omitempty"` // Text is the associated user-facing text describing the annotation - Type *string `json:"type,omitempty"` // Type describes the kind of annotation -} - -// TODO: make sure that endtime is after starttime -func (u *updateAnnotationRequest) UnmarshalJSON(data []byte) error { - type Alias updateAnnotationRequest - aux := &struct { - StartTime *string `json:"startTime,omitempty"` - EndTime *string `json:"endTime,omitempty"` - *Alias - }{ - Alias: (*Alias)(u), - } - if err := json.Unmarshal(data, &aux); err != nil { - return err - } - - if aux.StartTime != nil { - tm, err := time.Parse(timeMilliFormat, *aux.StartTime) - if err != nil { - return err - } - u.StartTime = &tm - } - - if aux.EndTime != nil { - tm, err := time.Parse(timeMilliFormat, *aux.EndTime) - if err != nil { - return err - } - u.EndTime = &tm - } - - // Update must have at least one field set - if u.StartTime == nil && u.EndTime == nil && u.Text == nil && u.Type == nil { - return fmt.Errorf("update request must have at least one field") - } - - return nil -} - -// UpdateAnnotation overwrite an existing annotation -func (s *Service) UpdateAnnotation(w http.ResponseWriter, r *http.Request) { - id, err := paramID("id", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) - return - } - - annoID, err := paramStr("aid", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) - return - } - - ctx := r.Context() - src, err := s.Store.Sources(ctx).Get(ctx, id) - if err != nil { - notFound(w, id, s.Logger) - return - } - - ts, err := s.TimeSeries(src) - if err != nil { - msg := fmt.Sprintf("unable to connect to source %d: %v", id, err) - Error(w, http.StatusBadRequest, msg, s.Logger) - return - } - - if err = ts.Connect(ctx, &src); err != nil { - msg := fmt.Sprintf("unable to connect to source %d: %v", id, err) - Error(w, http.StatusBadRequest, msg, s.Logger) - return - } - - store := influx.NewAnnotationStore(ts) - cur, err := store.Get(ctx, annoID) - if err != nil { - notFound(w, annoID, s.Logger) - return - } - - var req updateAnnotationRequest - if err = json.NewDecoder(r.Body).Decode(&req); err != nil { - invalidJSON(w, s.Logger) - return - } - - if req.StartTime != nil { - cur.StartTime = *req.StartTime - } - if req.EndTime != nil { - cur.EndTime = *req.EndTime - } - if req.Text != nil { - cur.Text = *req.Text - } - if req.Type != nil { - cur.Type = *req.Type - } - - if err = store.Update(ctx, cur); err != nil { - if err == chronograf.ErrUpstreamTimeout { - msg := "Timeout waiting for response" - Error(w, http.StatusRequestTimeout, msg, s.Logger) - return - } - Error(w, http.StatusBadRequest, err.Error(), s.Logger) - return - } - - res := newAnnotationResponse(src, cur) - location(w, res.Links.Self) - encodeJSON(w, http.StatusOK, res, s.Logger) -} diff --git a/chronograf/server/annotations_test.go b/chronograf/server/annotations_test.go deleted file mode 100644 index 23984b4139a..00000000000 --- a/chronograf/server/annotations_test.go +++ /dev/null @@ -1,192 +0,0 @@ -package server - -import ( - "bytes" - "context" - "fmt" - "net/http" - "net/http/httptest" - "testing" - - "github.com/influxdata/httprouter" - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/mocks" -) - -func TestService_Annotations(t *testing.T) { - type fields struct { - Store DataStore - TimeSeriesClient 
TimeSeriesClient - } - - tests := []struct { - name string - fields fields - w *httptest.ResponseRecorder - r *http.Request - ID string - want string - }{ - { - name: "error no id", - w: httptest.NewRecorder(), - r: httptest.NewRequest("GET", "/chronograf/v1/sources/1/annotations", bytes.NewReader([]byte(`howdy`))), - want: `{"code":422,"message":"error converting ID "}`, - }, - { - name: "no since parameter", - ID: "1", - w: httptest.NewRecorder(), - r: httptest.NewRequest("GET", "/chronograf/v1/sources/1/annotations", bytes.NewReader([]byte(`howdy`))), - want: `{"code":422,"message":"since parameter is required"}`, - }, - { - name: "invalid since parameter", - ID: "1", - w: httptest.NewRecorder(), - r: httptest.NewRequest("GET", "/chronograf/v1/sources/1/annotations?since=howdy", bytes.NewReader([]byte(`howdy`))), - want: `{"code":422,"message":"parsing time \"howdy\" as \"2006-01-02T15:04:05.999Z07:00\": cannot parse \"howdy\" as \"2006\""}`, - }, - { - name: "error is returned when get is an error", - fields: fields{ - Store: &mocks.Store{ - SourcesStore: &mocks.SourcesStore{ - GetF: func(ctx context.Context, ID int) (chronograf.Source, error) { - return chronograf.Source{}, fmt.Errorf("error") - }, - }, - }, - }, - ID: "1", - w: httptest.NewRecorder(), - r: httptest.NewRequest("GET", "/chronograf/v1/sources/1/annotations?since=1985-04-12T23:20:50.52Z", bytes.NewReader([]byte(`howdy`))), - want: `{"code":404,"message":"ID 1 not found"}`, - }, - { - name: "error is returned connect is an error", - fields: fields{ - Store: &mocks.Store{ - SourcesStore: &mocks.SourcesStore{ - GetF: func(ctx context.Context, ID int) (chronograf.Source, error) { - return chronograf.Source{ - ID: ID, - }, nil - }, - }, - }, - TimeSeriesClient: &mocks.TimeSeries{ - ConnectF: func(context.Context, *chronograf.Source) error { - return fmt.Errorf("error)") - }, - }, - }, - ID: "1", - w: httptest.NewRecorder(), - r: httptest.NewRequest("GET", "/chronograf/v1/sources/1/annotations?since=1985-04-12T23:20:50.52Z", bytes.NewReader([]byte(`howdy`))), - want: `{"code":400,"message":"unable to connect to source 1: error)"}`, - }, - { - name: "error returned when annotations are invalid", - fields: fields{ - Store: &mocks.Store{ - SourcesStore: &mocks.SourcesStore{ - GetF: func(ctx context.Context, ID int) (chronograf.Source, error) { - return chronograf.Source{ - ID: ID, - }, nil - }, - }, - }, - TimeSeriesClient: &mocks.TimeSeries{ - ConnectF: func(context.Context, *chronograf.Source) error { - return nil - }, - QueryF: func(context.Context, chronograf.Query) (chronograf.Response, error) { - return mocks.NewResponse(`{[]}`, nil), nil - }, - }, - }, - ID: "1", - w: httptest.NewRecorder(), - r: httptest.NewRequest("GET", "/chronograf/v1/sources/1/annotations?since=1985-04-12T23:20:50.52Z", bytes.NewReader([]byte(`howdy`))), - want: `{"code":500,"message":"unknown error: error loading annotations: invalid character '[' looking for beginning of object key string"}`, - }, - { - name: "error is returned connect is an error", - fields: fields{ - Store: &mocks.Store{ - SourcesStore: &mocks.SourcesStore{ - GetF: func(ctx context.Context, ID int) (chronograf.Source, error) { - return chronograf.Source{ - ID: ID, - }, nil - }, - }, - }, - TimeSeriesClient: &mocks.TimeSeries{ - ConnectF: func(context.Context, *chronograf.Source) error { - return nil - }, - QueryF: func(context.Context, chronograf.Query) (chronograf.Response, error) { - return mocks.NewResponse(`[ - { - "series": [ - { - "name": "annotations", - "columns": [ - 
"time", - "start_time", - "modified_time_ns", - "text", - "type", - "id" - ], - "values": [ - [ - 1516920177345000000, - 0, - 1516989242129417403, - "mytext", - "mytype", - "ea0aa94b-969a-4cd5-912a-5db61d502268" - ] - ] - } - ] - } - ]`, nil), nil - }, - }, - }, - ID: "1", - w: httptest.NewRecorder(), - r: httptest.NewRequest("GET", "/chronograf/v1/sources/1/annotations?since=1985-04-12T23:20:50.52Z", bytes.NewReader([]byte(`howdy`))), - want: `{"annotations":[{"id":"ea0aa94b-969a-4cd5-912a-5db61d502268","startTime":"1970-01-01T00:00:00Z","endTime":"2018-01-25T22:42:57.345Z","text":"mytext","type":"mytype","links":{"self":"/chronograf/v1/sources/1/annotations/ea0aa94b-969a-4cd5-912a-5db61d502268"}}]} -`, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tt.r = tt.r.WithContext(context.WithValue( - context.TODO(), - httprouter.ParamsKey, - httprouter.Params{ - { - Key: "id", - Value: tt.ID, - }, - })) - s := &Service{ - Store: tt.fields.Store, - TimeSeriesClient: tt.fields.TimeSeriesClient, - Logger: mocks.NewLogger(), - } - s.Annotations(tt.w, tt.r) - got := tt.w.Body.String() - if got != tt.want { - t.Errorf("Annotations() got != want:\n%s\n%s", got, tt.want) - } - }) - } -} diff --git a/chronograf/server/assets.go b/chronograf/server/assets.go deleted file mode 100644 index 16f0ded8bd8..00000000000 --- a/chronograf/server/assets.go +++ /dev/null @@ -1,58 +0,0 @@ -package server - -import ( - "net/http" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/dist" -) - -const ( - // Dir is prefix of the assets in the bindata - Dir = "../ui/build" - // Default is the default item to load if 404 - Default = "../ui/build/index.html" - // DebugDir is the prefix of the assets in development mode - DebugDir = "ui/build" - // DebugDefault is the default item to load if 404 - DebugDefault = "ui/build/index.html" - // DefaultContentType is the content-type to return for the Default file - DefaultContentType = "text/html; charset=utf-8" -) - -// AssetsOpts configures the asset middleware -type AssetsOpts struct { - // Develop when true serves assets from ui/build directory directly; false will use internal bindata. - Develop bool - // Logger will log the asset served - Logger chronograf.Logger -} - -// Assets creates a middleware that will serve a single page app. -func Assets(opts AssetsOpts) http.Handler { - var assets chronograf.Assets - if opts.Develop { - assets = &dist.DebugAssets{ - Dir: DebugDir, - Default: DebugDefault, - } - } else { - assets = &dist.BindataAssets{ - Prefix: Dir, - Default: Default, - DefaultContentType: DefaultContentType, - } - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if opts.Logger != nil { - opts.Logger. - WithField("component", "server"). - WithField("remote_addr", r.RemoteAddr). - WithField("method", r.Method). - WithField("url", r.URL). 
- Info("Serving assets") - } - assets.Handler().ServeHTTP(w, r) - }) -} diff --git a/chronograf/server/auth.go b/chronograf/server/auth.go deleted file mode 100644 index fa38e746226..00000000000 --- a/chronograf/server/auth.go +++ /dev/null @@ -1,256 +0,0 @@ -package server - -import ( - "context" - "fmt" - "net/http" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/oauth2" - "github.com/influxdata/influxdb/v2/chronograf/organizations" - "github.com/influxdata/influxdb/v2/chronograf/roles" -) - -// HasAuthorizedToken extracts the token from a request and validates it using the authenticator. -// It is used by routes that need access to the token to populate links request. -func HasAuthorizedToken(auth oauth2.Authenticator, r *http.Request) (oauth2.Principal, error) { - ctx := r.Context() - return auth.Validate(ctx, r) -} - -// AuthorizedToken extracts the token and validates; if valid the next handler -// will be run. The principal will be sent to the next handler via the request's -// Context. It is up to the next handler to determine if the principal has access. -// On failure, will return http.StatusForbidden. -func AuthorizedToken(auth oauth2.Authenticator, logger chronograf.Logger, next http.Handler) http.HandlerFunc { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - log := logger. - WithField("component", "token_auth"). - WithField("remote_addr", r.RemoteAddr). - WithField("method", r.Method). - WithField("url", r.URL) - - ctx := r.Context() - // We do not check the authorization of the principal. Those - // served further down the chain should do so. - principal, err := auth.Validate(ctx, r) - if err != nil { - log.Error("Invalid principal") - w.WriteHeader(http.StatusForbidden) - return - } - - // If the principal is valid we will extend its lifespan - // into the future - principal, err = auth.Extend(ctx, w, principal) - if err != nil { - log.Error("Unable to extend principal") - w.WriteHeader(http.StatusForbidden) - return - } - - // Send the principal to the next handler - ctx = context.WithValue(ctx, oauth2.PrincipalKey, principal) - next.ServeHTTP(w, r.WithContext(ctx)) - }) -} - -// RawStoreAccess gives a super admin access to the data store without a facade. -func RawStoreAccess(logger chronograf.Logger, next http.HandlerFunc) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - if isServer := hasServerContext(ctx); isServer { - next(w, r) - return - } - - log := logger. - WithField("component", "raw_store"). - WithField("remote_addr", r.RemoteAddr). - WithField("method", r.Method). - WithField("url", r.URL) - - if isSuperAdmin := hasSuperAdminContext(ctx); isSuperAdmin { - r = r.WithContext(serverContext(ctx)) - } else { - log.Error("User making request is not a SuperAdmin") - Error(w, http.StatusForbidden, "User is not authorized", logger) - return - } - - next(w, r) - } -} - -// AuthorizedUser extracts the user name and provider from context. If the -// user and provider can be found on the context, we look up the user by their -// name and provider. If the user is found, we verify that the user has at at -// least the role supplied. -func AuthorizedUser( - store DataStore, - useAuth bool, - role string, - logger chronograf.Logger, - next http.HandlerFunc, -) http.HandlerFunc { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - serverCtx := serverContext(ctx) - - log := logger. 
- WithField("component", "role_auth"). - WithField("remote_addr", r.RemoteAddr). - WithField("method", r.Method). - WithField("url", r.URL) - - defaultOrg, err := store.Organizations(serverCtx).DefaultOrganization(serverCtx) - if err != nil { - log.Error(fmt.Sprintf("Failed to retrieve the default organization: %v", err)) - Error(w, http.StatusForbidden, "User is not authorized", logger) - return - } - - if !useAuth { - // If there is no auth, then set the organization id to be the default org id on context - // so that calls like hasOrganizationContext as used in Organization Config service - // method OrganizationConfig can successfully get the organization id - ctx = context.WithValue(ctx, organizations.ContextKey, defaultOrg.ID) - - // And if there is no auth, then give the user raw access to the DataStore - r = r.WithContext(serverContext(ctx)) - next(w, r) - return - } - - p, err := getValidPrincipal(ctx) - if err != nil { - log.Error("Failed to retrieve principal from context") - Error(w, http.StatusForbidden, "User is not authorized", logger) - return - } - scheme, err := getScheme(ctx) - if err != nil { - log.Error("Failed to retrieve scheme from context") - Error(w, http.StatusForbidden, "User is not authorized", logger) - return - } - - // This is as if the user was logged into the default organization - if p.Organization == "" { - p.Organization = defaultOrg.ID - } - - // validate that the organization exists - _, err = store.Organizations(serverCtx).Get(serverCtx, chronograf.OrganizationQuery{ID: &p.Organization}) - if err != nil { - log.Error(fmt.Sprintf("Failed to retrieve organization %s from organizations store", p.Organization)) - Error(w, http.StatusForbidden, "User is not authorized", logger) - return - } - ctx = context.WithValue(ctx, organizations.ContextKey, p.Organization) - // TODO: seems silly to look up a user twice - u, err := store.Users(serverCtx).Get(serverCtx, chronograf.UserQuery{ - Name: &p.Subject, - Provider: &p.Issuer, - Scheme: &scheme, - }) - - if err != nil { - log.Error("Failed to retrieve user") - Error(w, http.StatusForbidden, "User is not authorized", logger) - return - } - // In particular this is used by sever/users.go so that we know when and when not to - // allow users to make someone a super admin - ctx = context.WithValue(ctx, UserContextKey, u) - - if u.SuperAdmin { - // To access resources (servers, sources, databases, layouts) within a DataStore, - // an organization and a role are required even if you are a super admin or are - // not using auth. Every user's current organization is set on context to filter - // the resources accessed within a DataStore, including for super admin or when - // not using auth. In this way, a DataStore can treat all requests the same, - // including those from a super admin and when not using auth. - // - // As for roles, in the case of super admin or when not using auth, the user's - // role on context (though not on their JWT or user) is set to be admin. In order - // to access all resources belonging to their current organization. 
-			ctx = context.WithValue(ctx, roles.ContextKey, roles.AdminRoleName)
-			r = r.WithContext(ctx)
-			next(w, r)
-			return
-		}
-
-		u, err = store.Users(ctx).Get(ctx, chronograf.UserQuery{
-			Name:     &p.Subject,
-			Provider: &p.Issuer,
-			Scheme:   &scheme,
-		})
-		if err != nil {
-			log.Error("Failed to retrieve user")
-			Error(w, http.StatusForbidden, "User is not authorized", logger)
-			return
-		}
-
-		if hasAuthorizedRole(u, role) {
-			if len(u.Roles) != 1 {
-				msg := `User %d has too many roles in organization. User: %#v. Please report this log at https://github.com/influxdata/influxdb/chronograf/issues/new`
-				log.Error(fmt.Sprintf(msg, u.ID, u))
-				unknownErrorWithMessage(w, fmt.Errorf("please have administrator check logs and report error"), logger)
-				return
-			}
-			// Use the first role, since there should only ever be one
-			// for any particular organization, and hasAuthorizedRole
-			// should ensure that at least one role for the org exists.
-			ctx = context.WithValue(ctx, roles.ContextKey, u.Roles[0].Name)
-			r = r.WithContext(ctx)
-			next(w, r)
-			return
-		}
-
-		Error(w, http.StatusForbidden, "User is not authorized", logger)
-	})
-}
-
-func hasAuthorizedRole(u *chronograf.User, role string) bool {
-	if u == nil {
-		return false
-	}
-
-	switch role {
-	case roles.MemberRoleName:
-		for _, r := range u.Roles {
-			switch r.Name {
-			case roles.MemberRoleName, roles.ViewerRoleName, roles.EditorRoleName, roles.AdminRoleName:
-				return true
-			}
-		}
-	case roles.ViewerRoleName:
-		for _, r := range u.Roles {
-			switch r.Name {
-			case roles.ViewerRoleName, roles.EditorRoleName, roles.AdminRoleName:
-				return true
-			}
-		}
-	case roles.EditorRoleName:
-		for _, r := range u.Roles {
-			switch r.Name {
-			case roles.EditorRoleName, roles.AdminRoleName:
-				return true
-			}
-		}
-	case roles.AdminRoleName:
-		for _, r := range u.Roles {
-			switch r.Name {
-			case roles.AdminRoleName:
-				return true
-			}
-		}
-	case roles.SuperAdminStatus:
-		// SuperAdmins should have been authorized before this.
-		// This is only meant to restrict access for non-superadmins.
-		return false
-	}
-
-	return false
-}
diff --git a/chronograf/server/auth_test.go b/chronograf/server/auth_test.go
deleted file mode 100644
index 307517d9326..00000000000
--- a/chronograf/server/auth_test.go
+++ /dev/null
@@ -1,1950 +0,0 @@
-package server
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"net/http"
-	"net/http/httptest"
-	"testing"
-
-	"github.com/influxdata/influxdb/v2/chronograf"
-	"github.com/influxdata/influxdb/v2/chronograf/mocks"
-	"github.com/influxdata/influxdb/v2/chronograf/oauth2"
-	"github.com/influxdata/influxdb/v2/chronograf/roles"
-)
-
-func TestAuthorizedToken(t *testing.T) {
-	var tests = []struct {
-		Desc        string
-		Code        int
-		Principal   oauth2.Principal
-		ValidateErr error
-		Expected    string
-	}{
-		{
-			Desc:        "Error in validate",
-			Code:        http.StatusForbidden,
-			ValidateErr: errors.New("error"),
-		},
-		{
-			Desc: "Authorized ok",
-			Code: http.StatusOK,
-			Principal: oauth2.Principal{
-				Subject: "Principal Strickland",
-			},
-			Expected: "Principal Strickland",
-		},
-	}
-	for _, test := range tests {
-		// next is a sentinel StatusOK and
-		// principal recorder.
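As an aside before the test body continues: the hasAuthorizedRole switch above encodes a strict linear hierarchy (member < viewer < editor < admin), with superadmin handled out of band. A minimal sketch of the same check using a rank table follows; the names (roleRank, atLeast) are illustrative, not the chronograf API.

// Sketch only: an equivalent, table-driven form of the role hierarchy check.
package main

import "fmt"

// roleRank assigns each role a rank; higher ranks satisfy lower requirements.
var roleRank = map[string]int{
	"member": 1,
	"viewer": 2,
	"editor": 3,
	"admin":  4,
}

// atLeast reports whether a user holding role `have` satisfies a route that
// requires role `want`. Unknown roles never authorize, matching the
// default behavior of the switch above.
func atLeast(have, want string) bool {
	h, ok1 := roleRank[have]
	w, ok2 := roleRank[want]
	return ok1 && ok2 && h >= w
}

func main() {
	fmt.Println(atLeast("editor", "viewer"))     // true
	fmt.Println(atLeast("viewer", "editor"))     // false
	fmt.Println(atLeast("sweet_role", "viewer")) // false: unknown role
}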
- var principal oauth2.Principal - next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - principal = r.Context().Value(oauth2.PrincipalKey).(oauth2.Principal) - }) - req, _ := http.NewRequest("GET", "", nil) - w := httptest.NewRecorder() - - a := &mocks.Authenticator{ - Principal: test.Principal, - ValidateErr: test.ValidateErr, - } - - logger := &chronograf.NoopLogger{} - handler := AuthorizedToken(a, logger, next) - handler.ServeHTTP(w, req) - if w.Code != test.Code { - t.Errorf("Status code expected: %d actual %d", test.Code, w.Code) - } else if principal != test.Principal { - t.Errorf("Principal mismatch expected: %s actual %s", test.Principal, principal) - } - } -} -func TestAuthorizedUser(t *testing.T) { - type fields struct { - UsersStore chronograf.UsersStore - OrganizationsStore chronograf.OrganizationsStore - Logger chronograf.Logger - } - type args struct { - principal *oauth2.Principal - scheme string - useAuth bool - role string - } - tests := []struct { - name string - fields fields - args args - hasOrganizationContext bool - hasSuperAdminContext bool - hasRoleContext bool - hasServerContext bool - authorized bool - }{ - { - name: "Not using auth", - fields: fields{ - UsersStore: &mocks.UsersStore{}, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - }, nil - }, - }, - Logger: &chronograf.NoopLogger{}, - }, - args: args{ - useAuth: false, - }, - hasOrganizationContext: true, - hasSuperAdminContext: false, - hasRoleContext: false, - hasServerContext: true, - authorized: true, - }, - { - name: "User with member role is member authorized", - fields: fields{ - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return &chronograf.User{ - ID: 1337, - Name: "billysteve", - Provider: "google", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: roles.MemberRoleName, - Organization: "1337", - }, - }, - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - if q.ID == nil { - return nil, fmt.Errorf("invalid organization query: missing ID") - } - return &chronograf.Organization{ - ID: "1337", - Name: "The ShillBillThrilliettas", - }, nil - }, - }, - Logger: &chronograf.NoopLogger{}, - }, - args: args{ - principal: &oauth2.Principal{ - Subject: "billysteve", - Issuer: "google", - Organization: "1337", - }, - scheme: "oauth2", - role: "member", - useAuth: true, - }, - authorized: true, - hasOrganizationContext: true, - hasSuperAdminContext: false, - hasRoleContext: true, - hasServerContext: false, - }, - { - name: "User with viewer role is member authorized", - fields: fields{ - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return &chronograf.User{ - ID: 1337, - Name: "billysteve", - Provider: "google", - 
Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: roles.ViewerRoleName, - Organization: "1337", - }, - }, - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - if q.ID == nil { - return nil, fmt.Errorf("invalid organization query: missing ID") - } - return &chronograf.Organization{ - ID: "1337", - Name: "The ShillBillThrilliettas", - }, nil - }, - }, - Logger: &chronograf.NoopLogger{}, - }, - args: args{ - principal: &oauth2.Principal{ - Subject: "billysteve", - Issuer: "google", - Organization: "1337", - }, - scheme: "oauth2", - role: "member", - useAuth: true, - }, - authorized: true, - hasOrganizationContext: true, - hasSuperAdminContext: false, - hasRoleContext: true, - hasServerContext: false, - }, - { - name: "User with editor role is member authorized", - fields: fields{ - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return &chronograf.User{ - ID: 1337, - Name: "billysteve", - Provider: "google", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: roles.EditorRoleName, - Organization: "1337", - }, - }, - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - if q.ID == nil { - return nil, fmt.Errorf("invalid organization query: missing ID") - } - return &chronograf.Organization{ - ID: "1337", - Name: "The ShillBillThrilliettas", - }, nil - }, - }, - Logger: &chronograf.NoopLogger{}, - }, - args: args{ - principal: &oauth2.Principal{ - Subject: "billysteve", - Issuer: "google", - Organization: "1337", - }, - scheme: "oauth2", - role: "member", - useAuth: true, - }, - authorized: true, - hasOrganizationContext: true, - hasSuperAdminContext: false, - hasRoleContext: true, - hasServerContext: false, - }, - { - name: "User with admin role is member authorized", - fields: fields{ - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return &chronograf.User{ - ID: 1337, - Name: "billysteve", - Provider: "google", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: roles.AdminRoleName, - Organization: "1337", - }, - }, - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - if q.ID == nil { - return nil, fmt.Errorf("invalid organization query: missing ID") - } - return &chronograf.Organization{ - ID: "1337", - Name: "The ShillBillThrilliettas", - }, nil - }, - }, - Logger: &chronograf.NoopLogger{}, - }, - args: args{ - principal: &oauth2.Principal{ - Subject: 
"billysteve", - Issuer: "google", - Organization: "1337", - }, - scheme: "oauth2", - role: "member", - useAuth: true, - }, - authorized: true, - hasOrganizationContext: true, - hasSuperAdminContext: false, - hasRoleContext: true, - hasServerContext: false, - }, - { - name: "User with viewer role is viewer authorized", - fields: fields{ - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return &chronograf.User{ - ID: 1337, - Name: "billysteve", - Provider: "google", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: roles.ViewerRoleName, - Organization: "1337", - }, - }, - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - if q.ID == nil { - return nil, fmt.Errorf("invalid organization query: missing ID") - } - return &chronograf.Organization{ - ID: "1337", - Name: "The ShillBillThrilliettas", - }, nil - }, - }, - Logger: &chronograf.NoopLogger{}, - }, - args: args{ - principal: &oauth2.Principal{ - Subject: "billysteve", - Issuer: "google", - Organization: "1337", - }, - scheme: "oauth2", - role: "viewer", - useAuth: true, - }, - authorized: true, - hasOrganizationContext: true, - hasSuperAdminContext: false, - hasRoleContext: true, - hasServerContext: false, - }, - { - name: "User with editor role is viewer authorized", - fields: fields{ - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return &chronograf.User{ - ID: 1337, - Name: "billysteve", - Provider: "google", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: roles.EditorRoleName, - Organization: "1337", - }, - }, - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - if q.ID == nil { - return nil, fmt.Errorf("invalid organization query: missing ID") - } - return &chronograf.Organization{ - ID: "1337", - Name: "The ShillBillThrilliettas", - }, nil - }, - }, - Logger: &chronograf.NoopLogger{}, - }, - args: args{ - principal: &oauth2.Principal{ - Subject: "billysteve", - Issuer: "google", - Organization: "1337", - }, - scheme: "oauth2", - role: "viewer", - useAuth: true, - }, - authorized: true, - hasOrganizationContext: true, - hasSuperAdminContext: false, - hasRoleContext: true, - hasServerContext: false, - }, - { - name: "User with admin role is viewer authorized", - fields: fields{ - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return &chronograf.User{ - ID: 1337, - Name: "billysteve", - Provider: "google", - Scheme: "oauth2", - Roles: 
[]chronograf.Role{ - { - Name: roles.AdminRoleName, - Organization: "1337", - }, - }, - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - if q.ID == nil { - return nil, fmt.Errorf("invalid organization query: missing ID") - } - return &chronograf.Organization{ - ID: "1337", - Name: "The ShillBillThrilliettas", - }, nil - }, - }, - Logger: &chronograf.NoopLogger{}, - }, - args: args{ - principal: &oauth2.Principal{ - Subject: "billysteve", - Issuer: "google", - Organization: "1337", - }, - scheme: "oauth2", - role: "viewer", - useAuth: true, - }, - authorized: true, - hasOrganizationContext: true, - hasSuperAdminContext: false, - hasRoleContext: true, - hasServerContext: false, - }, - { - name: "User with viewer role is editor unauthorized", - fields: fields{ - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return &chronograf.User{ - ID: 1337, - Name: "billysteve", - Provider: "google", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: roles.ViewerRoleName, - Organization: "1337", - }, - }, - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - if q.ID == nil { - return nil, fmt.Errorf("invalid organization query: missing ID") - } - return &chronograf.Organization{ - ID: "1337", - Name: "The ShillBillThrilliettas", - }, nil - }, - }, - Logger: &chronograf.NoopLogger{}, - }, - args: args{ - principal: &oauth2.Principal{ - Subject: "billysteve", - Issuer: "google", - Organization: "1337", - }, - scheme: "oauth2", - role: "editor", - useAuth: true, - }, - authorized: false, - }, - { - name: "User with editor role is editor authorized", - fields: fields{ - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return &chronograf.User{ - ID: 1337, - Name: "billysteve", - Provider: "google", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: roles.EditorRoleName, - Organization: "1337", - }, - }, - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - if q.ID == nil { - return nil, fmt.Errorf("invalid organization query: missing ID") - } - return &chronograf.Organization{ - ID: "1337", - Name: "The ShillBillThrilliettas", - }, nil - }, - }, - Logger: &chronograf.NoopLogger{}, - }, - args: args{ - principal: &oauth2.Principal{ - Subject: "billysteve", - Issuer: "google", - Organization: "1337", - }, - scheme: "oauth2", - role: "editor", - useAuth: true, - }, - authorized: 
true, - hasOrganizationContext: true, - hasSuperAdminContext: false, - hasRoleContext: true, - hasServerContext: false, - }, - { - name: "User with admin role is editor authorized", - fields: fields{ - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return &chronograf.User{ - ID: 1337, - Name: "billysteve", - Provider: "google", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: roles.AdminRoleName, - Organization: "1337", - }, - }, - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - if q.ID == nil { - return nil, fmt.Errorf("invalid organization query: missing ID") - } - return &chronograf.Organization{ - ID: "1337", - Name: "The ShillBillThrilliettas", - }, nil - }, - }, - Logger: &chronograf.NoopLogger{}, - }, - args: args{ - principal: &oauth2.Principal{ - Subject: "billysteve", - Issuer: "google", - Organization: "1337", - }, - scheme: "oauth2", - role: "editor", - useAuth: true, - }, - authorized: true, - hasOrganizationContext: true, - hasSuperAdminContext: false, - hasRoleContext: true, - hasServerContext: false, - }, - { - name: "User with viewer role is admin unauthorized", - fields: fields{ - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return &chronograf.User{ - ID: 1337, - Name: "billysteve", - Provider: "google", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: roles.ViewerRoleName, - Organization: "1337", - }, - }, - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - if q.ID == nil { - return nil, fmt.Errorf("invalid organization query: missing ID") - } - return &chronograf.Organization{ - ID: "1337", - Name: "The ShillBillThrilliettas", - }, nil - }, - }, - Logger: &chronograf.NoopLogger{}, - }, - args: args{ - principal: &oauth2.Principal{ - Subject: "billysteve", - Issuer: "google", - Organization: "1337", - }, - scheme: "oauth2", - role: "admin", - useAuth: true, - }, - authorized: false, - }, - { - name: "User with editor role is admin unauthorized", - fields: fields{ - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return &chronograf.User{ - ID: 1337, - Name: "billysteve", - Provider: "google", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: roles.EditorRoleName, - Organization: "1337", - }, - }, - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return 
&chronograf.Organization{ - ID: "0", - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - if q.ID == nil { - return nil, fmt.Errorf("invalid organization query: missing ID") - } - return &chronograf.Organization{ - ID: "1337", - Name: "The ShillBillThrilliettas", - }, nil - }, - }, - Logger: &chronograf.NoopLogger{}, - }, - args: args{ - principal: &oauth2.Principal{ - Subject: "billysteve", - Issuer: "google", - Organization: "1337", - }, - scheme: "oauth2", - role: "admin", - useAuth: true, - }, - authorized: false, - }, - { - name: "User with admin role is admin authorized", - fields: fields{ - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return &chronograf.User{ - ID: 1337, - Name: "billysteve", - Provider: "google", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: roles.AdminRoleName, - Organization: "1337", - }, - }, - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - if q.ID == nil { - return nil, fmt.Errorf("invalid organization query: missing ID") - } - return &chronograf.Organization{ - ID: "1337", - Name: "The ShillBillThrilliettas", - }, nil - }, - }, - Logger: &chronograf.NoopLogger{}, - }, - args: args{ - principal: &oauth2.Principal{ - Subject: "billysteve", - Issuer: "google", - Organization: "1337", - }, - scheme: "oauth2", - role: "admin", - useAuth: true, - }, - authorized: true, - hasOrganizationContext: true, - hasSuperAdminContext: false, - hasRoleContext: true, - hasServerContext: false, - }, - { - name: "User with no role is viewer unauthorized", - fields: fields{ - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return &chronograf.User{ - ID: 1337, - Name: "billysteve", - Provider: "google", - Scheme: "oauth2", - Roles: []chronograf.Role{}, - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - if q.ID == nil { - return nil, fmt.Errorf("invalid organization query: missing ID") - } - return &chronograf.Organization{ - ID: "1337", - Name: "The ShillBillThrilliettas", - }, nil - }, - }, - Logger: &chronograf.NoopLogger{}, - }, - args: args{ - principal: &oauth2.Principal{ - Subject: "billysteve", - Issuer: "google", - Organization: "1337", - }, - scheme: "oauth2", - role: "view", - useAuth: true, - }, - authorized: false, - }, - { - name: "User with no role is editor unauthorized", - fields: fields{ - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, 
Provider, and/or Scheme") - } - return &chronograf.User{ - ID: 1337, - Name: "billysteve", - Provider: "google", - Scheme: "oauth2", - Roles: []chronograf.Role{}, - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - if q.ID == nil { - return nil, fmt.Errorf("invalid organization query: missing ID") - } - return &chronograf.Organization{ - ID: "1337", - Name: "The ShillBillThrilliettas", - }, nil - }, - }, - Logger: &chronograf.NoopLogger{}, - }, - args: args{ - principal: &oauth2.Principal{ - Subject: "billysteve", - Issuer: "google", - Organization: "1337", - }, - scheme: "oauth2", - role: "editor", - useAuth: true, - }, - authorized: false, - }, - { - name: "User with no role is admin unauthorized", - fields: fields{ - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return &chronograf.User{ - ID: 1337, - Name: "billysteve", - Provider: "google", - Scheme: "oauth2", - Roles: []chronograf.Role{}, - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - if q.ID == nil { - return nil, fmt.Errorf("invalid organization query: missing ID") - } - return &chronograf.Organization{ - ID: "1337", - Name: "The ShillBillThrilliettas", - }, nil - }, - }, - Logger: &chronograf.NoopLogger{}, - }, - args: args{ - principal: &oauth2.Principal{ - Subject: "billysteve", - Issuer: "google", - Organization: "1337", - }, - scheme: "oauth2", - role: "admin", - useAuth: true, - }, - authorized: false, - }, - { - name: "User with unknown role is viewer unauthorized", - fields: fields{ - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return &chronograf.User{ - ID: 1337, - Name: "billysteve", - Provider: "google", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: "sweet_role", - }, - }, - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - if q.ID == nil { - return nil, fmt.Errorf("invalid organization query: missing ID") - } - return &chronograf.Organization{ - ID: "1337", - Name: "The ShillBillThrilliettas", - }, nil - }, - }, - Logger: &chronograf.NoopLogger{}, - }, - args: args{ - principal: &oauth2.Principal{ - Subject: "billysteve", - Issuer: "google", - Organization: "1337", - }, - scheme: "oauth2", - role: "viewer", - useAuth: true, - }, - authorized: false, - }, - { - name: "User with unknown role is editor unauthorized", - fields: fields{ - UsersStore: &mocks.UsersStore{ - GetF: 
func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return &chronograf.User{ - ID: 1337, - Name: "billysteve", - Provider: "google", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: "sweet_role", - }, - }, - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - if q.ID == nil { - return nil, fmt.Errorf("invalid organization query: missing ID") - } - return &chronograf.Organization{ - ID: "1337", - Name: "The ShillBillThrilliettas", - }, nil - }, - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - }, nil - }, - }, - Logger: &chronograf.NoopLogger{}, - }, - args: args{ - principal: &oauth2.Principal{ - Subject: "billysteve", - Issuer: "google", - Organization: "1337", - }, - scheme: "oauth2", - role: "editor", - useAuth: true, - }, - authorized: false, - }, - { - name: "User with unknown role is admin unauthorized", - fields: fields{ - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return &chronograf.User{ - ID: 1337, - Name: "billysteve", - Provider: "google", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: "sweet_role", - }, - }, - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - if q.ID == nil { - return nil, fmt.Errorf("invalid organization query: missing ID") - } - return &chronograf.Organization{ - ID: "1337", - Name: "The ShillBillThrilliettas", - }, nil - }, - }, - Logger: &chronograf.NoopLogger{}, - }, - args: args{ - principal: &oauth2.Principal{ - Subject: "billysteve", - Issuer: "google", - Organization: "1337", - }, - scheme: "oauth2", - role: "admin", - useAuth: true, - }, - authorized: false, - }, - { - name: "User with viewer role is SuperAdmin unauthorized", - fields: fields{ - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return &chronograf.User{ - ID: 1337, - Name: "billysteve", - Provider: "google", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: roles.ViewerRoleName, - Organization: "1337", - }, - }, - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - if q.ID == nil { - return nil, fmt.Errorf("invalid organization query: missing ID") - } - return &chronograf.Organization{ - ID: "1337", - Name: "The ShillBillThrilliettas", - }, nil - }, - }, - Logger: &chronograf.NoopLogger{}, - }, - args: args{ - 
principal: &oauth2.Principal{ - Subject: "billysteve", - Issuer: "google", - Organization: "1337", - }, - scheme: "oauth2", - role: "superadmin", - useAuth: true, - }, - authorized: false, - }, - { - name: "User with editor role is SuperAdmin unauthorized", - fields: fields{ - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return &chronograf.User{ - ID: 1337, - Name: "billysteve", - Provider: "google", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: roles.EditorRoleName, - Organization: "1337", - }, - }, - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - if q.ID == nil { - return nil, fmt.Errorf("invalid organization query: missing ID") - } - return &chronograf.Organization{ - ID: "1337", - Name: "The ShillBillThrilliettas", - }, nil - }, - }, - Logger: &chronograf.NoopLogger{}, - }, - args: args{ - principal: &oauth2.Principal{ - Subject: "billysteve", - Issuer: "google", - Organization: "1337", - }, - scheme: "oauth2", - role: "superadmin", - useAuth: true, - }, - authorized: false, - }, - { - name: "User with admin role is SuperAdmin unauthorized", - fields: fields{ - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return &chronograf.User{ - ID: 1337, - Name: "billysteve", - Provider: "google", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: roles.AdminRoleName, - Organization: "1337", - }, - }, - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - if q.ID == nil { - return nil, fmt.Errorf("invalid organization query: missing ID") - } - return &chronograf.Organization{ - ID: "1337", - Name: "The ShillBillThrilliettas", - }, nil - }, - }, - Logger: &chronograf.NoopLogger{}, - }, - args: args{ - principal: &oauth2.Principal{ - Subject: "billysteve", - Issuer: "google", - Organization: "1337", - }, - scheme: "oauth2", - role: "superadmin", - useAuth: true, - }, - authorized: false, - }, - { - name: "SuperAdmin is Viewer authorized", - fields: fields{ - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return &chronograf.User{ - ID: 1337, - Name: "billysteve", - Provider: "google", - Scheme: "oauth2", - SuperAdmin: true, - Roles: []chronograf.Role{ - { - Name: roles.MemberRoleName, - Organization: "1337", - }, - }, - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return 
&chronograf.Organization{ - ID: "0", - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - if q.ID == nil { - return nil, fmt.Errorf("invalid organization query: missing ID") - } - return &chronograf.Organization{ - ID: "1337", - Name: "The ShillBillThrilliettas", - }, nil - }, - }, - Logger: &chronograf.NoopLogger{}, - }, - args: args{ - principal: &oauth2.Principal{ - Subject: "billysteve", - Issuer: "google", - Organization: "1337", - }, - scheme: "oauth2", - role: "viewer", - useAuth: true, - }, - authorized: true, - hasOrganizationContext: true, - hasSuperAdminContext: true, - hasRoleContext: true, - hasServerContext: false, - }, - { - name: "SuperAdmin is Editor authorized", - fields: fields{ - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return &chronograf.User{ - ID: 1337, - Name: "billysteve", - Provider: "google", - Scheme: "oauth2", - SuperAdmin: true, - Roles: []chronograf.Role{ - { - Name: roles.MemberRoleName, - Organization: "1337", - }, - }, - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - if q.ID == nil { - return nil, fmt.Errorf("invalid organization query: missing ID") - } - return &chronograf.Organization{ - ID: "1337", - Name: "The ShillBillThrilliettas", - }, nil - }, - }, - Logger: &chronograf.NoopLogger{}, - }, - args: args{ - principal: &oauth2.Principal{ - Subject: "billysteve", - Issuer: "google", - Organization: "1337", - }, - scheme: "oauth2", - role: "editor", - useAuth: true, - }, - authorized: true, - hasOrganizationContext: true, - hasSuperAdminContext: true, - hasRoleContext: true, - hasServerContext: false, - }, - { - name: "SuperAdmin is Admin authorized", - fields: fields{ - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return &chronograf.User{ - ID: 1337, - Name: "billysteve", - Provider: "google", - Scheme: "oauth2", - SuperAdmin: true, - Roles: []chronograf.Role{ - { - Name: roles.MemberRoleName, - Organization: "1337", - }, - }, - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - if q.ID == nil { - return nil, fmt.Errorf("invalid organization query: missing ID") - } - return &chronograf.Organization{ - ID: "1337", - Name: "The ShillBillThrilliettas", - }, nil - }, - }, - Logger: &chronograf.NoopLogger{}, - }, - args: args{ - principal: &oauth2.Principal{ - Subject: "billysteve", - Issuer: "google", - Organization: "1337", - }, - scheme: "oauth2", - role: "admin", - useAuth: true, - }, - authorized: true, - hasOrganizationContext: true, - hasSuperAdminContext: true, - hasRoleContext: true, - hasServerContext: false, - }, - 
{ - name: "SuperAdmin is SuperAdmin authorized", - fields: fields{ - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return &chronograf.User{ - ID: 1337, - Name: "billysteve", - Provider: "google", - Scheme: "oauth2", - SuperAdmin: true, - Roles: []chronograf.Role{ - { - Name: roles.MemberRoleName, - Organization: "1337", - }, - }, - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - if q.ID == nil { - return nil, fmt.Errorf("invalid organization query: missing ID") - } - return &chronograf.Organization{ - ID: "1337", - Name: "The ShillBillThrilliettas", - }, nil - }, - }, - Logger: &chronograf.NoopLogger{}, - }, - args: args{ - principal: &oauth2.Principal{ - Subject: "billysteve", - Issuer: "google", - Organization: "1337", - }, - scheme: "oauth2", - role: "superadmin", - useAuth: true, - }, - authorized: true, - hasOrganizationContext: true, - hasSuperAdminContext: true, - hasRoleContext: true, - hasServerContext: false, - }, - { - name: "Invalid principal – principal is nil", - fields: fields{ - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return &chronograf.User{ - ID: 1337, - Name: "billysteve", - Provider: "google", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: roles.AdminRoleName, - Organization: "1337", - }, - }, - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - if q.ID == nil { - return nil, fmt.Errorf("invalid organization query: missing ID") - } - return &chronograf.Organization{ - ID: "1337", - Name: "The ShillBillThrilliettas", - }, nil - }, - }, - Logger: &chronograf.NoopLogger{}, - }, - args: args{ - principal: nil, - scheme: "oauth2", - role: "admin", - useAuth: true, - }, - authorized: false, - }, - { - name: "Invalid principal - missing organization", - fields: fields{ - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return &chronograf.User{ - ID: 1337, - Name: "billysteve", - Provider: "google", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: roles.AdminRoleName, - Organization: "1337", - }, - }, - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - if q.ID == nil { - return nil, fmt.Errorf("invalid 
organization query: missing ID") - } - return &chronograf.Organization{ - ID: "1337", - Name: "The ShillBillThrilliettas", - }, nil - }, - }, - Logger: &chronograf.NoopLogger{}, - }, - args: args{ - principal: &oauth2.Principal{ - Subject: "billysteve", - Issuer: "google", - }, - scheme: "oauth2", - role: "admin", - useAuth: true, - }, - authorized: false, - }, - { - name: "Invalid principal - organization id not uint64", - fields: fields{ - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return &chronograf.User{ - ID: 1337, - Name: "billysteve", - Provider: "google", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: roles.AdminRoleName, - Organization: "1337", - }, - }, - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - if q.ID == nil { - return nil, fmt.Errorf("invalid organization query: missing ID") - } - return &chronograf.Organization{ - ID: "1337", - Name: "The ShillBillThrilliettas", - }, nil - }, - }, - Logger: &chronograf.NoopLogger{}, - }, - args: args{ - principal: &oauth2.Principal{ - Subject: "billysteve", - Issuer: "google", - Organization: "1ee7", - }, - scheme: "oauth2", - role: "admin", - useAuth: true, - }, - authorized: false, - }, - { - name: "Failed to retrieve organization", - fields: fields{ - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return &chronograf.User{ - ID: 1337, - Name: "billysteve", - Provider: "google", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: roles.AdminRoleName, - Organization: "1337", - }, - }, - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - if q.ID == nil { - return nil, fmt.Errorf("invalid organization query: missing ID") - } - switch *q.ID { - case "1338": - return &chronograf.Organization{ - ID: "1338", - Name: "The ShillBillThrilliettas", - }, nil - default: - return nil, chronograf.ErrOrganizationNotFound - } - }, - }, - Logger: &chronograf.NoopLogger{}, - }, - args: args{ - principal: &oauth2.Principal{ - Subject: "billysteve", - Issuer: "google", - Organization: "1337", - }, - scheme: "oauth2", - role: "admin", - useAuth: true, - }, - authorized: false, - }, - { - name: "Failed to retrieve user", - fields: fields{ - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - switch *q.Name { - case "billysteve": - return &chronograf.User{ - ID: 1337, - Name: "billysteve", - Provider: "google", - Scheme: "oauth2", - Roles: []chronograf.Role{ - 
{ - Name: roles.AdminRoleName, - Organization: "1337", - }, - }, - }, nil - default: - return nil, chronograf.ErrUserNotFound - } - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - if q.ID == nil { - return nil, fmt.Errorf("invalid organization query: missing ID") - } - return &chronograf.Organization{ - ID: "1337", - Name: "The ShillBillThrilliettas", - }, nil - }, - }, - Logger: &chronograf.NoopLogger{}, - }, - args: args{ - principal: &oauth2.Principal{ - Subject: "billietta", - Issuer: "google", - Organization: "1337", - }, - scheme: "oauth2", - role: "admin", - useAuth: true, - }, - authorized: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - var authorized bool - var hasServerCtx bool - var hasSuperAdminCtx bool - var hasOrganizationCtx bool - var hasRoleCtx bool - next := func(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - hasServerCtx = hasServerContext(ctx) - hasSuperAdminCtx = hasSuperAdminContext(ctx) - _, hasOrganizationCtx = hasOrganizationContext(ctx) - _, hasRoleCtx = hasRoleContext(ctx) - authorized = true - } - fn := AuthorizedUser( - &Store{ - UsersStore: tt.fields.UsersStore, - OrganizationsStore: tt.fields.OrganizationsStore, - }, - tt.args.useAuth, - tt.args.role, - tt.fields.Logger, - next, - ) - - w := httptest.NewRecorder() - r := httptest.NewRequest( - "GET", - "http://any.url", // can be any valid URL as we are bypassing mux - nil, - ) - if tt.args.principal == nil { - r = r.WithContext(context.WithValue(r.Context(), oauth2.PrincipalKey, nil)) - } else { - r = r.WithContext(context.WithValue(r.Context(), oauth2.PrincipalKey, *tt.args.principal)) - } - fn(w, r) - - if authorized != tt.authorized { - t.Errorf("%q. AuthorizedUser() = %v, expected %v", tt.name, authorized, tt.authorized) - } - - if !authorized && w.Code != http.StatusForbidden { - t.Errorf("%q. AuthorizedUser() Status Code = %v, expected %v", tt.name, w.Code, http.StatusForbidden) - } - - if hasServerCtx != tt.hasServerContext { - t.Errorf("%q. AuthorizedUser().Context().Server = %v, expected %v", tt.name, hasServerCtx, tt.hasServerContext) - } - - if hasSuperAdminCtx != tt.hasSuperAdminContext { - t.Errorf("%q. AuthorizedUser().Context().SuperAdmin = %v, expected %v", tt.name, hasSuperAdminCtx, tt.hasSuperAdminContext) - } - - if hasOrganizationCtx != tt.hasOrganizationContext { - t.Errorf("%q. AuthorizedUser.Context().Organization = %v, expected %v", tt.name, hasOrganizationCtx, tt.hasOrganizationContext) - } - - if hasRoleCtx != tt.hasRoleContext { - t.Errorf("%q. 
AuthorizedUser().Context().Role = %v, expected %v", tt.name, hasRoleCtx, tt.hasRoleContext) - } - - }) - } -} - -func TestRawStoreAccess(t *testing.T) { - type fields struct { - Logger chronograf.Logger - } - type args struct { - principal *oauth2.Principal - serverContext bool - user *chronograf.User - } - type wants struct { - authorized bool - hasServerContext bool - } - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "middleware already has server context", - fields: fields{ - Logger: &chronograf.NoopLogger{}, - }, - args: args{ - serverContext: true, - }, - wants: wants{ - authorized: true, - hasServerContext: true, - }, - }, - { - name: "user on context is a SuperAdmin", - fields: fields{ - Logger: &chronograf.NoopLogger{}, - }, - args: args{ - user: &chronograf.User{ - SuperAdmin: true, - }, - }, - wants: wants{ - authorized: true, - hasServerContext: true, - }, - }, - { - name: "user on context is a not SuperAdmin", - fields: fields{ - Logger: &chronograf.NoopLogger{}, - }, - args: args{ - user: &chronograf.User{ - SuperAdmin: false, - }, - }, - wants: wants{ - authorized: false, - hasServerContext: false, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - var authorized bool - var hasServerCtx bool - next := func(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - hasServerCtx = hasServerContext(ctx) - authorized = true - } - fn := RawStoreAccess( - tt.fields.Logger, - next, - ) - - w := httptest.NewRecorder() - url := "http://any.url" - r := httptest.NewRequest( - "GET", - url, - nil, - ) - if tt.args.principal == nil { - r = r.WithContext(context.WithValue(r.Context(), oauth2.PrincipalKey, nil)) - } else { - r = r.WithContext(context.WithValue(r.Context(), oauth2.PrincipalKey, *tt.args.principal)) - } - - if tt.args.serverContext { - r = r.WithContext(serverContext(r.Context())) - } - if tt.args.user != nil { - r = r.WithContext(context.WithValue(r.Context(), UserContextKey, tt.args.user)) - } - fn(w, r) - - if authorized != tt.wants.authorized { - t.Errorf("%q. RawStoreAccess() = %v, expected %v", tt.name, authorized, tt.wants.authorized) - } - - if !authorized && w.Code != http.StatusForbidden { - t.Errorf("%q. RawStoreAccess() Status Code = %v, expected %v", tt.name, w.Code, http.StatusForbidden) - } - - if hasServerCtx != tt.wants.hasServerContext { - t.Errorf("%q. 
RawStoreAccess().Context().Server = %v, expected %v", tt.name, hasServerCtx, tt.wants.hasServerContext) - } - - }) - } -} diff --git a/chronograf/server/builders.go b/chronograf/server/builders.go deleted file mode 100644 index c3d9519cef4..00000000000 --- a/chronograf/server/builders.go +++ /dev/null @@ -1,186 +0,0 @@ -package server - -import ( - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/canned" - "github.com/influxdata/influxdb/v2/chronograf/filestore" - "github.com/influxdata/influxdb/v2/chronograf/memdb" - "github.com/influxdata/influxdb/v2/chronograf/multistore" -) - -// LayoutBuilder is responsible for building Layouts -type LayoutBuilder interface { - Build(chronograf.LayoutsStore) (*multistore.Layouts, error) -} - -// MultiLayoutBuilder implements LayoutBuilder and will return a Layouts -type MultiLayoutBuilder struct { - Logger chronograf.Logger - UUID chronograf.ID - CannedPath string -} - -// Build will construct a Layouts of canned and db-backed personalized -// layouts -func (builder *MultiLayoutBuilder) Build(db chronograf.LayoutsStore) (*multistore.Layouts, error) { - // These apps are those handled from a directory - apps := filestore.NewApps(builder.CannedPath, builder.UUID, builder.Logger) - // These apps are statically compiled into chronograf - binApps := &canned.BinLayoutsStore{ - Logger: builder.Logger, - } - // Acts as a front-end to both the bolt layouts, filesystem layouts and binary statically compiled layouts. - // The idea here is that these stores form a hierarchy in which each is tried sequentially until - // the operation has success. So, the database is preferred over filesystem over binary data. - layouts := &multistore.Layouts{ - Stores: []chronograf.LayoutsStore{ - db, - apps, - binApps, - }, - } - - return layouts, nil -} - -// DashboardBuilder is responsible for building dashboards -type DashboardBuilder interface { - Build(chronograf.DashboardsStore) (*multistore.DashboardsStore, error) -} - -// MultiDashboardBuilder builds a DashboardsStore backed by bolt and the filesystem -type MultiDashboardBuilder struct { - Logger chronograf.Logger - ID chronograf.ID - Path string -} - -// Build will construct a Dashboard store of filesystem and db-backed dashboards -func (builder *MultiDashboardBuilder) Build(db chronograf.DashboardsStore) (*multistore.DashboardsStore, error) { - // These dashboards are those handled from a directory - files := filestore.NewDashboards(builder.Path, builder.ID, builder.Logger) - // Acts as a front-end to both the bolt dashboard and filesystem dashboards. - // The idea here is that these stores form a hierarchy in which each is tried sequentially until - // the operation has success. 
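The sequential-fallback hierarchy these comments describe is the heart of the multistore package. A minimal sketch of the assumed lookup semantics follows (try each store in order, return the first success); the types and method names are simplified illustrations, not the real multistore API.

```go
package main

import (
	"errors"
	"fmt"
)

// Store is a simplified stand-in for a chronograf layouts/dashboards store.
type Store interface {
	Get(id string) (string, error)
}

type mapStore map[string]string

func (m mapStore) Get(id string) (string, error) {
	if v, ok := m[id]; ok {
		return v, nil
	}
	return "", errors.New("not found")
}

// multiGet tries each store in order and returns the first success, which
// is why the database store is listed ahead of filesystem and binary stores.
func multiGet(stores []Store, id string) (string, error) {
	var lastErr error
	for _, s := range stores {
		if v, err := s.Get(id); err == nil {
			return v, nil
		} else {
			lastErr = err
		}
	}
	return "", lastErr
}

func main() {
	db := mapStore{}                              // preferred, but empty here
	files := mapStore{"cpu": "canned CPU layout"} // fallback
	v, err := multiGet([]Store{db, files}, "cpu")
	fmt.Println(v, err) // canned CPU layout <nil>
}
```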
So, the database is preferred over filesystem - dashboards := &multistore.DashboardsStore{ - Stores: []chronograf.DashboardsStore{ - db, - files, - }, - } - - return dashboards, nil -} - -// SourcesBuilder builds a MultiSourceStore -type SourcesBuilder interface { - Build(chronograf.SourcesStore) (*multistore.SourcesStore, error) -} - -// MultiSourceBuilder implements SourcesBuilder -type MultiSourceBuilder struct { - InfluxDBURL string - InfluxDBUsername string - InfluxDBPassword string - - Logger chronograf.Logger - ID chronograf.ID - Path string -} - -// Build will return a MultiSourceStore -func (fs *MultiSourceBuilder) Build(db chronograf.SourcesStore) (*multistore.SourcesStore, error) { - // These dashboards are those handled from a directory - files := filestore.NewSources(fs.Path, fs.ID, fs.Logger) - - stores := []chronograf.SourcesStore{db, files} - - if fs.InfluxDBURL != "" { - influxStore := &memdb.SourcesStore{ - Source: &chronograf.Source{ - ID: 0, - Name: fs.InfluxDBURL, - Type: chronograf.InfluxDB, - Username: fs.InfluxDBUsername, - Password: fs.InfluxDBPassword, - URL: fs.InfluxDBURL, - Default: true, - }} - stores = append([]chronograf.SourcesStore{influxStore}, stores...) - } - sources := &multistore.SourcesStore{ - Stores: stores, - } - - return sources, nil -} - -// KapacitorBuilder builds a KapacitorStore -type KapacitorBuilder interface { - Build(chronograf.ServersStore) (*multistore.KapacitorStore, error) -} - -// MultiKapacitorBuilder implements KapacitorBuilder -type MultiKapacitorBuilder struct { - KapacitorURL string - KapacitorUsername string - KapacitorPassword string - - Logger chronograf.Logger - ID chronograf.ID - Path string -} - -// Build will return a multistore facade KapacitorStore over memdb and bolt -func (builder *MultiKapacitorBuilder) Build(db chronograf.ServersStore) (*multistore.KapacitorStore, error) { - // These dashboards are those handled from a directory - files := filestore.NewKapacitors(builder.Path, builder.ID, builder.Logger) - - stores := []chronograf.ServersStore{db, files} - - if builder.KapacitorURL != "" { - memStore := &memdb.KapacitorStore{ - Kapacitor: &chronograf.Server{ - ID: 0, - SrcID: 0, - Name: builder.KapacitorURL, - URL: builder.KapacitorURL, - Username: builder.KapacitorUsername, - Password: builder.KapacitorPassword, - }, - } - stores = append([]chronograf.ServersStore{memStore}, stores...) - } - kapacitors := &multistore.KapacitorStore{ - Stores: stores, - } - return kapacitors, nil -} - -// OrganizationBuilder is responsible for building dashboards -type OrganizationBuilder interface { - Build(chronograf.OrganizationsStore) (*multistore.OrganizationsStore, error) -} - -// MultiOrganizationBuilder builds a OrganizationsStore backed by bolt and the filesystem -type MultiOrganizationBuilder struct { - Logger chronograf.Logger - Path string -} - -// Build will construct a Organization store of filesystem and db-backed dashboards -func (builder *MultiOrganizationBuilder) Build(db chronograf.OrganizationsStore) (*multistore.OrganizationsStore, error) { - // These organization are those handled from a directory - files := filestore.NewOrganizations(builder.Path, builder.Logger) - // Acts as a front-end to both the bolt org and filesystem orgs. - // The idea here is that these stores form a hierarchy in which each is tried sequentially until - // the operation has success. 
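MultiSourceBuilder and MultiKapacitorBuilder above also use a small precedence trick worth calling out: when a URL is supplied via configuration, an in-memory store is prepended to the slice so it wins over the db- and file-backed stores. A toy, self-contained illustration (the values are invented for the example):

```go
package main

import "fmt"

func main() {
	// Ordinary precedence: database first, then filesystem.
	stores := []string{"bolt", "filesystem"}

	// When a source URL is configured, prepend an in-memory entry so that
	// lookups consult it before any persisted store.
	influxURL := "http://localhost:8086" // invented example value
	if influxURL != "" {
		stores = append([]string{"memdb:" + influxURL}, stores...)
	}
	fmt.Println(stores) // [memdb:http://localhost:8086 bolt filesystem]
}
```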
So, the database is preferred over filesystem - orgs := &multistore.OrganizationsStore{ - Stores: []chronograf.OrganizationsStore{ - db, - files, - }, - } - - return orgs, nil -} diff --git a/chronograf/server/builders_test.go b/chronograf/server/builders_test.go deleted file mode 100644 index ccd0b055778..00000000000 --- a/chronograf/server/builders_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package server_test - -import ( - "testing" - - "github.com/influxdata/influxdb/v2/chronograf/server" -) - -func TestLayoutBuilder(t *testing.T) { - var l server.LayoutBuilder = &server.MultiLayoutBuilder{} - layout, err := l.Build(nil) - if err != nil { - t.Fatalf("MultiLayoutBuilder can't build a MultiLayoutsStore: %v", err) - } - - if layout == nil { - t.Fatal("LayoutBuilder should have built a layout") - } -} - -func TestSourcesStoresBuilder(t *testing.T) { - var b server.SourcesBuilder = &server.MultiSourceBuilder{} - sources, err := b.Build(nil) - if err != nil { - t.Fatalf("MultiSourceBuilder can't build a MultiSourcesStore: %v", err) - } - if sources == nil { - t.Fatal("SourcesBuilder should have built a MultiSourceStore") - } -} diff --git a/chronograf/server/cells.go b/chronograf/server/cells.go deleted file mode 100644 index 1c1155afd1c..00000000000 --- a/chronograf/server/cells.go +++ /dev/null @@ -1,379 +0,0 @@ -package server - -import ( - "encoding/json" - "fmt" - "net/http" - - "github.com/influxdata/httprouter" - "github.com/influxdata/influxdb/v2/chronograf" - idgen "github.com/influxdata/influxdb/v2/chronograf/id" -) - -const ( - // DefaultWidth is used if not specified - DefaultWidth = 4 - // DefaultHeight is used if not specified - DefaultHeight = 4 -) - -type dashboardCellLinks struct { - Self string `json:"self"` // Self link mapping to this resource -} - -type dashboardCellResponse struct { - chronograf.DashboardCell - Links dashboardCellLinks `json:"links"` -} - -func newCellResponse(dID chronograf.DashboardID, cell chronograf.DashboardCell) dashboardCellResponse { - base := "/chronograf/v1/dashboards" - if cell.Queries == nil { - cell.Queries = []chronograf.DashboardQuery{} - } - if cell.CellColors == nil { - cell.CellColors = []chronograf.CellColor{} - } - - // Copy to handle race condition - newAxes := make(map[string]chronograf.Axis, len(cell.Axes)) - for k, v := range cell.Axes { - newAxes[k] = v - } - - // ensure x, y, and y2 axes always returned - for _, lbl := range []string{"x", "y", "y2"} { - if _, found := newAxes[lbl]; !found { - newAxes[lbl] = chronograf.Axis{ - Bounds: []string{"", ""}, - } - } - } - cell.Axes = newAxes - - return dashboardCellResponse{ - DashboardCell: cell, - Links: dashboardCellLinks{ - Self: fmt.Sprintf("%s/%d/cells/%s", base, dID, cell.ID), - }, - } -} - -func newCellResponses(dID chronograf.DashboardID, dcells []chronograf.DashboardCell) []dashboardCellResponse { - cells := make([]dashboardCellResponse, len(dcells)) - for i, cell := range dcells { - cells[i] = newCellResponse(dID, cell) - } - return cells -} - -// ValidDashboardCellRequest verifies that the dashboard cells have a query and -// have the correct axes specified -func ValidDashboardCellRequest(c *chronograf.DashboardCell) error { - if c == nil { - return fmt.Errorf("chronograf dashboard cell was nil") - } - - CorrectWidthHeight(c) - for _, q := range c.Queries { - if err := ValidateQueryConfig(&q.QueryConfig); err != nil { - return err - } - } - MoveTimeShift(c) - err := HasCorrectAxes(c) - if err != nil { - return err - } - if err = 
HasCorrectColors(c); err != nil { - return err - } - return HasCorrectLegend(c) -} - -// HasCorrectAxes verifies that only permitted axes exist within a DashboardCell -func HasCorrectAxes(c *chronograf.DashboardCell) error { - for label, axis := range c.Axes { - if !oneOf(label, "x", "y", "y2") { - return chronograf.ErrInvalidAxis - } - - if !oneOf(axis.Scale, "linear", "log", "") { - return chronograf.ErrInvalidAxis - } - - if !oneOf(axis.Base, "10", "2", "") { - return chronograf.ErrInvalidAxis - } - } - - return nil -} - -// HasCorrectColors verifies that the format of each color is correct -func HasCorrectColors(c *chronograf.DashboardCell) error { - for _, color := range c.CellColors { - if !oneOf(color.Type, "max", "min", "threshold", "text", "background", "scale") { - return chronograf.ErrInvalidColorType - } - if len(color.Hex) != 7 { - return chronograf.ErrInvalidColor - } - } - return nil -} - -// HasCorrectLegend verifies that the format of the legend is correct -func HasCorrectLegend(c *chronograf.DashboardCell) error { - // No legend set - if c.Legend.Type == "" && c.Legend.Orientation == "" { - return nil - } - - if c.Legend.Type == "" || c.Legend.Orientation == "" { - return chronograf.ErrInvalidLegend - } - if !oneOf(c.Legend.Orientation, "top", "bottom", "right", "left") { - return chronograf.ErrInvalidLegendOrient - } - - // Remember! if we add other types, update ErrInvalidLegendType - if !oneOf(c.Legend.Type, "static") { - return chronograf.ErrInvalidLegendType - } - return nil -} - -// oneOf reports whether a provided string is a member of a variadic list of -// valid options -func oneOf(prop string, validOpts ...string) bool { - for _, valid := range validOpts { - if prop == valid { - return true - } - } - return false -} - -// CorrectWidthHeight changes the cell to have at least the -// minimum width and height -func CorrectWidthHeight(c *chronograf.DashboardCell) { - if c.W < 1 { - c.W = DefaultWidth - } - if c.H < 1 { - c.H = DefaultHeight - } -} - -// MoveTimeShift moves TimeShift from the QueryConfig to the DashboardQuery -func MoveTimeShift(c *chronograf.DashboardCell) { - for i, query := range c.Queries { - query.Shifts = query.QueryConfig.Shifts - c.Queries[i] = query - } -} - -// AddQueryConfig updates a cell by converting InfluxQL into queryconfigs -// If influxql cannot be represented by a full query config, then, the -// query config's raw text is set to the command. -func AddQueryConfig(c *chronograf.DashboardCell) { - for i, q := range c.Queries { - qc := ToQueryConfig(q.Command) - qc.Shifts = append([]chronograf.TimeShift(nil), q.Shifts...) 
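The append-to-nil call just above is Go's idiomatic way to take an independent copy of a slice, so later mutations of the query's shifts cannot alias the original. A standalone demonstration of the idiom:

```go
package main

import "fmt"

func main() {
	orig := []string{"1m", "5m"}

	// Appending to a nil slice of the same element type copies the
	// contents into fresh backing storage.
	cp := append([]string(nil), orig...)
	cp[0] = "changed"

	fmt.Println(orig) // [1m 5m]  -- unaffected by the write to cp
	fmt.Println(cp)   // [changed 5m]
}
```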
- q.Shifts = nil - q.QueryConfig = qc - c.Queries[i] = q - } -} - -// DashboardCells returns all cells from a dashboard within the store -func (s *Service) DashboardCells(w http.ResponseWriter, r *http.Request) { - id, err := paramID("id", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) - return - } - - ctx := r.Context() - e, err := s.Store.Dashboards(ctx).Get(ctx, chronograf.DashboardID(id)) - if err != nil { - notFound(w, id, s.Logger) - return - } - - boards := newDashboardResponse(e) - cells := boards.Cells - encodeJSON(w, http.StatusOK, cells, s.Logger) -} - -// NewDashboardCell adds a cell to an existing dashboard -func (s *Service) NewDashboardCell(w http.ResponseWriter, r *http.Request) { - id, err := paramID("id", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) - return - } - - ctx := r.Context() - dash, err := s.Store.Dashboards(ctx).Get(ctx, chronograf.DashboardID(id)) - if err != nil { - notFound(w, id, s.Logger) - return - } - var cell chronograf.DashboardCell - if err := json.NewDecoder(r.Body).Decode(&cell); err != nil { - invalidJSON(w, s.Logger) - return - } - - if err := ValidDashboardCellRequest(&cell); err != nil { - invalidData(w, err, s.Logger) - return - } - - ids := &idgen.UUID{} - cid, err := ids.Generate() - if err != nil { - msg := fmt.Sprintf("Error creating cell ID of dashboard %d: %v", id, err) - Error(w, http.StatusInternalServerError, msg, s.Logger) - return - } - cell.ID = cid - - dash.Cells = append(dash.Cells, cell) - if err := s.Store.Dashboards(ctx).Update(ctx, dash); err != nil { - msg := fmt.Sprintf("Error adding cell %s to dashboard %d: %v", cid, id, err) - Error(w, http.StatusInternalServerError, msg, s.Logger) - return - } - - boards := newDashboardResponse(dash) - for _, cell := range boards.Cells { - if cell.ID == cid { - encodeJSON(w, http.StatusOK, cell, s.Logger) - return - } - } -} - -// DashboardCellID gets a specific cell from an existing dashboard -func (s *Service) DashboardCellID(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - id, err := paramID("id", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) - return - } - - dash, err := s.Store.Dashboards(ctx).Get(ctx, chronograf.DashboardID(id)) - if err != nil { - notFound(w, id, s.Logger) - return - } - - boards := newDashboardResponse(dash) - cid := httprouter.ParamsFromContext(ctx).ByName("cid") - for _, cell := range boards.Cells { - if cell.ID == cid { - encodeJSON(w, http.StatusOK, cell, s.Logger) - return - } - } - notFound(w, id, s.Logger) -} - -// RemoveDashboardCell removes a specific cell from an existing dashboard -func (s *Service) RemoveDashboardCell(w http.ResponseWriter, r *http.Request) { - id, err := paramID("id", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) - return - } - - ctx := r.Context() - dash, err := s.Store.Dashboards(ctx).Get(ctx, chronograf.DashboardID(id)) - if err != nil { - notFound(w, id, s.Logger) - return - } - - cid := httprouter.ParamsFromContext(ctx).ByName("cid") - cellid := -1 - for i, cell := range dash.Cells { - if cell.ID == cid { - cellid = i - break - } - } - if cellid == -1 { - notFound(w, id, s.Logger) - return - } - - dash.Cells = append(dash.Cells[:cellid], dash.Cells[cellid+1:]...) 
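RemoveDashboardCell deletes the matched cell with the standard slice-splice idiom shown on the line just above; for readers new to it, a self-contained example:

```go
package main

import "fmt"

// removeAt splices out the element at index i. Note that it reuses the
// original backing array, so the input slice should not be used afterwards.
func removeAt(cells []string, i int) []string {
	return append(cells[:i], cells[i+1:]...)
}

func main() {
	cells := []string{"cpu", "mem", "disk"}
	fmt.Println(removeAt(cells, 1)) // [cpu disk]
}
```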
- if err := s.Store.Dashboards(ctx).Update(ctx, dash); err != nil { - msg := fmt.Sprintf("Error removing cell %s from dashboard %d: %v", cid, id, err) - Error(w, http.StatusInternalServerError, msg, s.Logger) - return - } - w.WriteHeader(http.StatusNoContent) -} - -// ReplaceDashboardCell replaces a cell entirely within an existing dashboard -func (s *Service) ReplaceDashboardCell(w http.ResponseWriter, r *http.Request) { - id, err := paramID("id", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) - return - } - - ctx := r.Context() - dash, err := s.Store.Dashboards(ctx).Get(ctx, chronograf.DashboardID(id)) - if err != nil { - notFound(w, id, s.Logger) - return - } - - cid := httprouter.ParamsFromContext(ctx).ByName("cid") - cellid := -1 - for i, cell := range dash.Cells { - if cell.ID == cid { - cellid = i - break - } - } - if cellid == -1 { - notFound(w, cid, s.Logger) - return - } - - var cell chronograf.DashboardCell - if err := json.NewDecoder(r.Body).Decode(&cell); err != nil { - invalidJSON(w, s.Logger) - return - } - - for i, a := range cell.Axes { - if len(a.Bounds) == 0 { - a.Bounds = []string{"", ""} - cell.Axes[i] = a - } - } - - if err := ValidDashboardCellRequest(&cell); err != nil { - invalidData(w, err, s.Logger) - return - } - cell.ID = cid - - dash.Cells[cellid] = cell - if err := s.Store.Dashboards(ctx).Update(ctx, dash); err != nil { - msg := fmt.Sprintf("Error updating cell %s in dashboard %d: %v", cid, id, err) - Error(w, http.StatusInternalServerError, msg, s.Logger) - return - } - - res := newCellResponse(dash.ID, cell) - encodeJSON(w, http.StatusOK, res, s.Logger) -} diff --git a/chronograf/server/cells_test.go b/chronograf/server/cells_test.go deleted file mode 100644 index 811cc33e997..00000000000 --- a/chronograf/server/cells_test.go +++ /dev/null @@ -1,992 +0,0 @@ -package server - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "net/http" - "net/http/httptest" - "net/url" - "reflect" - "strings" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/httprouter" - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/mocks" -) - -func Test_Cells_CorrectAxis(t *testing.T) { - t.Parallel() - - axisTests := []struct { - name string - cell *chronograf.DashboardCell - shouldFail bool - }{ - { - name: "correct axes", - cell: &chronograf.DashboardCell{ - Axes: map[string]chronograf.Axis{ - "x": chronograf.Axis{ - Bounds: []string{"0", "100"}, - }, - "y": chronograf.Axis{ - Bounds: []string{"0", "100"}, - }, - "y2": chronograf.Axis{ - Bounds: []string{"0", "100"}, - }, - }, - }, - }, - { - name: "invalid axes present", - cell: &chronograf.DashboardCell{ - Axes: map[string]chronograf.Axis{ - "axis of evil": chronograf.Axis{ - Bounds: []string{"666", "666"}, - }, - "axis of awesome": chronograf.Axis{ - Bounds: []string{"1337", "31337"}, - }, - }, - }, - shouldFail: true, - }, - { - name: "linear scale value", - cell: &chronograf.DashboardCell{ - Axes: map[string]chronograf.Axis{ - "x": chronograf.Axis{ - Scale: "linear", - Bounds: []string{"0", "100"}, - }, - }, - }, - }, - { - name: "log scale value", - cell: &chronograf.DashboardCell{ - Axes: map[string]chronograf.Axis{ - "x": chronograf.Axis{ - Scale: "log", - Bounds: []string{"0", "100"}, - }, - }, - }, - }, - { - name: "invalid scale value", - cell: &chronograf.DashboardCell{ - Axes: map[string]chronograf.Axis{ - "x": chronograf.Axis{ - Scale: "potatoes", - Bounds: []string{"0", 
"100"}, - }, - }, - }, - shouldFail: true, - }, - { - name: "base 10 axis", - cell: &chronograf.DashboardCell{ - Axes: map[string]chronograf.Axis{ - "x": chronograf.Axis{ - Base: "10", - Bounds: []string{"0", "100"}, - }, - }, - }, - }, - { - name: "base 2 axis", - cell: &chronograf.DashboardCell{ - Axes: map[string]chronograf.Axis{ - "x": chronograf.Axis{ - Base: "2", - Bounds: []string{"0", "100"}, - }, - }, - }, - }, - { - name: "invalid base", - cell: &chronograf.DashboardCell{ - Axes: map[string]chronograf.Axis{ - "x": chronograf.Axis{ - Base: "all your base are belong to us", - Bounds: []string{"0", "100"}, - }, - }, - }, - shouldFail: true, - }, - } - - for _, test := range axisTests { - t.Run(test.name, func(tt *testing.T) { - if err := HasCorrectAxes(test.cell); err != nil && !test.shouldFail { - t.Errorf("%q: Unexpected error: err: %s", test.name, err) - } else if err == nil && test.shouldFail { - t.Errorf("%q: Expected error and received none", test.name) - } - }) - } -} - -func Test_Service_DashboardCells(t *testing.T) { - cellsTests := []struct { - name string - reqURL *url.URL - ctxParams map[string]string - mockResponse []chronograf.DashboardCell - expected []chronograf.DashboardCell - expectedCode int - }{ - { - name: "happy path", - reqURL: &url.URL{ - Path: "/chronograf/v1/dashboards/1/cells", - }, - ctxParams: map[string]string{ - "id": "1", - }, - mockResponse: []chronograf.DashboardCell{}, - expected: []chronograf.DashboardCell{}, - expectedCode: http.StatusOK, - }, - { - name: "cell axes should always be \"x\", \"y\", and \"y2\"", - reqURL: &url.URL{ - Path: "/chronograf/v1/dashboards/1/cells", - }, - ctxParams: map[string]string{ - "id": "1", - }, - mockResponse: []chronograf.DashboardCell{ - { - ID: "3899be5a-f6eb-4347-b949-de2f4fbea859", - X: 0, - Y: 0, - W: 4, - H: 4, - Name: "CPU", - Type: "bar", - Queries: []chronograf.DashboardQuery{}, - Axes: map[string]chronograf.Axis{}, - }, - }, - expected: []chronograf.DashboardCell{ - { - ID: "3899be5a-f6eb-4347-b949-de2f4fbea859", - X: 0, - Y: 0, - W: 4, - H: 4, - Name: "CPU", - Type: "bar", - Queries: []chronograf.DashboardQuery{}, - CellColors: []chronograf.CellColor{}, - Axes: map[string]chronograf.Axis{ - "x": chronograf.Axis{ - Bounds: []string{"", ""}, - }, - "y": chronograf.Axis{ - Bounds: []string{"", ""}, - }, - "y2": chronograf.Axis{ - Bounds: []string{"", ""}, - }, - }, - }, - }, - expectedCode: http.StatusOK, - }, - } - - for _, test := range cellsTests { - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - // setup context with params - params := httprouter.Params{} - for k, v := range test.ctxParams { - params = append(params, httprouter.Param{ - Key: k, - Value: v, - }) - } - ctx := context.WithValue( - context.Background(), - httprouter.ParamsKey, - params, - ) - - // setup response recorder and request - rr := httptest.NewRecorder() - req := httptest.NewRequest("GET", test.reqURL.RequestURI(), strings.NewReader("")).WithContext(ctx) - - // setup mock DashboardCells store and logger - tlog := &mocks.TestLogger{} - svc := &Service{ - Store: &mocks.Store{ - DashboardsStore: &mocks.DashboardsStore{ - GetF: func(ctx context.Context, id chronograf.DashboardID) (chronograf.Dashboard, error) { - return chronograf.Dashboard{ - ID: chronograf.DashboardID(1), - Cells: test.mockResponse, - Templates: []chronograf.Template{}, - Name: "empty dashboard", - }, nil - }, - }, - }, - Logger: tlog, - } - - // invoke DashboardCell handler - svc.DashboardCells(rr, req) - - // setup frame to decode response into - 
respFrame := []struct { - chronograf.DashboardCell - Links json.RawMessage `json:"links"` // ignore links - }{} - - // decode response - resp := rr.Result() - - if resp.StatusCode != test.expectedCode { - tlog.Dump(t) - t.Fatalf("%q - Status codes do not match. Want %d (%s), Got %d (%s)", test.name, test.expectedCode, http.StatusText(test.expectedCode), resp.StatusCode, http.StatusText(resp.StatusCode)) - } - - if err := json.NewDecoder(resp.Body).Decode(&respFrame); err != nil { - t.Fatalf("%q - Error unmarshalling response body: err: %s", test.name, err) - } - - // extract actual - actual := []chronograf.DashboardCell{} - for _, rsp := range respFrame { - actual = append(actual, rsp.DashboardCell) - } - - // compare actual and expected - if !cmp.Equal(actual, test.expected) { - t.Fatalf("%q - Dashboard Cells do not match: diff: %s", test.name, cmp.Diff(actual, test.expected)) - } - }) - } -} - -func TestHasCorrectColors(t *testing.T) { - tests := []struct { - name string - c *chronograf.DashboardCell - wantErr bool - }{ - { - name: "min type is valid", - c: &chronograf.DashboardCell{ - CellColors: []chronograf.CellColor{ - { - Type: "min", - Hex: "#FFFFFF", - }, - }, - }, - }, - { - name: "max type is valid", - c: &chronograf.DashboardCell{ - CellColors: []chronograf.CellColor{ - { - Type: "max", - Hex: "#FFFFFF", - }, - }, - }, - }, - { - name: "threshold type is valid", - c: &chronograf.DashboardCell{ - CellColors: []chronograf.CellColor{ - { - Type: "threshold", - Hex: "#FFFFFF", - }, - }, - }, - }, - { - name: "invalid color type", - c: &chronograf.DashboardCell{ - CellColors: []chronograf.CellColor{ - { - Type: "unknown", - Hex: "#FFFFFF", - }, - }, - }, - wantErr: true, - }, - { - name: "invalid color hex", - c: &chronograf.DashboardCell{ - CellColors: []chronograf.CellColor{ - { - Type: "min", - Hex: "bad", - }, - }, - }, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if err := HasCorrectColors(tt.c); (err != nil) != tt.wantErr { - t.Errorf("HasCorrectColors() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} - -func TestService_ReplaceDashboardCell(t *testing.T) { - tests := []struct { - name string - DashboardsStore chronograf.DashboardsStore - ID string - CID string - w *httptest.ResponseRecorder - r *http.Request - want string - }{ - { - name: "update cell retains query config", - ID: "1", - CID: "3c5c4102-fa40-4585-a8f9-917c77e37192", - DashboardsStore: &mocks.DashboardsStore{ - UpdateF: func(ctx context.Context, target chronograf.Dashboard) error { - return nil - }, - GetF: func(ctx context.Context, ID chronograf.DashboardID) (chronograf.Dashboard, error) { - return chronograf.Dashboard{ - ID: ID, - Cells: []chronograf.DashboardCell{ - { - ID: "3c5c4102-fa40-4585-a8f9-917c77e37192", - W: 4, - H: 4, - Name: "Untitled Cell", - Queries: []chronograf.DashboardQuery{ - { - Command: "SELECT mean(\"usage_user\") AS \"mean_usage_user\" FROM \"telegraf\".\"autogen\".\"cpu\" WHERE time > :dashboardTime: AND \"cpu\"=:cpu: GROUP BY :interval: FILL(null)", - QueryConfig: chronograf.QueryConfig{ - ID: "3cd3eaa4-a4b8-44b3-b69e-0c7bf6b91d9e", - Database: "telegraf", - Measurement: "cpu", - RetentionPolicy: "autogen", - Fields: []chronograf.Field{ - { - Value: "mean", - Type: "func", - Alias: "mean_usage_user", - Args: []chronograf.Field{ - { - Value: "usage_user", - Type: "field", - }, - }, - }, - }, - Tags: map[string][]string{ - "cpu": { - "ChristohersMBP2.lan", - }, - }, - GroupBy: chronograf.GroupBy{ - Time: "2s", - Tags: 
[]string{}, - }, - AreTagsAccepted: true, - Fill: "null", - RawText: strPtr("SELECT mean(\"usage_user\") AS \"mean_usage_user\" FROM \"telegraf\".\"autogen\".\"cpu\" WHERE time > :dashboardTime: AND \"cpu\"=:cpu: GROUP BY :interval: FILL(null)"), - Range: &chronograf.DurationRange{ - Lower: "now() - 15m"}, - Shifts: []chronograf.TimeShift{}, - }, - }, - }, - Axes: map[string]chronograf.Axis{ - "x": { - Bounds: []string{"", ""}, - }, - "y": { - Bounds: []string{"", ""}, - }, - "y2": { - Bounds: []string{"", ""}, - }, - }, - Type: "line", - CellColors: []chronograf.CellColor{ - { - ID: "0", - Type: "min", - Hex: "#00C9FF", - Name: "laser", - Value: "0", - }, - { - ID: "1", - Type: "max", - Hex: "#9394FF", - Name: "comet", - Value: "100", - }, - }, - }, - }, - }, nil - }, - }, - w: httptest.NewRecorder(), - r: httptest.NewRequest("POST", "/queries", bytes.NewReader([]byte(` - { - "i": "3c5c4102-fa40-4585-a8f9-917c77e37192", - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "name": "Untitled Cell", - "queries": [ - { - "queryConfig": { - "id": "3cd3eaa4-a4b8-44b3-b69e-0c7bf6b91d9e", - "database": "telegraf", - "measurement": "cpu", - "retentionPolicy": "autogen", - "fields": [ - { - "value": "mean", - "type": "func", - "alias": "mean_usage_user", - "args": [{"value": "usage_user", "type": "field", "alias": ""}] - } - ], - "tags": {"cpu": ["ChristohersMBP2.lan"]}, - "groupBy": {"time": "2s", "tags": []}, - "areTagsAccepted": true, - "fill": "null", - "rawText": - "SELECT mean(\"usage_user\") AS \"mean_usage_user\" FROM \"telegraf\".\"autogen\".\"cpu\" WHERE time > :dashboardTime: AND \"cpu\"=:cpu: GROUP BY :interval: FILL(null)", - "range": {"upper": "", "lower": "now() - 15m"}, - "shifts": [] - }, - "query": - "SELECT mean(\"usage_user\") AS \"mean_usage_user\" FROM \"telegraf\".\"autogen\".\"cpu\" WHERE time > :dashboardTime: AND \"cpu\"=:cpu: GROUP BY :interval: FILL(null)", - "source": null - } - ], - "axes": { - "x": { - "bounds": ["",""], - "label": "", - "prefix": "", - "suffix": "", - "base": "", - "scale": "" - }, - "y": { - "bounds": ["",""], - "label": "", - "prefix": "", - "suffix": "", - "base": "", - "scale": "" - }, - "y2": { - "bounds": ["",""], - "label": "", - "prefix": "", - "suffix": "", - "base": "", - "scale": "" - } - }, - "type": "line", - "colors": [ - {"type": "min", "hex": "#00C9FF", "id": "0", "name": "laser", "value": "0"}, - { - "type": "max", - "hex": "#9394FF", - "id": "1", - "name": "comet", - "value": "100" - } - ], - "links": { - "self": - "/chronograf/v1/dashboards/6/cells/3c5c4102-fa40-4585-a8f9-917c77e37192" - } - } - `))), - want: `{"i":"3c5c4102-fa40-4585-a8f9-917c77e37192","x":0,"y":0,"w":4,"h":4,"name":"Untitled Cell","queries":[{"query":"SELECT mean(\"usage_user\") AS \"mean_usage_user\" FROM \"telegraf\".\"autogen\".\"cpu\" WHERE time \u003e :dashboardTime: AND \"cpu\"=:cpu: GROUP BY :interval: FILL(null)","queryConfig":{"id":"3cd3eaa4-a4b8-44b3-b69e-0c7bf6b91d9e","database":"telegraf","measurement":"cpu","retentionPolicy":"autogen","fields":[{"value":"mean","type":"func","alias":"mean_usage_user","args":[{"value":"usage_user","type":"field","alias":""}]}],"tags":{"cpu":["ChristohersMBP2.lan"]},"groupBy":{"time":"2s","tags":[]},"areTagsAccepted":true,"fill":"null","rawText":"SELECT mean(\"usage_user\") AS \"mean_usage_user\" FROM \"telegraf\".\"autogen\".\"cpu\" WHERE time \u003e :dashboardTime: AND \"cpu\"=:cpu: GROUP BY :interval: FILL(null)","range":{"upper":"","lower":"now() - 
15m"},"shifts":[]},"source":""}],"axes":{"x":{"bounds":["",""],"label":"","prefix":"","suffix":"","base":"","scale":""},"y":{"bounds":["",""],"label":"","prefix":"","suffix":"","base":"","scale":""},"y2":{"bounds":["",""],"label":"","prefix":"","suffix":"","base":"","scale":""}},"type":"line","colors":[{"id":"0","type":"min","hex":"#00C9FF","name":"laser","value":"0"},{"id":"1","type":"max","hex":"#9394FF","name":"comet","value":"100"}],"legend":{},"tableOptions":{"verticalTimeAxis":false,"sortBy":{"internalName":"","displayName":"","visible":false},"wrapping":"","fixFirstColumn":false},"fieldOptions":null,"timeFormat":"","decimalPlaces":{"isEnforced":false,"digits":0},"links":{"self":"/chronograf/v1/dashboards/1/cells/3c5c4102-fa40-4585-a8f9-917c77e37192"}} -`, - }, - { - name: "dashboard doesn't exist", - ID: "1", - DashboardsStore: &mocks.DashboardsStore{ - GetF: func(ctx context.Context, ID chronograf.DashboardID) (chronograf.Dashboard, error) { - return chronograf.Dashboard{}, fmt.Errorf("doesn't exist") - }, - }, - w: httptest.NewRecorder(), - r: httptest.NewRequest("PUT", "/chronograf/v1/dashboards/1/cells/3c5c4102-fa40-4585-a8f9-917c77e37192", nil), - want: `{"code":404,"message":"ID 1 not found"}`, - }, - { - name: "cell doesn't exist", - ID: "1", - CID: "3c5c4102-fa40-4585-a8f9-917c77e37192", - DashboardsStore: &mocks.DashboardsStore{ - GetF: func(ctx context.Context, ID chronograf.DashboardID) (chronograf.Dashboard, error) { - return chronograf.Dashboard{}, nil - }, - }, - w: httptest.NewRecorder(), - r: httptest.NewRequest("PUT", "/chronograf/v1/dashboards/1/cells/3c5c4102-fa40-4585-a8f9-917c77e37192", nil), - want: `{"code":404,"message":"ID 3c5c4102-fa40-4585-a8f9-917c77e37192 not found"}`, - }, - { - name: "invalid query config", - ID: "1", - CID: "3c5c4102-fa40-4585-a8f9-917c77e37192", - DashboardsStore: &mocks.DashboardsStore{ - GetF: func(ctx context.Context, ID chronograf.DashboardID) (chronograf.Dashboard, error) { - return chronograf.Dashboard{ - ID: ID, - Cells: []chronograf.DashboardCell{ - { - ID: "3c5c4102-fa40-4585-a8f9-917c77e37192", - }, - }, - }, nil - }, - }, - w: httptest.NewRecorder(), - r: httptest.NewRequest("PUT", "/chronograf/v1/dashboards/1/cells/3c5c4102-fa40-4585-a8f9-917c77e37192", bytes.NewReader([]byte(`{ - "i": "3c5c4102-fa40-4585-a8f9-917c77e37192", - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "name": "Untitled Cell", - "queries": [ - { - "queryConfig": { - "fields": [ - { - "value": "invalid", - "type": "invalidType" - } - ] - } - } - ] - }`))), - want: `{"code":422,"message":"invalid field type \"invalidType\" ; expect func, field, integer, number, regex, wildcard"}`, - }, - { - name: "JSON is not parsable", - ID: "1", - CID: "3c5c4102-fa40-4585-a8f9-917c77e37192", - DashboardsStore: &mocks.DashboardsStore{ - GetF: func(ctx context.Context, ID chronograf.DashboardID) (chronograf.Dashboard, error) { - return chronograf.Dashboard{ - ID: ID, - Cells: []chronograf.DashboardCell{ - { - ID: "3c5c4102-fa40-4585-a8f9-917c77e37192", - }, - }, - }, nil - }, - }, - w: httptest.NewRecorder(), - r: httptest.NewRequest("PUT", "/chronograf/v1/dashboards/1/cells/3c5c4102-fa40-4585-a8f9-917c77e37192", nil), - want: `{"code":400,"message":"unparsable JSON"}`, - }, - { - name: "not able to update store returns error message", - ID: "1", - CID: "3c5c4102-fa40-4585-a8f9-917c77e37192", - DashboardsStore: &mocks.DashboardsStore{ - UpdateF: func(ctx context.Context, target chronograf.Dashboard) error { - return fmt.Errorf("error") - }, - GetF: func(ctx context.Context, 
ID chronograf.DashboardID) (chronograf.Dashboard, error) { - return chronograf.Dashboard{ - ID: ID, - Cells: []chronograf.DashboardCell{ - { - ID: "3c5c4102-fa40-4585-a8f9-917c77e37192", - }, - }, - }, nil - }, - }, - w: httptest.NewRecorder(), - r: httptest.NewRequest("PUT", "/chronograf/v1/dashboards/1/cells/3c5c4102-fa40-4585-a8f9-917c77e37192", bytes.NewReader([]byte(`{ - "i": "3c5c4102-fa40-4585-a8f9-917c77e37192", - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "name": "Untitled Cell", - "queries": [ - { - "queryConfig": { - "fields": [ - { - "value": "usage_user", - "type": "field" - } - ] - } - } - ] - }`))), - want: `{"code":500,"message":"Error updating cell 3c5c4102-fa40-4585-a8f9-917c77e37192 in dashboard 1: error"}`, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := &Service{ - Store: &mocks.Store{ - DashboardsStore: tt.DashboardsStore, - }, - Logger: &mocks.TestLogger{}, - } - tt.r = WithContext(tt.r.Context(), tt.r, map[string]string{ - "id": tt.ID, - "cid": tt.CID, - }) - tt.r = tt.r.WithContext(context.WithValue( - context.TODO(), - httprouter.ParamsKey, - httprouter.Params{ - { - Key: "id", - Value: tt.ID, - }, - { - Key: "cid", - Value: tt.CID, - }, - })) - s.ReplaceDashboardCell(tt.w, tt.r) - got := tt.w.Body.String() - if got != tt.want { - t.Errorf("ReplaceDashboardCell() = got/want\n%s\n%s\n", got, tt.want) - } - }) - } -} - -func strPtr(s string) *string { - return &s -} - -func Test_newCellResponses(t *testing.T) { - tests := []struct { - name string - dID chronograf.DashboardID - dcells []chronograf.DashboardCell - want []dashboardCellResponse - }{ - { - name: "all fields set", - dID: chronograf.DashboardID(1), - dcells: []chronograf.DashboardCell{ - chronograf.DashboardCell{ - ID: "445f8dc0-4d73-4168-8477-f628690d18a3", - X: 0, - Y: 0, - W: 4, - H: 4, - Name: "Untitled Cell", - Queries: []chronograf.DashboardQuery{ - { - Command: "SELECT mean(\"usage_user\") AS \"mean_usage_user\" FROM \"telegraf\".\"autogen\".\"cpu\" WHERE time > :dashboardTime: AND \"cpu\"=:cpu: GROUP BY :interval: FILL(null)", - Label: "", - QueryConfig: chronograf.QueryConfig{ - ID: "8d5ec6da-13a5-423e-9026-7bc45649766c", - Database: "telegraf", - Measurement: "cpu", - RetentionPolicy: "autogen", - Fields: []chronograf.Field{ - { - Value: "mean", - Type: "func", - Alias: "mean_usage_user", - Args: []chronograf.Field{ - { - Value: "usage_user", - Type: "field", - Alias: "", - }, - }, - }, - }, - Tags: map[string][]string{"cpu": []string{"ChristohersMBP2.lan"}}, - GroupBy: chronograf.GroupBy{ - Time: "2s", - }, - AreTagsAccepted: true, - Fill: "null", - RawText: strPtr("SELECT mean(\"usage_user\") AS \"mean_usage_user\" FROM \"telegraf\".\"autogen\".\"cpu\" WHERE time > :dashboardTime: AND \"cpu\"=:cpu: GROUP BY :interval: FILL(null)"), - Range: &chronograf.DurationRange{ - Lower: "now() - 15m", - }, - }, - Source: "", - }, - }, - Axes: map[string]chronograf.Axis{ - "x": chronograf.Axis{}, - "y": chronograf.Axis{}, - "y2": chronograf.Axis{}, - }, - Type: "line", - CellColors: []chronograf.CellColor{ - chronograf.CellColor{ID: "0", Type: "min", Hex: "#00C9FF", Name: "laser", Value: "0"}, - chronograf.CellColor{ID: "1", Type: "max", Hex: "#9394FF", Name: "comet", Value: "100"}, - }, - Legend: chronograf.Legend{ - Type: "static", - Orientation: "bottom", - }, - }, - }, - want: []dashboardCellResponse{ - { - DashboardCell: chronograf.DashboardCell{ - ID: "445f8dc0-4d73-4168-8477-f628690d18a3", - W: 4, - H: 4, - Name: "Untitled Cell", - Queries: 
[]chronograf.DashboardQuery{ - { - Command: "SELECT mean(\"usage_user\") AS \"mean_usage_user\" FROM \"telegraf\".\"autogen\".\"cpu\" WHERE time > :dashboardTime: AND \"cpu\"=:cpu: GROUP BY :interval: FILL(null)", - QueryConfig: chronograf.QueryConfig{ - ID: "8d5ec6da-13a5-423e-9026-7bc45649766c", - Database: "telegraf", - Measurement: "cpu", - RetentionPolicy: "autogen", - Fields: []chronograf.Field{ - { - Value: "mean", - Type: "func", - Alias: "mean_usage_user", - Args: []chronograf.Field{ - { - Value: "usage_user", - Type: "field", - }, - }, - }, - }, - Tags: map[string][]string{"cpu": {"ChristohersMBP2.lan"}}, - GroupBy: chronograf.GroupBy{ - Time: "2s", - }, - AreTagsAccepted: true, - Fill: "null", - RawText: strPtr("SELECT mean(\"usage_user\") AS \"mean_usage_user\" FROM \"telegraf\".\"autogen\".\"cpu\" WHERE time > :dashboardTime: AND \"cpu\"=:cpu: GROUP BY :interval: FILL(null)"), - Range: &chronograf.DurationRange{ - Lower: "now() - 15m", - }, - }, - }, - }, - Axes: map[string]chronograf.Axis{ - "x": {}, - "y": {}, - "y2": {}, - }, - Type: "line", - CellColors: []chronograf.CellColor{ - { - ID: "0", - Type: "min", - Hex: "#00C9FF", - Name: "laser", - Value: "0", - }, - { - ID: "1", - Type: "max", - Hex: "#9394FF", - Name: "comet", - Value: "100", - }, - }, - Legend: chronograf.Legend{ - Type: "static", - Orientation: "bottom", - }, - }, - Links: dashboardCellLinks{ - Self: "/chronograf/v1/dashboards/1/cells/445f8dc0-4d73-4168-8477-f628690d18a3"}, - }, - }, - }, - { - name: "nothing set", - dID: chronograf.DashboardID(1), - dcells: []chronograf.DashboardCell{ - chronograf.DashboardCell{ - ID: "445f8dc0-4d73-4168-8477-f628690d18a3", - X: 0, - Y: 0, - W: 4, - H: 4, - Name: "Untitled Cell", - }, - }, - want: []dashboardCellResponse{ - { - DashboardCell: chronograf.DashboardCell{ - ID: "445f8dc0-4d73-4168-8477-f628690d18a3", - W: 4, - H: 4, - Name: "Untitled Cell", - Queries: []chronograf.DashboardQuery{}, - Axes: map[string]chronograf.Axis{ - "x": chronograf.Axis{ - Bounds: []string{"", ""}, - }, - "y": chronograf.Axis{ - Bounds: []string{"", ""}, - }, - "y2": chronograf.Axis{ - Bounds: []string{"", ""}, - }, - }, - CellColors: []chronograf.CellColor{}, - Legend: chronograf.Legend{}, - }, - Links: dashboardCellLinks{ - Self: "/chronograf/v1/dashboards/1/cells/445f8dc0-4d73-4168-8477-f628690d18a3"}, - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := newCellResponses(tt.dID, tt.dcells); !reflect.DeepEqual(got, tt.want) { - t.Errorf("newCellResponses() = got-/want+ %s", cmp.Diff(got, tt.want)) - } - }) - } -} - -func TestHasCorrectLegend(t *testing.T) { - tests := []struct { - name string - c *chronograf.DashboardCell - wantErr bool - }{ - { - name: "empty legend is ok", - c: &chronograf.DashboardCell{}, - }, - { - name: "must have both an orientation and type", - c: &chronograf.DashboardCell{ - Legend: chronograf.Legend{ - Type: "static", - }, - }, - wantErr: true, - }, - { - name: "must have both a type and orientation", - c: &chronograf.DashboardCell{ - Legend: chronograf.Legend{ - Orientation: "bottom", - }, - }, - wantErr: true, - }, - { - name: "invalid types", - c: &chronograf.DashboardCell{ - Legend: chronograf.Legend{ - Type: "no such type", - Orientation: "bottom", - }, - }, - wantErr: true, - }, - { - name: "invalid orientation", - c: &chronograf.DashboardCell{ - Legend: chronograf.Legend{ - Type: "static", - Orientation: "no such orientation", - }, - }, - wantErr: true, - }, - { - name: "orientation bottom valid", - c: 
&chronograf.DashboardCell{ - Legend: chronograf.Legend{ - Type: "static", - Orientation: "bottom", - }, - }, - }, - { - name: "orientation top valid", - c: &chronograf.DashboardCell{ - Legend: chronograf.Legend{ - Type: "static", - Orientation: "top", - }, - }, - }, - { - name: "orientation right valid", - c: &chronograf.DashboardCell{ - Legend: chronograf.Legend{ - Type: "static", - Orientation: "right", - }, - }, - }, - { - name: "orientation left valid", - c: &chronograf.DashboardCell{ - Legend: chronograf.Legend{ - Type: "static", - Orientation: "left", - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if err := HasCorrectLegend(tt.c); (err != nil) != tt.wantErr { - t.Errorf("HasCorrectLegend() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} diff --git a/chronograf/server/config.go b/chronograf/server/config.go deleted file mode 100644 index cb5d07802cd..00000000000 --- a/chronograf/server/config.go +++ /dev/null @@ -1,115 +0,0 @@ -package server - -import ( - "encoding/json" - "net/http" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -type configLinks struct { - Self string `json:"self"` // Self link mapping to this resource - Auth string `json:"auth"` // Auth link to the auth config endpoint -} - -type selfLinks struct { - Self string `json:"self"` // Self link mapping to this resource -} - -type configResponse struct { - Links configLinks `json:"links"` - chronograf.Config -} - -func newConfigResponse(config chronograf.Config) *configResponse { - return &configResponse{ - Links: configLinks{ - Self: "/chronograf/v1/config", - Auth: "/chronograf/v1/config/auth", - }, - Config: config, - } -} - -type authConfigResponse struct { - Links selfLinks `json:"links"` - chronograf.AuthConfig -} - -func newAuthConfigResponse(config chronograf.Config) *authConfigResponse { - return &authConfigResponse{ - Links: selfLinks{ - Self: "/chronograf/v1/config/auth", - }, - AuthConfig: config.Auth, - } -} - -// Config retrieves the global application configuration -func (s *Service) Config(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - config, err := s.Store.Config(ctx).Get(ctx) - if err != nil { - Error(w, http.StatusBadRequest, err.Error(), s.Logger) - return - } - - if config == nil { - Error(w, http.StatusBadRequest, "Configuration object was nil", s.Logger) - return - } - res := newConfigResponse(*config) - - encodeJSON(w, http.StatusOK, res, s.Logger) -} - -// AuthConfig retrieves the auth section of the global application configuration -func (s *Service) AuthConfig(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - config, err := s.Store.Config(ctx).Get(ctx) - if err != nil { - Error(w, http.StatusBadRequest, err.Error(), s.Logger) - return - } - - if config == nil { - Error(w, http.StatusBadRequest, "Configuration object was nil", s.Logger) - return - } - - res := newAuthConfigResponse(*config) - - encodeJSON(w, http.StatusOK, res, s.Logger) -} - -// ReplaceAuthConfig replaces the auth section of the global application configuration -func (s *Service) ReplaceAuthConfig(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - var authConfig chronograf.AuthConfig - if err := json.NewDecoder(r.Body).Decode(&authConfig); err != nil { - invalidJSON(w, s.Logger) - return - } - - config, err := s.Store.Config(ctx).Get(ctx) - if err != nil { - Error(w, http.StatusBadRequest, err.Error(), s.Logger) - return - } - if config == nil { - Error(w, http.StatusBadRequest, "Configuration object was 
nil", s.Logger) - return - } - config.Auth = authConfig - - res := newAuthConfigResponse(*config) - if err := s.Store.Config(ctx).Update(ctx, config); err != nil { - unknownErrorWithMessage(w, err, s.Logger) - return - } - - encodeJSON(w, http.StatusOK, res, s.Logger) -} diff --git a/chronograf/server/config_test.go b/chronograf/server/config_test.go deleted file mode 100644 index d9633ebcaef..00000000000 --- a/chronograf/server/config_test.go +++ /dev/null @@ -1,218 +0,0 @@ -package server - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "net/http/httptest" - "testing" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/mocks" -) - -func TestConfig(t *testing.T) { - type fields struct { - ConfigStore chronograf.ConfigStore - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - wants wants - }{ - { - name: "Get global application configuration", - fields: fields{ - ConfigStore: &mocks.ConfigStore{ - Config: &chronograf.Config{ - Auth: chronograf.AuthConfig{ - SuperAdminNewUsers: false, - }, - }, - }, - }, - wants: wants{ - statusCode: 200, - contentType: "application/json", - body: `{"links":{"self":"/chronograf/v1/config","auth":"/chronograf/v1/config/auth"},"auth":{"superAdminNewUsers":false}}`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := &Service{ - Store: &mocks.Store{ - ConfigStore: tt.fields.ConfigStore, - }, - Logger: &chronograf.NoopLogger{}, - } - - w := httptest.NewRecorder() - r := httptest.NewRequest("GET", "http://any.url", nil) - - s.Config(w, r) - - resp := w.Result() - content := resp.Header.Get("Content-Type") - body, _ := ioutil.ReadAll(resp.Body) - - if resp.StatusCode != tt.wants.statusCode { - t.Errorf("%q. Config() = %v, want %v", tt.name, resp.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. Config() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if eq, _ := jsonEqual(string(body), tt.wants.body); tt.wants.body != "" && !eq { - t.Errorf("%q. Config() = \n***%v***\n,\nwant\n***%v***", tt.name, string(body), tt.wants.body) - } - }) - } -} - -func TestAuthConfig(t *testing.T) { - type fields struct { - ConfigStore chronograf.ConfigStore - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - wants wants - }{ - { - name: "Get auth configuration", - fields: fields{ - ConfigStore: &mocks.ConfigStore{ - Config: &chronograf.Config{ - Auth: chronograf.AuthConfig{ - SuperAdminNewUsers: false, - }, - }, - }, - }, - wants: wants{ - statusCode: 200, - contentType: "application/json", - body: `{"superAdminNewUsers": false, "links": {"self": "/chronograf/v1/config/auth"}}`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := &Service{ - Store: &mocks.Store{ - ConfigStore: tt.fields.ConfigStore, - }, - Logger: &chronograf.NoopLogger{}, - } - - w := httptest.NewRecorder() - r := httptest.NewRequest("GET", "http://any.url", nil) - - s.AuthConfig(w, r) - - resp := w.Result() - content := resp.Header.Get("Content-Type") - body, _ := ioutil.ReadAll(resp.Body) - - if resp.StatusCode != tt.wants.statusCode { - t.Errorf("%q. Config() = %v, want %v", tt.name, resp.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. 
Config() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if eq, _ := jsonEqual(string(body), tt.wants.body); tt.wants.body != "" && !eq { - t.Errorf("%q. Config() = \n***%v***\n,\nwant\n***%v***", tt.name, string(body), tt.wants.body) - } - }) - } -} - -func TestReplaceAuthConfig(t *testing.T) { - type fields struct { - ConfigStore chronograf.ConfigStore - } - type args struct { - payload interface{} // expects JSON serializable struct - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "Set auth configuration", - fields: fields{ - ConfigStore: &mocks.ConfigStore{ - Config: &chronograf.Config{ - Auth: chronograf.AuthConfig{ - SuperAdminNewUsers: false, - }, - }, - }, - }, - args: args{ - payload: chronograf.AuthConfig{ - SuperAdminNewUsers: true, - }, - }, - wants: wants{ - statusCode: 200, - contentType: "application/json", - body: `{"superAdminNewUsers": true, "links": {"self": "/chronograf/v1/config/auth"}}`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := &Service{ - Store: &mocks.Store{ - ConfigStore: tt.fields.ConfigStore, - }, - Logger: &chronograf.NoopLogger{}, - } - - w := httptest.NewRecorder() - r := httptest.NewRequest("GET", "http://any.url", nil) - buf, _ := json.Marshal(tt.args.payload) - r.Body = ioutil.NopCloser(bytes.NewReader(buf)) - - s.ReplaceAuthConfig(w, r) - - resp := w.Result() - content := resp.Header.Get("Content-Type") - body, _ := ioutil.ReadAll(resp.Body) - - if resp.StatusCode != tt.wants.statusCode { - t.Errorf("%q. Config() = %v, want %v", tt.name, resp.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. Config() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if eq, _ := jsonEqual(string(body), tt.wants.body); tt.wants.body != "" && !eq { - t.Errorf("%q. 
Config() = \n***%v***\n,\nwant\n***%v***", tt.name, string(body), tt.wants.body) - } - }) - } -} diff --git a/chronograf/server/context.go b/chronograf/server/context.go deleted file mode 100644 index 5cd4ea37972..00000000000 --- a/chronograf/server/context.go +++ /dev/null @@ -1,30 +0,0 @@ -package server - -import ( - "context" -) - -type serverContextKey string - -// ServerContextKey is the key used to specify that the -// server is making the requet via context -const ServerContextKey = serverContextKey("server") - -// hasServerContext specifies if the context contains -// the ServerContextKey and that the value stored there is true -func hasServerContext(ctx context.Context) bool { - // prevents panic in case of nil context - if ctx == nil { - return false - } - sa, ok := ctx.Value(ServerContextKey).(bool) - // should never happen - if !ok { - return false - } - return sa -} - -func serverContext(ctx context.Context) context.Context { - return context.WithValue(ctx, ServerContextKey, true) -} diff --git a/chronograf/server/dashboards.go b/chronograf/server/dashboards.go deleted file mode 100644 index 0e74c5b5334..00000000000 --- a/chronograf/server/dashboards.go +++ /dev/null @@ -1,287 +0,0 @@ -package server - -import ( - "encoding/json" - "fmt" - "net/http" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -type dashboardLinks struct { - Self string `json:"self"` // Self link mapping to this resource - Cells string `json:"cells"` // Cells link to the cells endpoint - Templates string `json:"templates"` // Templates link to the templates endpoint -} - -type dashboardResponse struct { - ID chronograf.DashboardID `json:"id"` - Cells []dashboardCellResponse `json:"cells"` - Templates []templateResponse `json:"templates"` - Name string `json:"name"` - Organization string `json:"organization"` - Links dashboardLinks `json:"links"` -} - -type getDashboardsResponse struct { - Dashboards []*dashboardResponse `json:"dashboards"` -} - -func newDashboardResponse(d chronograf.Dashboard) *dashboardResponse { - base := "/chronograf/v1/dashboards" - dd := AddQueryConfigs(DashboardDefaults(d)) - cells := newCellResponses(dd.ID, dd.Cells) - templates := newTemplateResponses(dd.ID, dd.Templates) - - return &dashboardResponse{ - ID: dd.ID, - Name: dd.Name, - Cells: cells, - Templates: templates, - Organization: d.Organization, - Links: dashboardLinks{ - Self: fmt.Sprintf("%s/%d", base, dd.ID), - Cells: fmt.Sprintf("%s/%d/cells", base, dd.ID), - Templates: fmt.Sprintf("%s/%d/templates", base, dd.ID), - }, - } -} - -// Dashboards returns all dashboards within the store -func (s *Service) Dashboards(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - dashboards, err := s.Store.Dashboards(ctx).All(ctx) - if err != nil { - Error(w, http.StatusInternalServerError, "Error loading dashboards", s.Logger) - return - } - - res := getDashboardsResponse{ - Dashboards: []*dashboardResponse{}, - } - - for _, dashboard := range dashboards { - res.Dashboards = append(res.Dashboards, newDashboardResponse(dashboard)) - } - encodeJSON(w, http.StatusOK, res, s.Logger) -} - -// DashboardID returns a single specified dashboard -func (s *Service) DashboardID(w http.ResponseWriter, r *http.Request) { - id, err := paramID("id", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) - return - } - - ctx := r.Context() - e, err := s.Store.Dashboards(ctx).Get(ctx, chronograf.DashboardID(id)) - if err != nil { - notFound(w, id, s.Logger) - return - } - - res := 
newDashboardResponse(e) - encodeJSON(w, http.StatusOK, res, s.Logger) -} - -// NewDashboard creates and returns a new dashboard object -func (s *Service) NewDashboard(w http.ResponseWriter, r *http.Request) { - var dashboard chronograf.Dashboard - var err error - if err := json.NewDecoder(r.Body).Decode(&dashboard); err != nil { - invalidJSON(w, s.Logger) - return - } - - ctx := r.Context() - defaultOrg, err := s.Store.Organizations(ctx).DefaultOrganization(ctx) - if err != nil { - unknownErrorWithMessage(w, err, s.Logger) - return - } - - if err := ValidDashboardRequest(&dashboard, defaultOrg.ID); err != nil { - invalidData(w, err, s.Logger) - return - } - - if dashboard, err = s.Store.Dashboards(ctx).Add(r.Context(), dashboard); err != nil { - msg := fmt.Errorf("error storing dashboard %v: %v", dashboard, err) - unknownErrorWithMessage(w, msg, s.Logger) - return - } - - res := newDashboardResponse(dashboard) - location(w, res.Links.Self) - encodeJSON(w, http.StatusCreated, res, s.Logger) -} - -// RemoveDashboard deletes a dashboard -func (s *Service) RemoveDashboard(w http.ResponseWriter, r *http.Request) { - id, err := paramID("id", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) - return - } - - ctx := r.Context() - e, err := s.Store.Dashboards(ctx).Get(ctx, chronograf.DashboardID(id)) - if err != nil { - notFound(w, id, s.Logger) - return - } - - if err := s.Store.Dashboards(ctx).Delete(ctx, e); err != nil { - unknownErrorWithMessage(w, err, s.Logger) - return - } - w.WriteHeader(http.StatusNoContent) -} - -// ReplaceDashboard completely replaces a dashboard -func (s *Service) ReplaceDashboard(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - idParam, err := paramID("id", r) - if err != nil { - msg := fmt.Sprintf("Could not parse dashboard ID: %s", err) - Error(w, http.StatusInternalServerError, msg, s.Logger) - } - id := chronograf.DashboardID(idParam) - - _, err = s.Store.Dashboards(ctx).Get(ctx, id) - if err != nil { - Error(w, http.StatusNotFound, fmt.Sprintf("ID %d not found", id), s.Logger) - return - } - - var req chronograf.Dashboard - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - invalidJSON(w, s.Logger) - return - } - req.ID = id - - defaultOrg, err := s.Store.Organizations(ctx).DefaultOrganization(ctx) - if err != nil { - unknownErrorWithMessage(w, err, s.Logger) - return - } - - if err := ValidDashboardRequest(&req, defaultOrg.ID); err != nil { - invalidData(w, err, s.Logger) - return - } - - if err := s.Store.Dashboards(ctx).Update(ctx, req); err != nil { - msg := fmt.Sprintf("Error updating dashboard ID %d: %v", id, err) - Error(w, http.StatusInternalServerError, msg, s.Logger) - return - } - - res := newDashboardResponse(req) - encodeJSON(w, http.StatusOK, res, s.Logger) -} - -// UpdateDashboard completely updates either the dashboard name or the cells -func (s *Service) UpdateDashboard(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - idParam, err := paramID("id", r) - if err != nil { - msg := fmt.Sprintf("Could not parse dashboard ID: %s", err) - Error(w, http.StatusInternalServerError, msg, s.Logger) - return - } - id := chronograf.DashboardID(idParam) - - orig, err := s.Store.Dashboards(ctx).Get(ctx, id) - if err != nil { - Error(w, http.StatusNotFound, fmt.Sprintf("ID %d not found", id), s.Logger) - return - } - - var req chronograf.Dashboard - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - invalidJSON(w, s.Logger) - return - } - req.ID = id - - if req.Name != "" { 
- orig.Name = req.Name - } else if len(req.Cells) > 0 { - defaultOrg, err := s.Store.Organizations(ctx).DefaultOrganization(ctx) - if err != nil { - unknownErrorWithMessage(w, err, s.Logger) - return - } - if err := ValidDashboardRequest(&req, defaultOrg.ID); err != nil { - invalidData(w, err, s.Logger) - return - } - orig.Cells = req.Cells - } else { - invalidData(w, fmt.Errorf("update must include either name or cells"), s.Logger) - return - } - - if err := s.Store.Dashboards(ctx).Update(ctx, orig); err != nil { - msg := fmt.Sprintf("Error updating dashboard ID %d: %v", id, err) - Error(w, http.StatusInternalServerError, msg, s.Logger) - return - } - - res := newDashboardResponse(orig) - encodeJSON(w, http.StatusOK, res, s.Logger) -} - -// ValidDashboardRequest verifies that the dashboard cells have a query -func ValidDashboardRequest(d *chronograf.Dashboard, defaultOrgID string) error { - if d.Organization == "" { - d.Organization = defaultOrgID - } - for i, c := range d.Cells { - if err := ValidDashboardCellRequest(&c); err != nil { - return err - } - d.Cells[i] = c - } - for _, t := range d.Templates { - if err := ValidTemplateRequest(&t); err != nil { - return err - } - } - (*d) = DashboardDefaults(*d) - return nil -} - -// DashboardDefaults updates the dashboard with the default values -// if none are specified -func DashboardDefaults(d chronograf.Dashboard) (newDash chronograf.Dashboard) { - newDash.ID = d.ID - newDash.Templates = d.Templates - newDash.Name = d.Name - newDash.Organization = d.Organization - newDash.Cells = make([]chronograf.DashboardCell, len(d.Cells)) - - for i, c := range d.Cells { - CorrectWidthHeight(&c) - newDash.Cells[i] = c - } - return -} - -// AddQueryConfigs updates all the celsl in the dashboard to have query config -// objects corresponding to their influxql queries. -func AddQueryConfigs(d chronograf.Dashboard) (newDash chronograf.Dashboard) { - newDash.ID = d.ID - newDash.Templates = d.Templates - newDash.Name = d.Name - newDash.Cells = make([]chronograf.DashboardCell, len(d.Cells)) - - for i, c := range d.Cells { - AddQueryConfig(&c) - newDash.Cells[i] = c - } - return -} diff --git a/chronograf/server/dashboards_test.go b/chronograf/server/dashboards_test.go deleted file mode 100644 index a5ef0b7d9a9..00000000000 --- a/chronograf/server/dashboards_test.go +++ /dev/null @@ -1,366 +0,0 @@ -package server - -import ( - "reflect" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/chronograf" -) - -func TestCorrectWidthHeight(t *testing.T) { - t.Parallel() - tests := []struct { - name string - cell chronograf.DashboardCell - want chronograf.DashboardCell - }{ - { - name: "updates width", - cell: chronograf.DashboardCell{ - W: 0, - H: 4, - }, - want: chronograf.DashboardCell{ - W: 4, - H: 4, - }, - }, - { - name: "updates height", - cell: chronograf.DashboardCell{ - W: 4, - H: 0, - }, - want: chronograf.DashboardCell{ - W: 4, - H: 4, - }, - }, - { - name: "updates both", - cell: chronograf.DashboardCell{ - W: 0, - H: 0, - }, - want: chronograf.DashboardCell{ - W: 4, - H: 4, - }, - }, - { - name: "updates neither", - cell: chronograf.DashboardCell{ - W: 4, - H: 4, - }, - want: chronograf.DashboardCell{ - W: 4, - H: 4, - }, - }, - } - for _, tt := range tests { - if CorrectWidthHeight(&tt.cell); !reflect.DeepEqual(tt.cell, tt.want) { - t.Errorf("%q. 
CorrectWidthHeight() = %v, want %v", tt.name, tt.cell, tt.want) - } - } -} - -func TestDashboardDefaults(t *testing.T) { - tests := []struct { - name string - d chronograf.Dashboard - want chronograf.Dashboard - }{ - { - name: "Updates all cell widths/heights", - d: chronograf.Dashboard{ - Cells: []chronograf.DashboardCell{ - { - W: 0, - H: 0, - }, - { - W: 2, - H: 2, - }, - }, - }, - want: chronograf.Dashboard{ - Cells: []chronograf.DashboardCell{ - { - W: 4, - H: 4, - }, - { - W: 2, - H: 2, - }, - }, - }, - }, - { - name: "Updates no cell", - d: chronograf.Dashboard{ - Cells: []chronograf.DashboardCell{ - { - W: 4, - H: 4, - }, { - W: 2, - H: 2, - }, - }, - }, - want: chronograf.Dashboard{ - Cells: []chronograf.DashboardCell{ - { - W: 4, - H: 4, - }, - { - W: 2, - H: 2, - }, - }, - }, - }, - } - for _, tt := range tests { - if actual := DashboardDefaults(tt.d); !reflect.DeepEqual(actual, tt.want) { - t.Errorf("%q. DashboardDefaults() = %v, want %v", tt.name, tt.d, tt.want) - } - } -} - -func TestValidDashboardRequest(t *testing.T) { - tests := []struct { - name string - d chronograf.Dashboard - want chronograf.Dashboard - wantErr bool - }{ - { - name: "Updates all cell widths/heights", - d: chronograf.Dashboard{ - Organization: "1337", - Cells: []chronograf.DashboardCell{ - { - W: 0, - H: 0, - Queries: []chronograf.DashboardQuery{ - { - Command: "SELECT donors from hill_valley_preservation_society where time > 1985-10-25T08:00:00", - }, - }, - }, - { - W: 2, - H: 2, - Queries: []chronograf.DashboardQuery{ - { - Command: "SELECT winning_horses from grays_sports_alamanc where time > 1955-11-1T00:00:00", - }, - }, - }, - }, - }, - want: chronograf.Dashboard{ - Organization: "1337", - Cells: []chronograf.DashboardCell{ - { - W: 4, - H: 4, - Queries: []chronograf.DashboardQuery{ - { - Command: "SELECT donors from hill_valley_preservation_society where time > 1985-10-25T08:00:00", - }, - }, - }, - { - W: 2, - H: 2, - Queries: []chronograf.DashboardQuery{ - { - Command: "SELECT winning_horses from grays_sports_alamanc where time > 1955-11-1T00:00:00", - }, - }, - }, - }, - }, - }, - } - for _, tt := range tests { - // TODO(desa): this Okay? - err := ValidDashboardRequest(&tt.d, "0") - if (err != nil) != tt.wantErr { - t.Errorf("%q. ValidDashboardRequest() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - if diff := cmp.Diff(tt.d, tt.want); diff != "" { - t.Errorf("%q. ValidDashboardRequest(). 
got/want diff:\n%s", tt.name, diff) - } - } -} - -func Test_newDashboardResponse(t *testing.T) { - tests := []struct { - name string - d chronograf.Dashboard - want *dashboardResponse - }{ - { - name: "creates a dashboard response", - d: chronograf.Dashboard{ - Organization: "0", - Cells: []chronograf.DashboardCell{ - { - ID: "a", - W: 0, - H: 0, - Queries: []chronograf.DashboardQuery{ - { - Source: "/chronograf/v1/sources/1", - Command: "SELECT donors from hill_valley_preservation_society where time > '1985-10-25 08:00:00'", - Shifts: []chronograf.TimeShift{ - { - Label: "Best Week Evar", - Unit: "d", - Quantity: "7", - }, - }, - }, - }, - Axes: map[string]chronograf.Axis{ - "x": chronograf.Axis{ - Bounds: []string{"0", "100"}, - }, - "y": chronograf.Axis{ - Bounds: []string{"2", "95"}, - Label: "foo", - }, - }, - }, - { - ID: "b", - W: 0, - H: 0, - Queries: []chronograf.DashboardQuery{ - { - Source: "/chronograf/v1/sources/2", - Command: "SELECT winning_horses from grays_sports_alamanc where time > now() - 15m", - }, - }, - }, - }, - }, - want: &dashboardResponse{ - Organization: "0", - Templates: []templateResponse{}, - Cells: []dashboardCellResponse{ - dashboardCellResponse{ - Links: dashboardCellLinks{ - Self: "/chronograf/v1/dashboards/0/cells/a", - }, - DashboardCell: chronograf.DashboardCell{ - ID: "a", - W: 4, - H: 4, - Queries: []chronograf.DashboardQuery{ - { - Command: "SELECT donors from hill_valley_preservation_society where time > '1985-10-25 08:00:00'", - Source: "/chronograf/v1/sources/1", - QueryConfig: chronograf.QueryConfig{ - RawText: &[]string{"SELECT donors from hill_valley_preservation_society where time > '1985-10-25 08:00:00'"}[0], - Fields: []chronograf.Field{}, - GroupBy: chronograf.GroupBy{ - Tags: []string{}, - }, - Tags: make(map[string][]string), - AreTagsAccepted: false, - Shifts: []chronograf.TimeShift{ - { - Label: "Best Week Evar", - Unit: "d", - Quantity: "7", - }, - }, - }, - }, - }, - CellColors: []chronograf.CellColor{}, - Axes: map[string]chronograf.Axis{ - "x": chronograf.Axis{ - Bounds: []string{"0", "100"}, - }, - "y": chronograf.Axis{ - Bounds: []string{"2", "95"}, - Label: "foo", - }, - "y2": chronograf.Axis{ - Bounds: []string{"", ""}, - }, - }, - }, - }, - dashboardCellResponse{ - Links: dashboardCellLinks{ - Self: "/chronograf/v1/dashboards/0/cells/b", - }, - DashboardCell: chronograf.DashboardCell{ - ID: "b", - W: 4, - H: 4, - Axes: map[string]chronograf.Axis{ - "x": chronograf.Axis{ - Bounds: []string{"", ""}, - }, - "y": chronograf.Axis{ - Bounds: []string{"", ""}, - }, - "y2": chronograf.Axis{ - Bounds: []string{"", ""}, - }, - }, - CellColors: []chronograf.CellColor{}, - Queries: []chronograf.DashboardQuery{ - { - Command: "SELECT winning_horses from grays_sports_alamanc where time > now() - 15m", - Source: "/chronograf/v1/sources/2", - QueryConfig: chronograf.QueryConfig{ - Measurement: "grays_sports_alamanc", - Fields: []chronograf.Field{ - { - Type: "field", - Value: "winning_horses", - }, - }, - GroupBy: chronograf.GroupBy{ - Tags: []string{}, - }, - Tags: make(map[string][]string), - AreTagsAccepted: false, - Range: &chronograf.DurationRange{ - Lower: "now() - 15m", - }, - }, - }, - }, - }, - }, - }, - Links: dashboardLinks{ - Self: "/chronograf/v1/dashboards/0", - Cells: "/chronograf/v1/dashboards/0/cells", - Templates: "/chronograf/v1/dashboards/0/templates", - }, - }, - }, - } - for _, tt := range tests { - if got := newDashboardResponse(tt.d); !cmp.Equal(got, tt.want) { - t.Errorf("%q. 
newDashboardResponse() = diff:\n%s", tt.name, cmp.Diff(got, tt.want)) - } - } -} diff --git a/chronograf/server/databases.go b/chronograf/server/databases.go deleted file mode 100644 index d79ea29d1ff..00000000000 --- a/chronograf/server/databases.go +++ /dev/null @@ -1,519 +0,0 @@ -package server - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strconv" - - "github.com/influxdata/httprouter" - "github.com/influxdata/influxdb/v2/chronograf" -) - -const ( - limitQuery = "limit" - offsetQuery = "offset" -) - -type dbLinks struct { - Self string `json:"self"` // Self link mapping to this resource - RPs string `json:"retentionPolicies"` // URL for retention policies for this database - Measurements string `json:"measurements"` // URL for measurements for this database -} - -type dbResponse struct { - Name string `json:"name"` // a unique string identifier for the database - Duration string `json:"duration,omitempty"` // the duration (when creating a default retention policy) - Replication int32 `json:"replication,omitempty"` // the replication factor (when creating a default retention policy) - ShardDuration string `json:"shardDuration,omitempty"` // the shard duration (when creating a default retention policy) - RPs []rpResponse `json:"retentionPolicies"` // RPs are the retention policies for a database - Links dbLinks `json:"links"` // Links are URI locations related to the database -} - -// newDBResponse creates the response for the /databases endpoint -func newDBResponse(srcID int, db string, rps []rpResponse) dbResponse { - base := "/chronograf/v1/sources" - return dbResponse{ - Name: db, - RPs: rps, - Links: dbLinks{ - Self: fmt.Sprintf("%s/%d/dbs/%s", base, srcID, db), - RPs: fmt.Sprintf("%s/%d/dbs/%s/rps", base, srcID, db), - Measurements: fmt.Sprintf("%s/%d/dbs/%s/measurements?limit=100&offset=0", base, srcID, db), - }, - } -} - -type dbsResponse struct { - Databases []dbResponse `json:"databases"` -} - -type rpLinks struct { - Self string `json:"self"` // Self link mapping to this resource -} - -type rpResponse struct { - Name string `json:"name"` // a unique string identifier for the retention policy - Duration string `json:"duration"` // the duration - Replication int32 `json:"replication"` // the replication factor - ShardDuration string `json:"shardDuration"` // the shard duration - Default bool `json:"isDefault"` // whether the RP should be the default - Links rpLinks `json:"links"` // Links are URI locations related to the database -} - -// WithLinks adds links to an rpResponse in place -func (r *rpResponse) WithLinks(srcID int, db string) { - base := "/chronograf/v1/sources" - r.Links = rpLinks{ - Self: fmt.Sprintf("%s/%d/dbs/%s/rps/%s", base, srcID, db, r.Name), - } -} - -type measurementLinks struct { - Self string `json:"self"` - First string `json:"first"` - Next string `json:"next,omitempty"` - Prev string `json:"prev,omitempty"` -} - -func newMeasurementLinks(src int, db string, limit, offset int) measurementLinks { - base := "/chronograf/v1/sources" - res := measurementLinks{ - Self: fmt.Sprintf("%s/%d/dbs/%s/measurements?limit=%d&offset=%d", base, src, db, limit, offset), - First: fmt.Sprintf("%s/%d/dbs/%s/measurements?limit=%d&offset=0", base, src, db, limit), - Next: fmt.Sprintf("%s/%d/dbs/%s/measurements?limit=%d&offset=%d", base, src, db, limit, offset+limit), - } - if offset-limit > 0 { - res.Prev = fmt.Sprintf("%s/%d/dbs/%s/measurements?limit=%d&offset=%d", base, src, db, limit, offset-limit) - } - - return res -} - 
-type measurementsResponse struct { - Measurements []chronograf.Measurement `json:"measurements"` // names of all measurements - Links measurementLinks `json:"links"` // Links are the URI locations for measurements pages -} - -// GetDatabases queries the list of all databases for a source -func (h *Service) GetDatabases(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - srcID, err := paramID("id", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger) - return - } - - src, err := h.Store.Sources(ctx).Get(ctx, srcID) - if err != nil { - notFound(w, srcID, h.Logger) - return - } - - dbsvc := h.Databases - if err = dbsvc.Connect(ctx, &src); err != nil { - msg := fmt.Sprintf("unable to connect to source %d: %v", srcID, err) - Error(w, http.StatusBadRequest, msg, h.Logger) - return - } - - databases, err := dbsvc.AllDB(ctx) - if err != nil { - Error(w, http.StatusBadRequest, err.Error(), h.Logger) - return - } - - dbs := make([]dbResponse, len(databases)) - for i, d := range databases { - rps, err := h.allRPs(ctx, dbsvc, srcID, d.Name) - if err != nil { - Error(w, http.StatusBadRequest, err.Error(), h.Logger) - return - } - dbs[i] = newDBResponse(srcID, d.Name, rps) - } - - res := dbsResponse{ - Databases: dbs, - } - - encodeJSON(w, http.StatusOK, res, h.Logger) -} - -// NewDatabase creates a new database within the datastore -func (h *Service) NewDatabase(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - srcID, err := paramID("id", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger) - return - } - - src, err := h.Store.Sources(ctx).Get(ctx, srcID) - if err != nil { - notFound(w, srcID, h.Logger) - return - } - - dbsvc := h.Databases - - if err = dbsvc.Connect(ctx, &src); err != nil { - msg := fmt.Sprintf("unable to connect to source %d: %v", srcID, err) - Error(w, http.StatusBadRequest, msg, h.Logger) - return - } - - postedDB := &chronograf.Database{} - if err := json.NewDecoder(r.Body).Decode(postedDB); err != nil { - invalidJSON(w, h.Logger) - return - } - - if err := ValidDatabaseRequest(postedDB); err != nil { - invalidData(w, err, h.Logger) - return - } - - database, err := dbsvc.CreateDB(ctx, postedDB) - if err != nil { - Error(w, http.StatusBadRequest, err.Error(), h.Logger) - return - } - - rps, err := h.allRPs(ctx, dbsvc, srcID, database.Name) - if err != nil { - Error(w, http.StatusBadRequest, err.Error(), h.Logger) - return - } - res := newDBResponse(srcID, database.Name, rps) - encodeJSON(w, http.StatusCreated, res, h.Logger) -} - -// DropDatabase removes a database from a data source -func (h *Service) DropDatabase(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - srcID, err := paramID("id", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger) - return - } - - src, err := h.Store.Sources(ctx).Get(ctx, srcID) - if err != nil { - notFound(w, srcID, h.Logger) - return - } - - dbsvc := h.Databases - - if err = dbsvc.Connect(ctx, &src); err != nil { - msg := fmt.Sprintf("unable to connect to source %d: %v", srcID, err) - Error(w, http.StatusBadRequest, msg, h.Logger) - return - } - - db := httprouter.ParamsFromContext(ctx).ByName("db") - - dropErr := dbsvc.DropDB(ctx, db) - if dropErr != nil { - Error(w, http.StatusBadRequest, dropErr.Error(), h.Logger) - return - } - - w.WriteHeader(http.StatusNoContent) -} - -// RetentionPolicies lists retention policies within a database -func (h *Service) RetentionPolicies(w http.ResponseWriter, r 
*http.Request) { - ctx := r.Context() - - srcID, err := paramID("id", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger) - return - } - - src, err := h.Store.Sources(ctx).Get(ctx, srcID) - if err != nil { - notFound(w, srcID, h.Logger) - return - } - - dbsvc := h.Databases - if err = dbsvc.Connect(ctx, &src); err != nil { - msg := fmt.Sprintf("unable to connect to source %d: %v", srcID, err) - Error(w, http.StatusBadRequest, msg, h.Logger) - return - } - - db := httprouter.ParamsFromContext(ctx).ByName("db") - res, err := h.allRPs(ctx, dbsvc, srcID, db) - if err != nil { - msg := fmt.Sprintf("unable to connect get RPs %d: %v", srcID, err) - Error(w, http.StatusBadRequest, msg, h.Logger) - return - } - encodeJSON(w, http.StatusOK, res, h.Logger) -} - -func (h *Service) allRPs(ctx context.Context, dbsvc chronograf.Databases, srcID int, db string) ([]rpResponse, error) { - allRP, err := dbsvc.AllRP(ctx, db) - if err != nil { - return nil, err - } - - rps := make([]rpResponse, len(allRP)) - for i, rp := range allRP { - rp := rpResponse{ - Name: rp.Name, - Duration: rp.Duration, - Replication: rp.Replication, - ShardDuration: rp.ShardDuration, - Default: rp.Default, - } - rp.WithLinks(srcID, db) - rps[i] = rp - } - return rps, nil -} - -// NewRetentionPolicy creates a new retention policy for a database -func (h *Service) NewRetentionPolicy(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - srcID, err := paramID("id", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger) - return - } - - src, err := h.Store.Sources(ctx).Get(ctx, srcID) - if err != nil { - notFound(w, srcID, h.Logger) - return - } - - dbsvc := h.Databases - if err = dbsvc.Connect(ctx, &src); err != nil { - msg := fmt.Sprintf("unable to connect to source %d: %v", srcID, err) - Error(w, http.StatusBadRequest, msg, h.Logger) - return - } - - postedRP := &chronograf.RetentionPolicy{} - if err := json.NewDecoder(r.Body).Decode(postedRP); err != nil { - invalidJSON(w, h.Logger) - return - } - if err := ValidRetentionPolicyRequest(postedRP); err != nil { - invalidData(w, err, h.Logger) - return - } - - db := httprouter.ParamsFromContext(ctx).ByName("db") - rp, err := dbsvc.CreateRP(ctx, db, postedRP) - if err != nil { - Error(w, http.StatusBadRequest, err.Error(), h.Logger) - return - } - res := rpResponse{ - Name: rp.Name, - Duration: rp.Duration, - Replication: rp.Replication, - ShardDuration: rp.ShardDuration, - Default: rp.Default, - } - res.WithLinks(srcID, db) - encodeJSON(w, http.StatusCreated, res, h.Logger) -} - -// UpdateRetentionPolicy modifies an existing retention policy for a database -func (h *Service) UpdateRetentionPolicy(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - srcID, err := paramID("id", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger) - return - } - - src, err := h.Store.Sources(ctx).Get(ctx, srcID) - if err != nil { - notFound(w, srcID, h.Logger) - return - } - - dbsvc := h.Databases - if err = dbsvc.Connect(ctx, &src); err != nil { - msg := fmt.Sprintf("unable to connect to source %d: %v", srcID, err) - Error(w, http.StatusBadRequest, msg, h.Logger) - return - } - - postedRP := &chronograf.RetentionPolicy{} - if err := json.NewDecoder(r.Body).Decode(postedRP); err != nil { - invalidJSON(w, h.Logger) - return - } - if err := ValidRetentionPolicyRequest(postedRP); err != nil { - invalidData(w, err, h.Logger) - return - } - - params := httprouter.ParamsFromContext(ctx) - 
db := params.ByName("db") - rp := params.ByName("rp") - p, err := dbsvc.UpdateRP(ctx, db, rp, postedRP) - - if err != nil { - Error(w, http.StatusBadRequest, err.Error(), h.Logger) - return - } - - res := rpResponse{ - Name: p.Name, - Duration: p.Duration, - Replication: p.Replication, - ShardDuration: p.ShardDuration, - Default: p.Default, - } - res.WithLinks(srcID, db) - encodeJSON(w, http.StatusCreated, res, h.Logger) -} - -// DropRetentionPolicy removes a retention policy from a database -func (s *Service) DropRetentionPolicy(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - srcID, err := paramID("id", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) - return - } - - src, err := s.Store.Sources(ctx).Get(ctx, srcID) - if err != nil { - notFound(w, srcID, s.Logger) - return - } - - dbsvc := s.Databases - if err = dbsvc.Connect(ctx, &src); err != nil { - msg := fmt.Sprintf("unable to connect to source %d: %v", srcID, err) - Error(w, http.StatusBadRequest, msg, s.Logger) - return - } - - params := httprouter.ParamsFromContext(ctx) - db := params.ByName("db") - rp := params.ByName("rp") - dropErr := dbsvc.DropRP(ctx, db, rp) - if dropErr != nil { - Error(w, http.StatusBadRequest, dropErr.Error(), s.Logger) - return - } - - w.WriteHeader(http.StatusNoContent) -} - -// Measurements lists measurements within a database -func (h *Service) Measurements(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - srcID, err := paramID("id", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger) - return - } - - limit, offset, err := validMeasurementQuery(r.URL.Query()) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger) - return - } - - src, err := h.Store.Sources(ctx).Get(ctx, srcID) - if err != nil { - notFound(w, srcID, h.Logger) - return - } - - dbsvc := h.Databases - if err = dbsvc.Connect(ctx, &src); err != nil { - msg := fmt.Sprintf("unable to connect to source %d: %v", srcID, err) - Error(w, http.StatusBadRequest, msg, h.Logger) - return - } - - db := httprouter.ParamsFromContext(ctx).ByName("db") - measurements, err := dbsvc.GetMeasurements(ctx, db, limit, offset) - if err != nil { - msg := fmt.Sprintf("Unable to get measurements %d: %v", srcID, err) - Error(w, http.StatusBadRequest, msg, h.Logger) - return - } - - res := measurementsResponse{ - Measurements: measurements, - Links: newMeasurementLinks(srcID, db, limit, offset), - } - - encodeJSON(w, http.StatusOK, res, h.Logger) -} - -func validMeasurementQuery(query url.Values) (limit, offset int, err error) { - limitParam := query.Get(limitQuery) - if limitParam == "" { - limit = 100 - } else { - limit, err = strconv.Atoi(limitParam) - if err != nil { - return - } - if limit <= 0 { - limit = 100 - } - } - - offsetParam := query.Get(offsetQuery) - if offsetParam == "" { - offset = 0 - } else { - offset, err = strconv.Atoi(offsetParam) - if err != nil { - return - } - if offset < 0 { - offset = 0 - } - } - - return -} - -// ValidDatabaseRequest checks if the database posted is valid -func ValidDatabaseRequest(d *chronograf.Database) error { - if len(d.Name) == 0 { - return fmt.Errorf("name is required") - } - return nil -} - -// ValidRetentionPolicyRequest checks if a retention policy is valid on POST -func ValidRetentionPolicyRequest(rp *chronograf.RetentionPolicy) error { - if len(rp.Name) == 0 { - return fmt.Errorf("name is required") - } - if len(rp.Duration) == 0 { - return fmt.Errorf("duration is required") - 
} - if rp.Replication == 0 { - return fmt.Errorf("replication factor is invalid") - } - return nil -} diff --git a/chronograf/server/databases_test.go b/chronograf/server/databases_test.go deleted file mode 100644 index 5372da0806b..00000000000 --- a/chronograf/server/databases_test.go +++ /dev/null @@ -1,648 +0,0 @@ -package server - -import ( - "context" - "encoding/json" - "io/ioutil" - "net/http" - "net/http/httptest" - "testing" - - "github.com/influxdata/httprouter" - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/mocks" -) - -func TestService_GetDatabases(t *testing.T) { - type fields struct { - SourcesStore chronograf.SourcesStore - ServersStore chronograf.ServersStore - LayoutsStore chronograf.LayoutsStore - UsersStore chronograf.UsersStore - DashboardsStore chronograf.DashboardsStore - TimeSeriesClient TimeSeriesClient - Logger chronograf.Logger - UseAuth bool - Databases chronograf.Databases - } - type args struct { - w http.ResponseWriter - r *http.Request - } - tests := []struct { - name string - fields fields - args args - }{ - // TODO: Add test cases. - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - h := &Service{ - Store: &Store{ - SourcesStore: tt.fields.SourcesStore, - ServersStore: tt.fields.ServersStore, - LayoutsStore: tt.fields.LayoutsStore, - UsersStore: tt.fields.UsersStore, - DashboardsStore: tt.fields.DashboardsStore, - }, - TimeSeriesClient: tt.fields.TimeSeriesClient, - Logger: tt.fields.Logger, - UseAuth: tt.fields.UseAuth, - Databases: tt.fields.Databases, - } - h.GetDatabases(tt.args.w, tt.args.r) - }) - } -} - -func TestService_NewDatabase(t *testing.T) { - type fields struct { - SourcesStore chronograf.SourcesStore - ServersStore chronograf.ServersStore - LayoutsStore chronograf.LayoutsStore - UsersStore chronograf.UsersStore - DashboardsStore chronograf.DashboardsStore - TimeSeriesClient TimeSeriesClient - Logger chronograf.Logger - UseAuth bool - Databases chronograf.Databases - } - type args struct { - w http.ResponseWriter - r *http.Request - } - tests := []struct { - name string - fields fields - args args - }{ - // TODO: Add test cases. - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - h := &Service{ - Store: &Store{ - SourcesStore: tt.fields.SourcesStore, - ServersStore: tt.fields.ServersStore, - LayoutsStore: tt.fields.LayoutsStore, - UsersStore: tt.fields.UsersStore, - DashboardsStore: tt.fields.DashboardsStore, - }, - TimeSeriesClient: tt.fields.TimeSeriesClient, - Logger: tt.fields.Logger, - UseAuth: tt.fields.UseAuth, - Databases: tt.fields.Databases, - } - h.NewDatabase(tt.args.w, tt.args.r) - }) - } -} - -func TestService_DropDatabase(t *testing.T) { - type fields struct { - SourcesStore chronograf.SourcesStore - ServersStore chronograf.ServersStore - LayoutsStore chronograf.LayoutsStore - UsersStore chronograf.UsersStore - DashboardsStore chronograf.DashboardsStore - TimeSeriesClient TimeSeriesClient - Logger chronograf.Logger - UseAuth bool - Databases chronograf.Databases - } - type args struct { - w http.ResponseWriter - r *http.Request - } - tests := []struct { - name string - fields fields - args args - }{ - // TODO: Add test cases. 
- } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - h := &Service{ - Store: &Store{ - SourcesStore: tt.fields.SourcesStore, - ServersStore: tt.fields.ServersStore, - LayoutsStore: tt.fields.LayoutsStore, - UsersStore: tt.fields.UsersStore, - DashboardsStore: tt.fields.DashboardsStore, - }, - TimeSeriesClient: tt.fields.TimeSeriesClient, - Logger: tt.fields.Logger, - UseAuth: tt.fields.UseAuth, - Databases: tt.fields.Databases, - } - h.DropDatabase(tt.args.w, tt.args.r) - }) - } -} - -func TestService_RetentionPolicies(t *testing.T) { - type fields struct { - SourcesStore chronograf.SourcesStore - ServersStore chronograf.ServersStore - LayoutsStore chronograf.LayoutsStore - UsersStore chronograf.UsersStore - DashboardsStore chronograf.DashboardsStore - TimeSeriesClient TimeSeriesClient - Logger chronograf.Logger - UseAuth bool - Databases chronograf.Databases - } - type args struct { - w http.ResponseWriter - r *http.Request - } - tests := []struct { - name string - fields fields - args args - }{ - // TODO: Add test cases. - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - h := &Service{ - Store: &Store{ - SourcesStore: tt.fields.SourcesStore, - ServersStore: tt.fields.ServersStore, - LayoutsStore: tt.fields.LayoutsStore, - UsersStore: tt.fields.UsersStore, - DashboardsStore: tt.fields.DashboardsStore, - }, - TimeSeriesClient: tt.fields.TimeSeriesClient, - Logger: tt.fields.Logger, - UseAuth: tt.fields.UseAuth, - Databases: tt.fields.Databases, - } - h.RetentionPolicies(tt.args.w, tt.args.r) - }) - } -} - -func TestService_NewRetentionPolicy(t *testing.T) { - type fields struct { - SourcesStore chronograf.SourcesStore - ServersStore chronograf.ServersStore - LayoutsStore chronograf.LayoutsStore - UsersStore chronograf.UsersStore - DashboardsStore chronograf.DashboardsStore - TimeSeriesClient TimeSeriesClient - Logger chronograf.Logger - UseAuth bool - Databases chronograf.Databases - } - type args struct { - w http.ResponseWriter - r *http.Request - } - tests := []struct { - name string - fields fields - args args - }{ - // TODO: Add test cases. - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - h := &Service{ - Store: &Store{ - SourcesStore: tt.fields.SourcesStore, - ServersStore: tt.fields.ServersStore, - LayoutsStore: tt.fields.LayoutsStore, - UsersStore: tt.fields.UsersStore, - DashboardsStore: tt.fields.DashboardsStore, - }, - TimeSeriesClient: tt.fields.TimeSeriesClient, - Logger: tt.fields.Logger, - UseAuth: tt.fields.UseAuth, - Databases: tt.fields.Databases, - } - h.NewRetentionPolicy(tt.args.w, tt.args.r) - }) - } -} - -func TestService_UpdateRetentionPolicy(t *testing.T) { - type fields struct { - SourcesStore chronograf.SourcesStore - ServersStore chronograf.ServersStore - LayoutsStore chronograf.LayoutsStore - UsersStore chronograf.UsersStore - DashboardsStore chronograf.DashboardsStore - TimeSeriesClient TimeSeriesClient - Logger chronograf.Logger - UseAuth bool - Databases chronograf.Databases - } - type args struct { - w http.ResponseWriter - r *http.Request - } - tests := []struct { - name string - fields fields - args args - }{ - // TODO: Add test cases. 
- } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - h := &Service{ - Store: &Store{ - SourcesStore: tt.fields.SourcesStore, - ServersStore: tt.fields.ServersStore, - LayoutsStore: tt.fields.LayoutsStore, - UsersStore: tt.fields.UsersStore, - DashboardsStore: tt.fields.DashboardsStore, - }, - TimeSeriesClient: tt.fields.TimeSeriesClient, - Logger: tt.fields.Logger, - UseAuth: tt.fields.UseAuth, - Databases: tt.fields.Databases, - } - h.UpdateRetentionPolicy(tt.args.w, tt.args.r) - }) - } -} - -func TestService_DropRetentionPolicy(t *testing.T) { - type fields struct { - SourcesStore chronograf.SourcesStore - ServersStore chronograf.ServersStore - LayoutsStore chronograf.LayoutsStore - UsersStore chronograf.UsersStore - DashboardsStore chronograf.DashboardsStore - TimeSeriesClient TimeSeriesClient - Logger chronograf.Logger - UseAuth bool - Databases chronograf.Databases - } - type args struct { - w http.ResponseWriter - r *http.Request - } - tests := []struct { - name string - fields fields - args args - }{ - // TODO: Add test cases. - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - h := &Service{ - Store: &Store{ - SourcesStore: tt.fields.SourcesStore, - ServersStore: tt.fields.ServersStore, - LayoutsStore: tt.fields.LayoutsStore, - UsersStore: tt.fields.UsersStore, - DashboardsStore: tt.fields.DashboardsStore, - }, - TimeSeriesClient: tt.fields.TimeSeriesClient, - Logger: tt.fields.Logger, - UseAuth: tt.fields.UseAuth, - Databases: tt.fields.Databases, - } - h.DropRetentionPolicy(tt.args.w, tt.args.r) - }) - } -} - -func TestService_Measurements(t *testing.T) { - type fields struct { - SourcesStore chronograf.SourcesStore - Logger chronograf.Logger - Databases chronograf.Databases - } - type args struct { - queryParams map[string]string - } - type wants struct { - statusCode int - body string - } - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "Gets 100 measurements when no limit or offset provided", - fields: fields{ - SourcesStore: &mocks.SourcesStore{ - GetF: func(ctx context.Context, srcID int) (chronograf.Source, error) { - return chronograf.Source{ - ID: 0, - }, nil - }, - }, - Databases: &mocks.Databases{ - ConnectF: func(context.Context, *chronograf.Source) error { - return nil - }, - GetMeasurementsF: func(ctx context.Context, db string, limit, offset int) ([]chronograf.Measurement, error) { - return []chronograf.Measurement{ - { - Name: "pineapple", - }, - { - Name: "cubeapple", - }, - { - Name: "pinecube", - }, - }, nil - }, - }, - }, - args: args{ - queryParams: map[string]string{}, - }, - wants: wants{ - statusCode: 200, - body: `{"measurements":[{"name":"pineapple"},{"name":"cubeapple"},{"name":"pinecube"}],"links":{"self":"/chronograf/v1/sources/0/dbs/pineapples/measurements?limit=100\u0026offset=0","first":"/chronograf/v1/sources/0/dbs/pineapples/measurements?limit=100\u0026offset=0","next":"/chronograf/v1/sources/0/dbs/pineapples/measurements?limit=100\u0026offset=100"}} -`, - }, - }, - { - name: "Fails when invalid limit value provided", - fields: fields{ - SourcesStore: &mocks.SourcesStore{ - GetF: func(ctx context.Context, srcID int) (chronograf.Source, error) { - return chronograf.Source{ - ID: 0, - }, nil - }, - }, - }, - args: args{ - queryParams: map[string]string{ - "limit": "joe", - }, - }, - wants: wants{ - statusCode: 422, - body: `{"code":422,"message":"strconv.Atoi: parsing \"joe\": invalid syntax"}`, - }, - }, - { - name: "Fails when invalid offset value provided", - 
fields: fields{ - SourcesStore: &mocks.SourcesStore{ - GetF: func(ctx context.Context, srcID int) (chronograf.Source, error) { - return chronograf.Source{ - ID: 0, - }, nil - }, - }, - }, - args: args{ - queryParams: map[string]string{ - "offset": "bob", - }, - }, - wants: wants{ - statusCode: 422, - body: `{"code":422,"message":"strconv.Atoi: parsing \"bob\": invalid syntax"}`, - }, - }, - { - name: "Overrides limit less than or equal to 0 with limit 100", - fields: fields{ - SourcesStore: &mocks.SourcesStore{ - GetF: func(ctx context.Context, srcID int) (chronograf.Source, error) { - return chronograf.Source{ - ID: 0, - }, nil - }, - }, - Databases: &mocks.Databases{ - ConnectF: func(context.Context, *chronograf.Source) error { - return nil - }, - GetMeasurementsF: func(ctx context.Context, db string, limit, offset int) ([]chronograf.Measurement, error) { - return []chronograf.Measurement{ - { - Name: "pineapple", - }, - { - Name: "cubeapple", - }, - { - Name: "pinecube", - }, - }, nil - }, - }, - }, - args: args{ - queryParams: map[string]string{ - "limit": "0", - }, - }, - wants: wants{ - statusCode: 200, - body: `{"measurements":[{"name":"pineapple"},{"name":"cubeapple"},{"name":"pinecube"}],"links":{"self":"/chronograf/v1/sources/0/dbs/pineapples/measurements?limit=100\u0026offset=0","first":"/chronograf/v1/sources/0/dbs/pineapples/measurements?limit=100\u0026offset=0","next":"/chronograf/v1/sources/0/dbs/pineapples/measurements?limit=100\u0026offset=100"}} -`, - }, - }, - { - name: "Overrides offset less than 0 with offset 0", - fields: fields{ - SourcesStore: &mocks.SourcesStore{ - GetF: func(ctx context.Context, srcID int) (chronograf.Source, error) { - return chronograf.Source{ - ID: 0, - }, nil - }, - }, - Databases: &mocks.Databases{ - ConnectF: func(context.Context, *chronograf.Source) error { - return nil - }, - GetMeasurementsF: func(ctx context.Context, db string, limit, offset int) ([]chronograf.Measurement, error) { - return []chronograf.Measurement{ - { - Name: "pineapple", - }, - { - Name: "cubeapple", - }, - { - Name: "pinecube", - }, - }, nil - }, - }, - }, - args: args{ - queryParams: map[string]string{ - "offset": "-1337", - }, - }, - wants: wants{ - statusCode: 200, - body: `{"measurements":[{"name":"pineapple"},{"name":"cubeapple"},{"name":"pinecube"}],"links":{"self":"/chronograf/v1/sources/0/dbs/pineapples/measurements?limit=100\u0026offset=0","first":"/chronograf/v1/sources/0/dbs/pineapples/measurements?limit=100\u0026offset=0","next":"/chronograf/v1/sources/0/dbs/pineapples/measurements?limit=100\u0026offset=100"}} -`, - }, - }, - { - name: "Provides a prev link when offset exceeds limit", - fields: fields{ - SourcesStore: &mocks.SourcesStore{ - GetF: func(ctx context.Context, srcID int) (chronograf.Source, error) { - return chronograf.Source{ - ID: 0, - }, nil - }, - }, - Databases: &mocks.Databases{ - ConnectF: func(context.Context, *chronograf.Source) error { - return nil - }, - GetMeasurementsF: func(ctx context.Context, db string, limit, offset int) ([]chronograf.Measurement, error) { - return []chronograf.Measurement{ - { - Name: "pineapple", - }, - { - Name: "cubeapple", - }, - { - Name: "pinecube", - }, - { - Name: "billietta", - }, - { - Name: "bobbetta", - }, - { - Name: "bobcube", - }, - }, nil - }, - }, - }, - args: args{ - queryParams: map[string]string{ - "limit": "2", - "offset": "4", - }, - }, - wants: wants{ - statusCode: 200, - body: 
`{"measurements":[{"name":"pineapple"},{"name":"cubeapple"},{"name":"pinecube"},{"name":"billietta"},{"name":"bobbetta"},{"name":"bobcube"}],"links":{"self":"/chronograf/v1/sources/0/dbs/pineapples/measurements?limit=2\u0026offset=4","first":"/chronograf/v1/sources/0/dbs/pineapples/measurements?limit=2\u0026offset=0","next":"/chronograf/v1/sources/0/dbs/pineapples/measurements?limit=2\u0026offset=6","prev":"/chronograf/v1/sources/0/dbs/pineapples/measurements?limit=2\u0026offset=2"}} -`, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - logger := &chronograf.NoopLogger{} - h := &Service{ - Store: &mocks.Store{ - SourcesStore: tt.fields.SourcesStore, - }, - Logger: logger, - Databases: tt.fields.Databases, - } - - w := httptest.NewRecorder() - r := httptest.NewRequest( - "GET", - "http://any.url", - nil, - ) - r = r.WithContext(context.WithValue( - context.TODO(), - httprouter.ParamsKey, - httprouter.Params{ - { - Key: "id", - Value: "0", - }, - { - Key: "db", - Value: "pineapples", - }, - })) - - q := r.URL.Query() - for key, value := range tt.args.queryParams { - q.Add(key, value) - } - r.URL.RawQuery = q.Encode() - - h.Measurements(w, r) - - resp := w.Result() - body, err := ioutil.ReadAll(resp.Body) - defer resp.Body.Close() - - if err != nil { - t.Error("TestService_Measurements not able to retrieve body") - } - - var msmts measurementsResponse - if err := json.Unmarshal(body, &msmts); err != nil { - t.Error("TestService_Measurements not able to unmarshal JSON response") - } - - if tt.wants.statusCode != resp.StatusCode { - t.Errorf("%q. StatusCode:\nwant\n%v\ngot\n%v", tt.name, tt.wants.statusCode, resp.StatusCode) - } - - if tt.wants.body != string(body) { - t.Errorf("%q. Body:\nwant\n*%s*\ngot\n*%s*", tt.name, tt.wants.body, string(body)) - } - }) - } -} - -func TestValidDatabaseRequest(t *testing.T) { - type args struct { - d *chronograf.Database - } - tests := []struct { - name string - args args - wantErr bool - }{ - // TODO: Add test cases. - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if err := ValidDatabaseRequest(tt.args.d); (err != nil) != tt.wantErr { - t.Errorf("ValidDatabaseRequest() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} - -func TestValidRetentionPolicyRequest(t *testing.T) { - type args struct { - rp *chronograf.RetentionPolicy - } - tests := []struct { - name string - args args - wantErr bool - }{ - // TODO: Add test cases. 
- } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if err := ValidRetentionPolicyRequest(tt.args.rp); (err != nil) != tt.wantErr { - t.Errorf("ValidRetentionPolicyRequest() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} diff --git a/chronograf/server/env.go b/chronograf/server/env.go deleted file mode 100644 index 8573e77e0c9..00000000000 --- a/chronograf/server/env.go +++ /dev/null @@ -1,27 +0,0 @@ -package server - -import ( - "net/http" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -type envResponse struct { - Links selfLinks `json:"links"` - TelegrafSystemInterval string `json:"telegrafSystemInterval"` -} - -func newEnvResponse(env chronograf.Environment) *envResponse { - return &envResponse{ - Links: selfLinks{ - Self: "/chronograf/v1/env", - }, - TelegrafSystemInterval: env.TelegrafSystemInterval.String(), - } -} - -// Environment retrieves the global application configuration -func (s *Service) Environment(w http.ResponseWriter, r *http.Request) { - res := newEnvResponse(s.Env) - encodeJSON(w, http.StatusOK, res, s.Logger) -} diff --git a/chronograf/server/env_test.go b/chronograf/server/env_test.go deleted file mode 100644 index 07a8ee88f75..00000000000 --- a/chronograf/server/env_test.go +++ /dev/null @@ -1,69 +0,0 @@ -package server - -import ( - "io/ioutil" - "net/http/httptest" - "testing" - "time" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -func TestEnvironment(t *testing.T) { - type fields struct { - Environment chronograf.Environment - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - wants wants - }{ - { - name: "Get environment", - fields: fields{ - Environment: chronograf.Environment{ - TelegrafSystemInterval: 1 * time.Minute, - }, - }, - wants: wants{ - statusCode: 200, - contentType: "application/json", - body: `{"links":{"self":"/chronograf/v1/env"},"telegrafSystemInterval":"1m0s"}`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := &Service{ - Env: tt.fields.Environment, - Logger: &chronograf.NoopLogger{}, - } - - w := httptest.NewRecorder() - r := httptest.NewRequest("GET", "http://any.url", nil) - - s.Environment(w, r) - - resp := w.Result() - content := resp.Header.Get("Content-Type") - body, _ := ioutil.ReadAll(resp.Body) - - if resp.StatusCode != tt.wants.statusCode { - t.Errorf("%q. Config() = %v, want %v", tt.name, resp.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. Config() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if eq, _ := jsonEqual(string(body), tt.wants.body); tt.wants.body != "" && !eq { - t.Errorf("%q. 
Config() = \n***%v***\n,\nwant\n***%v***", tt.name, string(body), tt.wants.body) - } - }) - } -} diff --git a/chronograf/server/helpers.go b/chronograf/server/helpers.go deleted file mode 100644 index 89527349823..00000000000 --- a/chronograf/server/helpers.go +++ /dev/null @@ -1,7 +0,0 @@ -package server - -import "net/http" - -func location(w http.ResponseWriter, self string) { - w.Header().Add("Location", self) -} diff --git a/chronograf/server/hsts.go b/chronograf/server/hsts.go deleted file mode 100644 index 1b6f54d71aa..00000000000 --- a/chronograf/server/hsts.go +++ /dev/null @@ -1,12 +0,0 @@ -package server - -import "net/http" - -// HSTS add HTTP Strict Transport Security header with a max-age of two years -// Inspired from https://blog.bracebin.com/achieving-perfect-ssl-labs-score-with-go -func HSTS(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Add("Strict-Transport-Security", "max-age=63072000; includeSubDomains") - next.ServeHTTP(w, r) - }) -} diff --git a/chronograf/server/influx.go b/chronograf/server/influx.go deleted file mode 100644 index a8786879707..00000000000 --- a/chronograf/server/influx.go +++ /dev/null @@ -1,142 +0,0 @@ -package server - -import ( - "crypto/tls" - "encoding/json" - "fmt" - "net" - "net/http" - "net/http/httputil" - "net/url" - "time" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/influx" -) - -// ValidInfluxRequest checks if queries specify a command. -func ValidInfluxRequest(p chronograf.Query) error { - if p.Command == "" { - return fmt.Errorf("query field required") - } - return nil -} - -type postInfluxResponse struct { - Results interface{} `json:"results"` // results from influx -} - -// Influx proxies requests to influxdb. -func (s *Service) Influx(w http.ResponseWriter, r *http.Request) { - id, err := paramID("id", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) - return - } - - var req chronograf.Query - if err = json.NewDecoder(r.Body).Decode(&req); err != nil { - invalidJSON(w, s.Logger) - return - } - if err = ValidInfluxRequest(req); err != nil { - invalidData(w, err, s.Logger) - return - } - - ctx := r.Context() - src, err := s.Store.Sources(ctx).Get(ctx, id) - if err != nil { - notFound(w, id, s.Logger) - return - } - - ts, err := s.TimeSeries(src) - if err != nil { - msg := fmt.Sprintf("unable to connect to source %d: %v", id, err) - Error(w, http.StatusBadRequest, msg, s.Logger) - return - } - - if err = ts.Connect(ctx, &src); err != nil { - msg := fmt.Sprintf("unable to connect to source %d: %v", id, err) - Error(w, http.StatusBadRequest, msg, s.Logger) - return - } - - response, err := ts.Query(ctx, req) - if err != nil { - if err == chronograf.ErrUpstreamTimeout { - msg := "Timeout waiting for Influx response" - Error(w, http.StatusRequestTimeout, msg, s.Logger) - return - } - // TODO: Here I want to return the error code from influx. 
- Error(w, http.StatusBadRequest, err.Error(), s.Logger) - return - } - - res := postInfluxResponse{ - Results: response, - } - encodeJSON(w, http.StatusOK, res, s.Logger) -} - -func (s *Service) Write(w http.ResponseWriter, r *http.Request) { - id, err := paramID("id", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) - return - } - - ctx := r.Context() - src, err := s.Store.Sources(ctx).Get(ctx, id) - if err != nil { - notFound(w, id, s.Logger) - return - } - - u, err := url.Parse(src.URL) - if err != nil { - msg := fmt.Sprintf("Error parsing source url: %v", err) - Error(w, http.StatusUnprocessableEntity, msg, s.Logger) - return - } - u.Path = "/write" - u.RawQuery = r.URL.RawQuery - - director := func(req *http.Request) { - // Set the Host header of the original source URL - req.Host = u.Host - req.URL = u - // Because we are acting as a proxy, influxdb needs to have the - // basic auth or bearer token information set as a header directly - auth := influx.DefaultAuthorization(&src) - auth.Set(req) - } - - proxy := &httputil.ReverseProxy{ - Director: director, - } - - // The connection to influxdb is using a self-signed certificate. - // This modifies uses the same values as http.DefaultTransport but specifies - // InsecureSkipVerify - if src.InsecureSkipVerify { - proxy.Transport = &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - }).DialContext, - MaxIdleConns: 100, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, - } - } - - proxy.ServeHTTP(w, r) -} diff --git a/chronograf/server/influx_test.go b/chronograf/server/influx_test.go deleted file mode 100644 index 715fb6681e4..00000000000 --- a/chronograf/server/influx_test.go +++ /dev/null @@ -1,116 +0,0 @@ -package server - -import ( - "bytes" - "context" - "io/ioutil" - "net/http" - "net/http/httptest" - "testing" - - "github.com/influxdata/httprouter" - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/mocks" -) - -func TestService_Influx(t *testing.T) { - type fields struct { - SourcesStore chronograf.SourcesStore - TimeSeries TimeSeriesClient - } - type args struct { - w *httptest.ResponseRecorder - r *http.Request - } - type want struct { - StatusCode int - ContentType string - Body string - } - tests := []struct { - name string - fields fields - args args - ID string - want want - }{ - { - name: "Proxies request to Influxdb", - fields: fields{ - SourcesStore: &mocks.SourcesStore{ - GetF: func(ctx context.Context, ID int) (chronograf.Source, error) { - return chronograf.Source{ - ID: 1337, - URL: "http://any.url", - }, nil - }, - }, - TimeSeries: &mocks.TimeSeries{ - ConnectF: func(ctx context.Context, src *chronograf.Source) error { - return nil - }, - QueryF: func(ctx context.Context, query chronograf.Query) (chronograf.Response, error) { - return mocks.NewResponse( - `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["key","value"],"values":[["cpu","cpu-total"],["cpu","cpu0"],["cpu","cpu1"],["cpu","cpu2"],["cpu","cpu3"],["host","pineapples-MBP"],["host","pineapples-MacBook-Pro.local"]]}]}]}`, - nil, - ), - nil - }, - }, - }, - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest( - "POST", - "http://any.url", - ioutil.NopCloser( - bytes.NewReader([]byte( - 
`{"db":"bob", "rp":"joe", "query":"SELECT mean(\"usage_user\") FROM cpu WHERE \"cpu\" = 'cpu-total' AND time > now() - 10m GROUP BY host;"}`, - )), - ), - ), - }, - ID: "1", - want: want{ - StatusCode: http.StatusOK, - ContentType: "application/json", - Body: `{"results":{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["key","value"],"values":[["cpu","cpu-total"],["cpu","cpu0"],["cpu","cpu1"],["cpu","cpu2"],["cpu","cpu3"],["host","pineapples-MBP"],["host","pineapples-MacBook-Pro.local"]]}]}]}} -`, - }, - }, - } - - for _, tt := range tests { - tt.args.r = tt.args.r.WithContext(context.WithValue( - context.TODO(), - httprouter.ParamsKey, - httprouter.Params{ - { - Key: "id", - Value: tt.ID, - }, - })) - h := &Service{ - Store: &mocks.Store{ - SourcesStore: tt.fields.SourcesStore, - }, - TimeSeriesClient: tt.fields.TimeSeries, - } - h.Influx(tt.args.w, tt.args.r) - - resp := tt.args.w.Result() - contentType := resp.Header.Get("Content-Type") - body, _ := ioutil.ReadAll(resp.Body) - - if resp.StatusCode != tt.want.StatusCode { - t.Errorf("%q. Influx() = got %v, want %v", tt.name, resp.StatusCode, tt.want.StatusCode) - } - if contentType != tt.want.ContentType { - t.Errorf("%q. Influx() = got %v, want %v", tt.name, contentType, tt.want.ContentType) - } - if string(body) != tt.want.Body { - t.Errorf("%q. Influx() =\ngot ***%v***\nwant ***%v***\n", tt.name, string(body), tt.want.Body) - } - - } -} diff --git a/chronograf/server/kapacitors.go b/chronograf/server/kapacitors.go deleted file mode 100644 index 3fc09da3a7b..00000000000 --- a/chronograf/server/kapacitors.go +++ /dev/null @@ -1,791 +0,0 @@ -package server - -// TODO(desa): resolve kapacitor dependency - -//type postKapacitorRequest struct { -// Name *string `json:"name"` // User facing name of kapacitor instance.; Required: true -// URL *string `json:"url"` // URL for the kapacitor backend (e.g. http://localhost:9092);/ Required: true -// Username string `json:"username,omitempty"` // Username for authentication to kapacitor -// Password string `json:"password,omitempty"` -// InsecureSkipVerify bool `json:"insecureSkipVerify"` // InsecureSkipVerify as true means any certificate presented by the kapacitor is accepted. -// Active bool `json:"active"` -// Organization string `json:"organization"` // Organization is the organization ID that resource belongs to -//} -// -//func (p *postKapacitorRequest) Valid(defaultOrgID string) error { -// if p.Name == nil || p.URL == nil { -// return fmt.Errorf("name and url required") -// } -// -// if p.Organization == "" { -// p.Organization = defaultOrgID -// } -// -// url, err := url.ParseRequestURI(*p.URL) -// if err != nil { -// return fmt.Errorf("invalid source URI: %v", err) -// } -// if len(url.Scheme) == 0 { -// return fmt.Errorf("invalid URL; no URL scheme defined") -// } -// -// return nil -//} -// -//type kapaLinks struct { -// Proxy string `json:"proxy"` // URL location of proxy endpoint for this source -// Self string `json:"self"` // Self link mapping to this resource -// Rules string `json:"rules"` // Rules link for defining roles alerts for kapacitor -// Tasks string `json:"tasks"` // Tasks link to define a task against the proxy -// Ping string `json:"ping"` // Ping path to kapacitor -//} -// -//type kapacitor struct { -// ID int `json:"id,string"` // Unique identifier representing a kapacitor instance. -// Name string `json:"name"` // User facing name of kapacitor instance. -// URL string `json:"url"` // URL for the kapacitor backend (e.g. 
http://localhost:9092) -// Username string `json:"username,omitempty"` // Username for authentication to kapacitor -// Password string `json:"password,omitempty"` -// InsecureSkipVerify bool `json:"insecureSkipVerify"` // InsecureSkipVerify as true means any certificate presented by the kapacitor is accepted. -// Active bool `json:"active"` -// Links kapaLinks `json:"links"` // Links are URI locations related to kapacitor -//} -// -//// NewKapacitor adds valid kapacitor store store. -//func (s *Service) NewKapacitor(w http.ResponseWriter, r *http.Request) { -// srcID, err := paramID("id", r) -// if err != nil { -// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) -// return -// } -// -// ctx := r.Context() -// _, err = s.Store.Sources(ctx).Get(ctx, srcID) -// if err != nil { -// notFound(w, srcID, s.Logger) -// return -// } -// -// var req postKapacitorRequest -// if err = json.NewDecoder(r.Body).Decode(&req); err != nil { -// invalidJSON(w, s.Logger) -// return -// } -// -// defaultOrg, err := s.Store.Organizations(ctx).DefaultOrganization(ctx) -// if err != nil { -// unknownErrorWithMessage(w, err, s.Logger) -// return -// } -// -// if err := req.Valid(defaultOrg.ID); err != nil { -// invalidData(w, err, s.Logger) -// return -// } -// -// srv := chronograf.Server{ -// SrcID: srcID, -// Name: *req.Name, -// Username: req.Username, -// Password: req.Password, -// InsecureSkipVerify: req.InsecureSkipVerify, -// URL: *req.URL, -// Active: req.Active, -// Organization: req.Organization, -// } -// -// if srv, err = s.Store.Servers(ctx).Add(ctx, srv); err != nil { -// msg := fmt.Errorf("error storing kapacitor %v: %v", req, err) -// unknownErrorWithMessage(w, msg, s.Logger) -// return -// } -// -// res := newKapacitor(srv) -// location(w, res.Links.Self) -// encodeJSON(w, http.StatusCreated, res, s.Logger) -//} -// -//func newKapacitor(srv chronograf.Server) kapacitor { -// httpAPISrcs := "/chronograf/v1/sources" -// return kapacitor{ -// ID: srv.ID, -// Name: srv.Name, -// Username: srv.Username, -// URL: srv.URL, -// Active: srv.Active, -// InsecureSkipVerify: srv.InsecureSkipVerify, -// Links: kapaLinks{ -// Self: fmt.Sprintf("%s/%d/kapacitors/%d", httpAPISrcs, srv.SrcID, srv.ID), -// Proxy: fmt.Sprintf("%s/%d/kapacitors/%d/proxy", httpAPISrcs, srv.SrcID, srv.ID), -// Rules: fmt.Sprintf("%s/%d/kapacitors/%d/rules", httpAPISrcs, srv.SrcID, srv.ID), -// Tasks: fmt.Sprintf("%s/%d/kapacitors/%d/proxy?path=/kapacitor/v1/tasks", httpAPISrcs, srv.SrcID, srv.ID), -// Ping: fmt.Sprintf("%s/%d/kapacitors/%d/proxy?path=/kapacitor/v1/ping", httpAPISrcs, srv.SrcID, srv.ID), -// }, -// } -//} -// -//type kapacitors struct { -// Kapacitors []kapacitor `json:"kapacitors"` -//} -// -//// Kapacitors retrieves all kapacitors from store. -//func (s *Service) Kapacitors(w http.ResponseWriter, r *http.Request) { -// srcID, err := paramID("id", r) -// if err != nil { -// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) -// return -// } -// -// ctx := r.Context() -// mrSrvs, err := s.Store.Servers(ctx).All(ctx) -// if err != nil { -// Error(w, http.StatusInternalServerError, "Error loading kapacitors", s.Logger) -// return -// } -// -// srvs := []kapacitor{} -// for _, srv := range mrSrvs { -// if srv.SrcID == srcID && srv.Type == "" { -// srvs = append(srvs, newKapacitor(srv)) -// } -// } -// -// res := kapacitors{ -// Kapacitors: srvs, -// } -// -// encodeJSON(w, http.StatusOK, res, s.Logger) -//} -// -//// KapacitorsID retrieves a kapacitor with ID from store. 
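The deleted `newKapacitor` helper above derives every sub-resource link (proxy, rules, tasks, ping) from one base path built out of the source and server IDs. A minimal, self-contained sketch of that pattern, using illustrative stand-in types rather than the real chronograf ones:

```go
// Sketch of the link-building pattern from the removed newKapacitor helper:
// each sub-resource URL hangs off a base path derived from the two IDs.
package main

import "fmt"

type kapaLinks struct {
	Self  string `json:"self"`
	Proxy string `json:"proxy"`
	Ping  string `json:"ping"`
}

func linksFor(srcID, kapaID int) kapaLinks {
	base := fmt.Sprintf("/chronograf/v1/sources/%d/kapacitors/%d", srcID, kapaID)
	return kapaLinks{
		Self:  base,
		Proxy: base + "/proxy",
		Ping:  base + "/proxy?path=/kapacitor/v1/ping",
	}
}

func main() {
	fmt.Printf("%+v\n", linksFor(1, 42))
	// {Self:/chronograf/v1/sources/1/kapacitors/42 Proxy:... Ping:...}
}
```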
-//func (s *Service) KapacitorsID(w http.ResponseWriter, r *http.Request) { -// id, err := paramID("kid", r) -// if err != nil { -// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) -// return -// } -// -// srcID, err := paramID("id", r) -// if err != nil { -// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) -// return -// } -// -// ctx := r.Context() -// srv, err := s.Store.Servers(ctx).Get(ctx, id) -// if err != nil || srv.SrcID != srcID || srv.Type != "" { -// notFound(w, id, s.Logger) -// return -// } -// -// res := newKapacitor(srv) -// encodeJSON(w, http.StatusOK, res, s.Logger) -//} -// -//// RemoveKapacitor deletes kapacitor from store. -//func (s *Service) RemoveKapacitor(w http.ResponseWriter, r *http.Request) { -// id, err := paramID("kid", r) -// if err != nil { -// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) -// return -// } -// -// srcID, err := paramID("id", r) -// if err != nil { -// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) -// return -// } -// -// ctx := r.Context() -// srv, err := s.Store.Servers(ctx).Get(ctx, id) -// if err != nil || srv.SrcID != srcID || srv.Type != "" { -// notFound(w, id, s.Logger) -// return -// } -// -// if err = s.Store.Servers(ctx).Delete(ctx, srv); err != nil { -// unknownErrorWithMessage(w, err, s.Logger) -// return -// } -// -// w.WriteHeader(http.StatusNoContent) -//} -// -//type patchKapacitorRequest struct { -// Name *string `json:"name,omitempty"` // User facing name of kapacitor instance. -// URL *string `json:"url,omitempty"` // URL for the kapacitor -// Username *string `json:"username,omitempty"` // Username for kapacitor auth -// Password *string `json:"password,omitempty"` -// InsecureSkipVerify *bool `json:"insecureSkipVerify"` // InsecureSkipVerify as true means any certificate presented by the kapacitor is accepted. 
-// Active *bool `json:"active"` -//} -// -//func (p *patchKapacitorRequest) Valid() error { -// if p.URL != nil { -// url, err := url.ParseRequestURI(*p.URL) -// if err != nil { -// return fmt.Errorf("invalid source URI: %v", err) -// } -// if len(url.Scheme) == 0 { -// return fmt.Errorf("invalid URL; no URL scheme defined") -// } -// } -// return nil -//} -// -//// UpdateKapacitor incrementally updates a kapacitor definition in the store -//func (s *Service) UpdateKapacitor(w http.ResponseWriter, r *http.Request) { -// id, err := paramID("kid", r) -// if err != nil { -// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) -// return -// } -// -// srcID, err := paramID("id", r) -// if err != nil { -// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) -// return -// } -// -// ctx := r.Context() -// srv, err := s.Store.Servers(ctx).Get(ctx, id) -// if err != nil || srv.SrcID != srcID || srv.Type != "" { -// notFound(w, id, s.Logger) -// return -// } -// -// var req patchKapacitorRequest -// if err := json.NewDecoder(r.Body).Decode(&req); err != nil { -// invalidJSON(w, s.Logger) -// return -// } -// -// if err := req.Valid(); err != nil { -// invalidData(w, err, s.Logger) -// return -// } -// -// if req.Name != nil { -// srv.Name = *req.Name -// } -// if req.URL != nil { -// srv.URL = *req.URL -// } -// if req.Password != nil { -// srv.Password = *req.Password -// } -// if req.Username != nil { -// srv.Username = *req.Username -// } -// if req.InsecureSkipVerify != nil { -// srv.InsecureSkipVerify = *req.InsecureSkipVerify -// } -// if req.Active != nil { -// srv.Active = *req.Active -// } -// -// if err := s.Store.Servers(ctx).Update(ctx, srv); err != nil { -// msg := fmt.Sprintf("Error updating kapacitor ID %d", id) -// Error(w, http.StatusInternalServerError, msg, s.Logger) -// return -// } -// -// res := newKapacitor(srv) -// encodeJSON(w, http.StatusOK, res, s.Logger) -//} -// -//// KapacitorRulesPost proxies POST to kapacitor -//func (s *Service) KapacitorRulesPost(w http.ResponseWriter, r *http.Request) { -// id, err := paramID("kid", r) -// if err != nil { -// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) -// return -// } -// -// srcID, err := paramID("id", r) -// if err != nil { -// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) -// return -// } -// -// ctx := r.Context() -// srv, err := s.Store.Servers(ctx).Get(ctx, id) -// if err != nil || srv.SrcID != srcID { -// notFound(w, id, s.Logger) -// return -// } -// -// c := kapa.NewClient(srv.URL, srv.Username, srv.Password, srv.InsecureSkipVerify) -// -// var req chronograf.AlertRule -// if err = json.NewDecoder(r.Body).Decode(&req); err != nil { -// invalidData(w, err, s.Logger) -// return -// } -// // TODO: validate this data -// /* -// if err := req.Valid(); err != nil { -// invalidData(w, err) -// return -// } -// */ -// -// if req.Name == "" { -// req.Name = req.ID -// } -// -// req.ID = "" -// task, err := c.Create(ctx, req) -// if err != nil { -// invalidData(w, err, s.Logger) -// return -// } -// res := newAlertResponse(task, srv.SrcID, srv.ID) -// location(w, res.Links.Self) -// encodeJSON(w, http.StatusCreated, res, s.Logger) -//} -// -//type alertLinks struct { -// Self string `json:"self"` -// Kapacitor string `json:"kapacitor"` -// Output string `json:"output"` -//} -// -//type alertResponse struct { -// chronograf.AlertRule -// Links alertLinks `json:"links"` -//} -// -//// newAlertResponse formats task into an alertResponse -//func newAlertResponse(task 
*kapa.Task, srcID, kapaID int) *alertResponse { -// res := &alertResponse{ -// AlertRule: task.Rule, -// Links: alertLinks{ -// Self: fmt.Sprintf("/chronograf/v1/sources/%d/kapacitors/%d/rules/%s", srcID, kapaID, task.ID), -// Kapacitor: fmt.Sprintf("/chronograf/v1/sources/%d/kapacitors/%d/proxy?path=%s", srcID, kapaID, url.QueryEscape(task.Href)), -// Output: fmt.Sprintf("/chronograf/v1/sources/%d/kapacitors/%d/proxy?path=%s", srcID, kapaID, url.QueryEscape(task.HrefOutput)), -// }, -// } -// -// if res.AlertNodes.Alerta == nil { -// res.AlertNodes.Alerta = []*chronograf.Alerta{} -// } -// -// for i, a := range res.AlertNodes.Alerta { -// if a.Service == nil { -// a.Service = []string{} -// res.AlertNodes.Alerta[i] = a -// } -// } -// -// if res.AlertNodes.Email == nil { -// res.AlertNodes.Email = []*chronograf.Email{} -// } -// -// for i, a := range res.AlertNodes.Email { -// if a.To == nil { -// a.To = []string{} -// res.AlertNodes.Email[i] = a -// } -// } -// -// if res.AlertNodes.Exec == nil { -// res.AlertNodes.Exec = []*chronograf.Exec{} -// } -// -// for i, a := range res.AlertNodes.Exec { -// if a.Command == nil { -// a.Command = []string{} -// res.AlertNodes.Exec[i] = a -// } -// } -// -// if res.AlertNodes.HipChat == nil { -// res.AlertNodes.HipChat = []*chronograf.HipChat{} -// } -// -// if res.AlertNodes.Kafka == nil { -// res.AlertNodes.Kafka = []*chronograf.Kafka{} -// } -// -// if res.AlertNodes.Log == nil { -// res.AlertNodes.Log = []*chronograf.Log{} -// } -// -// if res.AlertNodes.OpsGenie == nil { -// res.AlertNodes.OpsGenie = []*chronograf.OpsGenie{} -// } -// -// for i, a := range res.AlertNodes.OpsGenie { -// if a.Teams == nil { -// a.Teams = []string{} -// res.AlertNodes.OpsGenie[i] = a -// } -// -// if a.Recipients == nil { -// a.Recipients = []string{} -// res.AlertNodes.OpsGenie[i] = a -// } -// } -// -// if res.AlertNodes.OpsGenie2 == nil { -// res.AlertNodes.OpsGenie2 = []*chronograf.OpsGenie{} -// } -// -// for i, a := range res.AlertNodes.OpsGenie2 { -// if a.Teams == nil { -// a.Teams = []string{} -// res.AlertNodes.OpsGenie2[i] = a -// } -// -// if a.Recipients == nil { -// a.Recipients = []string{} -// res.AlertNodes.OpsGenie2[i] = a -// } -// } -// -// if res.AlertNodes.PagerDuty == nil { -// res.AlertNodes.PagerDuty = []*chronograf.PagerDuty{} -// } -// -// if res.AlertNodes.PagerDuty2 == nil { -// res.AlertNodes.PagerDuty2 = []*chronograf.PagerDuty{} -// } -// -// if res.AlertNodes.Posts == nil { -// res.AlertNodes.Posts = []*chronograf.Post{} -// } -// -// for i, a := range res.AlertNodes.Posts { -// if a.Headers == nil { -// a.Headers = map[string]string{} -// res.AlertNodes.Posts[i] = a -// } -// } -// -// if res.AlertNodes.Pushover == nil { -// res.AlertNodes.Pushover = []*chronograf.Pushover{} -// } -// -// if res.AlertNodes.Sensu == nil { -// res.AlertNodes.Sensu = []*chronograf.Sensu{} -// } -// -// for i, a := range res.AlertNodes.Sensu { -// if a.Handlers == nil { -// a.Handlers = []string{} -// res.AlertNodes.Sensu[i] = a -// } -// } -// -// if res.AlertNodes.Slack == nil { -// res.AlertNodes.Slack = []*chronograf.Slack{} -// } -// -// if res.AlertNodes.Talk == nil { -// res.AlertNodes.Talk = []*chronograf.Talk{} -// } -// -// if res.AlertNodes.TCPs == nil { -// res.AlertNodes.TCPs = []*chronograf.TCP{} -// } -// -// if res.AlertNodes.Telegram == nil { -// res.AlertNodes.Telegram = []*chronograf.Telegram{} -// } -// -// if res.AlertNodes.VictorOps == nil { -// res.AlertNodes.VictorOps = []*chronograf.VictorOps{} -// } -// -// if res.Query != 
nil { -// if res.Query.ID == "" { -// res.Query.ID = res.ID -// } -// -// if res.Query.Fields == nil { -// res.Query.Fields = make([]chronograf.Field, 0) -// } -// -// if res.Query.GroupBy.Tags == nil { -// res.Query.GroupBy.Tags = make([]string, 0) -// } -// -// if res.Query.Tags == nil { -// res.Query.Tags = make(map[string][]string) -// } -// } -// return res -//} -// -//// ValidRuleRequest checks if the requested rule change is valid -//func ValidRuleRequest(rule chronograf.AlertRule) error { -// if rule.Query == nil { -// return fmt.Errorf("invalid alert rule: no query defined") -// } -// var hasFuncs bool -// for _, f := range rule.Query.Fields { -// if f.Type == "func" && len(f.Args) > 0 { -// hasFuncs = true -// } -// } -// // All kapacitor rules with functions must have a window that is applied -// // every amount of time -// if rule.Every == "" && hasFuncs { -// return fmt.Errorf(`invalid alert rule: functions require an "every" window`) -// } -// return nil -//} -// -//// KapacitorRulesPut proxies PATCH to kapacitor -//func (s *Service) KapacitorRulesPut(w http.ResponseWriter, r *http.Request) { -// id, err := paramID("kid", r) -// if err != nil { -// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) -// return -// } -// -// srcID, err := paramID("id", r) -// if err != nil { -// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) -// return -// } -// -// ctx := r.Context() -// srv, err := s.Store.Servers(ctx).Get(ctx, id) -// if err != nil || srv.SrcID != srcID { -// notFound(w, id, s.Logger) -// return -// } -// -// tid := httprouter.GetParamFromContext(ctx, "tid") -// c := kapa.NewClient(srv.URL, srv.Username, srv.Password, srv.InsecureSkipVerify) -// var req chronograf.AlertRule -// if err = json.NewDecoder(r.Body).Decode(&req); err != nil { -// invalidData(w, err, s.Logger) -// return -// } -// // TODO: validate this data -// /* -// if err := req.Valid(); err != nil { -// invalidData(w, err) -// return -// } -// */ -// -// // Check if the rule exists and is scoped correctly -// if _, err = c.Get(ctx, tid); err != nil { -// if err == chronograf.ErrAlertNotFound { -// notFound(w, id, s.Logger) -// return -// } -// Error(w, http.StatusInternalServerError, err.Error(), s.Logger) -// return -// } -// -// // Replace alert completely with this new alert. 
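The `KapacitorRulesPut` flow at this point is check-then-replace: confirm the task exists on the kapacitor, then overwrite it wholesale rather than merging fields. A runnable sketch of that shape, with a hypothetical in-memory client standing in for `kapa.Client`:

```go
// Check-then-replace (PUT semantics): a missing task is an error, an
// existing one is replaced completely. fakeClient is illustrative only.
package main

import (
	"context"
	"errors"
	"fmt"
)

var errNotFound = errors.New("task not found")

type fakeClient struct{ tasks map[string]string }

func (c *fakeClient) Get(ctx context.Context, id string) (string, error) {
	t, ok := c.tasks[id]
	if !ok {
		return "", errNotFound
	}
	return t, nil
}

func (c *fakeClient) Update(ctx context.Context, id, body string) (string, error) {
	c.tasks[id] = body // full replacement, not a field-by-field merge
	return body, nil
}

func replaceTask(ctx context.Context, c *fakeClient, id, body string) (string, error) {
	// Verify the task exists and is reachable before overwriting it.
	if _, err := c.Get(ctx, id); err != nil {
		return "", fmt.Errorf("cannot replace %q: %w", id, err)
	}
	return c.Update(ctx, id, body)
}

func main() {
	c := &fakeClient{tasks: map[string]string{"cpu_alert": "old"}}
	fmt.Println(replaceTask(context.Background(), c, "cpu_alert", "new"))
	fmt.Println(replaceTask(context.Background(), c, "missing", "new"))
}
```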
-// req.ID = tid -// task, err := c.Update(ctx, c.Href(tid), req) -// if err != nil { -// invalidData(w, err, s.Logger) -// return -// } -// res := newAlertResponse(task, srv.SrcID, srv.ID) -// encodeJSON(w, http.StatusOK, res, s.Logger) -//} -// -//// KapacitorStatus is the current state of a running task -//type KapacitorStatus struct { -// Status string `json:"status"` -//} -// -//// Valid check if the kapacitor status is enabled or disabled -//func (k *KapacitorStatus) Valid() error { -// if k.Status == "enabled" || k.Status == "disabled" { -// return nil -// } -// return fmt.Errorf("invalid Kapacitor status: %s", k.Status) -//} -// -//// KapacitorRulesStatus proxies PATCH to kapacitor to enable/disable tasks -//func (s *Service) KapacitorRulesStatus(w http.ResponseWriter, r *http.Request) { -// id, err := paramID("kid", r) -// if err != nil { -// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) -// return -// } -// -// srcID, err := paramID("id", r) -// if err != nil { -// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) -// return -// } -// -// ctx := r.Context() -// srv, err := s.Store.Servers(ctx).Get(ctx, id) -// if err != nil || srv.SrcID != srcID { -// notFound(w, id, s.Logger) -// return -// } -// -// tid := httprouter.GetParamFromContext(ctx, "tid") -// c := kapa.NewClient(srv.URL, srv.Username, srv.Password, srv.InsecureSkipVerify) -// -// var req KapacitorStatus -// if err = json.NewDecoder(r.Body).Decode(&req); err != nil { -// invalidJSON(w, s.Logger) -// return -// } -// if err := req.Valid(); err != nil { -// invalidData(w, err, s.Logger) -// return -// } -// -// // Check if the rule exists and is scoped correctly -// _, err = c.Get(ctx, tid) -// if err != nil { -// if err == chronograf.ErrAlertNotFound { -// notFound(w, id, s.Logger) -// return -// } -// Error(w, http.StatusInternalServerError, err.Error(), s.Logger) -// return -// } -// -// var task *kapa.Task -// if req.Status == "enabled" { -// task, err = c.Enable(ctx, c.Href(tid)) -// } else { -// task, err = c.Disable(ctx, c.Href(tid)) -// } -// -// if err != nil { -// Error(w, http.StatusInternalServerError, err.Error(), s.Logger) -// return -// } -// -// res := newAlertResponse(task, srv.SrcID, srv.ID) -// encodeJSON(w, http.StatusOK, res, s.Logger) -//} -// -//// KapacitorRulesGet retrieves all rules -//func (s *Service) KapacitorRulesGet(w http.ResponseWriter, r *http.Request) { -// id, err := paramID("kid", r) -// if err != nil { -// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) -// return -// } -// -// srcID, err := paramID("id", r) -// if err != nil { -// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) -// return -// } -// -// ctx := r.Context() -// srv, err := s.Store.Servers(ctx).Get(ctx, id) -// if err != nil || srv.SrcID != srcID { -// notFound(w, id, s.Logger) -// return -// } -// -// c := kapa.NewClient(srv.URL, srv.Username, srv.Password, srv.InsecureSkipVerify) -// tasks, err := c.All(ctx) -// if err != nil { -// Error(w, http.StatusInternalServerError, err.Error(), s.Logger) -// return -// } -// -// res := allAlertsResponse{ -// Rules: []*alertResponse{}, -// } -// for _, task := range tasks { -// ar := newAlertResponse(task, srv.SrcID, srv.ID) -// res.Rules = append(res.Rules, ar) -// } -// encodeJSON(w, http.StatusOK, res, s.Logger) -//} -// -//type allAlertsResponse struct { -// Rules []*alertResponse `json:"rules"` -//} -// -//// KapacitorRulesID retrieves specific task -//func (s *Service) KapacitorRulesID(w http.ResponseWriter, r 
*http.Request) { -// id, err := paramID("kid", r) -// if err != nil { -// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) -// return -// } -// -// srcID, err := paramID("id", r) -// if err != nil { -// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) -// return -// } -// -// ctx := r.Context() -// srv, err := s.Store.Servers(ctx).Get(ctx, id) -// if err != nil || srv.SrcID != srcID { -// notFound(w, id, s.Logger) -// return -// } -// tid := httprouter.GetParamFromContext(ctx, "tid") -// -// c := kapa.NewClient(srv.URL, srv.Username, srv.Password, srv.InsecureSkipVerify) -// -// // Check if the rule exists within scope -// task, err := c.Get(ctx, tid) -// if err != nil { -// if err == chronograf.ErrAlertNotFound { -// notFound(w, id, s.Logger) -// return -// } -// Error(w, http.StatusInternalServerError, err.Error(), s.Logger) -// return -// } -// -// res := newAlertResponse(task, srv.SrcID, srv.ID) -// encodeJSON(w, http.StatusOK, res, s.Logger) -//} -// -//// KapacitorRulesDelete proxies DELETE to kapacitor -//func (s *Service) KapacitorRulesDelete(w http.ResponseWriter, r *http.Request) { -// id, err := paramID("kid", r) -// if err != nil { -// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) -// return -// } -// -// srcID, err := paramID("id", r) -// if err != nil { -// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) -// return -// } -// -// ctx := r.Context() -// srv, err := s.Store.Servers(ctx).Get(ctx, id) -// if err != nil || srv.SrcID != srcID { -// notFound(w, id, s.Logger) -// return -// } -// -// c := kapa.NewClient(srv.URL, srv.Username, srv.Password, srv.InsecureSkipVerify) -// -// tid := httprouter.GetParamFromContext(ctx, "tid") -// // Check if the rule is linked to this server and kapacitor -// if _, err := c.Get(ctx, tid); err != nil { -// if err == chronograf.ErrAlertNotFound { -// notFound(w, id, s.Logger) -// return -// } -// Error(w, http.StatusInternalServerError, err.Error(), s.Logger) -// return -// } -// if err := c.Delete(ctx, c.Href(tid)); err != nil { -// Error(w, http.StatusInternalServerError, err.Error(), s.Logger) -// return -// } -// -// w.WriteHeader(http.StatusNoContent) -//} diff --git a/chronograf/server/kapacitors_test.go b/chronograf/server/kapacitors_test.go deleted file mode 100644 index 59f544b3fc4..00000000000 --- a/chronograf/server/kapacitors_test.go +++ /dev/null @@ -1,265 +0,0 @@ -package server_test - -//const tickScript = ` -//stream -// |from() -// .measurement('cpu') -// |alert() -// .crit(lambda: "usage_idle" < 10) -// .log('/tmp/alert') -//` -// -//func TestValidRuleRequest(t *testing.T) { -// tests := []struct { -// name string -// rule chronograf.AlertRule -// wantErr bool -// }{ -// { -// name: "No every with functions", -// rule: chronograf.AlertRule{ -// Query: &chronograf.QueryConfig{ -// Fields: []chronograf.Field{ -// { -// Value: "max", -// Type: "func", -// Args: []chronograf.Field{ -// { -// Value: "oldmanpeabody", -// Type: "field", -// }, -// }, -// }, -// }, -// }, -// }, -// wantErr: true, -// }, -// { -// name: "With every", -// rule: chronograf.AlertRule{ -// Every: "10s", -// Query: &chronograf.QueryConfig{ -// Fields: []chronograf.Field{ -// { -// Value: "max", -// Type: "func", -// Args: []chronograf.Field{ -// { -// Value: "oldmanpeabody", -// Type: "field", -// }, -// }, -// }, -// }, -// }, -// }, -// }, -// { -// name: "No query config", -// rule: chronograf.AlertRule{}, -// wantErr: true, -// }, -// } -// for _, tt := range tests { -// t.Run(tt.name, 
func(t *testing.T) { -// if err := server.ValidRuleRequest(tt.rule); (err != nil) != tt.wantErr { -// t.Errorf("ValidRuleRequest() error = %v, wantErr %v", err, tt.wantErr) -// } -// }) -// } -//} -// -//func Test_KapacitorRulesGet(t *testing.T) { -// kapaTests := []struct { -// name string -// requestPath string -// mockAlerts []chronograf.AlertRule -// expected []chronograf.AlertRule -// }{ -// { -// name: "basic", -// requestPath: "/chronograf/v1/sources/1/kapacitors/1/rules", -// mockAlerts: []chronograf.AlertRule{ -// { -// ID: "cpu_alert", -// Name: "cpu_alert", -// Status: "enabled", -// Type: "stream", -// DBRPs: []chronograf.DBRP{{DB: "telegraf", RP: "autogen"}}, -// TICKScript: tickScript, -// }, -// }, -// expected: []chronograf.AlertRule{ -// { -// ID: "cpu_alert", -// Name: "cpu_alert", -// Status: "enabled", -// Type: "stream", -// DBRPs: []chronograf.DBRP{{DB: "telegraf", RP: "autogen"}}, -// TICKScript: tickScript, -// AlertNodes: chronograf.AlertNodes{ -// Posts: []*chronograf.Post{}, -// TCPs: []*chronograf.TCP{}, -// Email: []*chronograf.Email{}, -// Exec: []*chronograf.Exec{}, -// Log: []*chronograf.Log{}, -// VictorOps: []*chronograf.VictorOps{}, -// PagerDuty: []*chronograf.PagerDuty{}, -// PagerDuty2: []*chronograf.PagerDuty{}, -// Pushover: []*chronograf.Pushover{}, -// Sensu: []*chronograf.Sensu{}, -// Slack: []*chronograf.Slack{}, -// Telegram: []*chronograf.Telegram{}, -// HipChat: []*chronograf.HipChat{}, -// Alerta: []*chronograf.Alerta{}, -// OpsGenie: []*chronograf.OpsGenie{}, -// OpsGenie2: []*chronograf.OpsGenie{}, -// Talk: []*chronograf.Talk{}, -// Kafka: []*chronograf.Kafka{}, -// }, -// }, -// }, -// }, -// } -// -// for _, test := range kapaTests { -// test := test // needed to avoid data race -// t.Run(test.name, func(t *testing.T) { -// t.Parallel() -// -// // setup mock kapa API -// kapaSrv := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { -// params := r.URL.Query() -// limit, err := strconv.Atoi(params.Get("limit")) -// if err != nil { -// rw.WriteHeader(http.StatusBadRequest) -// return -// } -// offset, err := strconv.Atoi(params.Get("offset")) -// if err != nil { -// rw.WriteHeader(http.StatusBadRequest) -// return -// } -// -// tsks := []map[string]interface{}{} -// for _, task := range test.mockAlerts { -// tsks = append(tsks, map[string]interface{}{ -// "id": task.ID, -// "script": tickScript, -// "status": "enabled", -// "type": "stream", -// "dbrps": []chronograf.DBRP{ -// { -// DB: "telegraf", -// RP: "autogen", -// }, -// }, -// "link": map[string]interface{}{ -// "rel": "self", -// "href": "/kapacitor/v1/tasks/cpu_alert", -// }, -// }) -// } -// -// var tasks map[string]interface{} -// -// if offset >= len(tsks) { -// tasks = map[string]interface{}{ -// "tasks": []map[string]interface{}{}, -// } -// } else if limit+offset > len(tsks) { -// tasks = map[string]interface{}{ -// "tasks": tsks[offset:], -// } -// } -// //} else { -// //tasks = map[string]interface{}{ -// //"tasks": tsks[offset : offset+limit], -// //} -// //} -// -// err = json.NewEncoder(rw).Encode(&tasks) -// if err != nil { -// t.Error("Failed to encode JSON. 
err:", err) -// } -// })) -// defer kapaSrv.Close() -// -// // setup mock service and test logger -// testLogger := mocks.TestLogger{} -// svc := &server.Service{ -// Store: &mocks.Store{ -// SourcesStore: &mocks.SourcesStore{ -// GetF: func(ctx context.Context, ID int) (chronograf.Source, error) { -// return chronograf.Source{ -// ID: ID, -// InsecureSkipVerify: true, -// }, nil -// }, -// }, -// ServersStore: &mocks.ServersStore{ -// GetF: func(ctx context.Context, ID int) (chronograf.Server, error) { -// return chronograf.Server{ -// SrcID: ID, -// URL: kapaSrv.URL, -// }, nil -// }, -// }, -// }, -// Logger: &testLogger, -// } -// -// // setup request and response recorder -// req := httptest.NewRequest("GET", test.requestPath, strings.NewReader("")) -// rr := httptest.NewRecorder() -// -// // setup context and request params -// bg := context.Background() -// params := httprouter.Params{ -// { -// Key: "id", -// Value: "1", -// }, -// { -// Key: "kid", -// Value: "1", -// }, -// } -// ctx := httprouter.WithParams(bg, params) -// req = req.WithContext(ctx) -// -// // invoke KapacitorRulesGet endpoint -// svc.KapacitorRulesGet(rr, req) -// -// // destructure response -// frame := struct { -// Rules []struct { -// chronograf.AlertRule -// Links json.RawMessage `json:"links"` -// } `json:"rules"` -// }{} -// -// resp := rr.Result() -// -// err := json.NewDecoder(resp.Body).Decode(&frame) -// if err != nil { -// t.Fatal("Err decoding kapa rule response: err:", err) -// } -// -// actual := make([]chronograf.AlertRule, len(frame.Rules)) -// -// for i := range frame.Rules { -// actual[i] = frame.Rules[i].AlertRule -// } -// -// if resp.StatusCode != http.StatusOK { -// t.Fatal("Expected HTTP 200 OK but got", resp.Status) -// } -// -// if !cmp.Equal(test.expected, actual) { -// t.Fatalf("%q - Alert rules differ! 
diff:\n%s\n", test.name, cmp.Diff(test.expected, actual)) -// } -// }) -// } -//} diff --git a/chronograf/server/layout.go b/chronograf/server/layout.go deleted file mode 100644 index 9c3ad03277b..00000000000 --- a/chronograf/server/layout.go +++ /dev/null @@ -1,119 +0,0 @@ -package server - -import ( - "fmt" - "net/http" - - "github.com/bouk/httprouter" - "github.com/influxdata/influxdb/v2/chronograf" -) - -type link struct { - Href string `json:"href"` - Rel string `json:"rel"` -} - -type layoutResponse struct { - chronograf.Layout - Link link `json:"link"` -} - -func newLayoutResponse(layout chronograf.Layout) layoutResponse { - httpAPILayouts := "/chronograf/v1/layouts" - href := fmt.Sprintf("%s/%s", httpAPILayouts, layout.ID) - rel := "self" - - for idx, cell := range layout.Cells { - axes := []string{"x", "y", "y2"} - - if cell.Axes == nil { - layout.Cells[idx].Axes = make(map[string]chronograf.Axis, len(axes)) - } - - if cell.CellColors == nil { - layout.Cells[idx].CellColors = []chronograf.CellColor{} - } - - for _, axis := range axes { - if _, found := cell.Axes[axis]; !found { - layout.Cells[idx].Axes[axis] = chronograf.Axis{ - Bounds: []string{}, - } - } - } - } - - return layoutResponse{ - Layout: layout, - Link: link{ - Href: href, - Rel: rel, - }, - } -} - -type getLayoutsResponse struct { - Layouts []layoutResponse `json:"layouts"` -} - -// Layouts retrieves all layouts from store -func (s *Service) Layouts(w http.ResponseWriter, r *http.Request) { - // Construct a filter sieve for both applications and measurements - filtered := map[string]bool{} - for _, a := range r.URL.Query()["app"] { - filtered[a] = true - } - - for _, m := range r.URL.Query()["measurement"] { - filtered[m] = true - } - - ctx := r.Context() - layouts, err := s.Store.Layouts(ctx).All(ctx) - if err != nil { - Error(w, http.StatusInternalServerError, "Error loading layouts", s.Logger) - return - } - - filter := func(layout *chronograf.Layout) bool { - // If the length of the filter is zero then all values are acceptable. 
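The filter closure being defined here is a simple sieve: an empty set of requested names accepts every layout, and a non-empty one passes a layout whose measurement or application was requested. The same logic, lifted into a stand-alone runnable sketch with simplified stand-in types:

```go
// Sieve pattern from the removed Layouts handler: no requested names means
// accept everything; otherwise match on either key.
package main

import "fmt"

type layout struct{ Application, Measurement string }

func keep(filtered map[string]bool, l layout) bool {
	if len(filtered) == 0 {
		return true // no filter requested: all layouts are acceptable
	}
	return filtered[l.Measurement] || filtered[l.Application]
}

func main() {
	sieve := map[string]bool{"influxdb": true}
	fmt.Println(keep(sieve, layout{Application: "influxdb"}))   // true
	fmt.Println(keep(sieve, layout{Application: "chronograf"})) // false
	fmt.Println(keep(map[string]bool{}, layout{}))              // true
}
```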
- if len(filtered) == 0 { - return true - } - - // If filter contains either measurement or application - return filtered[layout.Measurement] || filtered[layout.Application] - } - - res := getLayoutsResponse{ - Layouts: []layoutResponse{}, - } - - seen := make(map[string]bool) - for _, layout := range layouts { - // remove duplicates - if seen[layout.Measurement+layout.ID] { - continue - } - // filter for data that belongs to provided application or measurement - if filter(&layout) { - res.Layouts = append(res.Layouts, newLayoutResponse(layout)) - } - } - encodeJSON(w, http.StatusOK, res, s.Logger) -} - -// LayoutsID retrieves layout with ID from store -func (s *Service) LayoutsID(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - id := httprouter.GetParamFromContext(ctx, "id") - - layout, err := s.Store.Layouts(ctx).Get(ctx, id) - if err != nil { - Error(w, http.StatusNotFound, fmt.Sprintf("ID %s not found", id), s.Logger) - return - } - - res := newLayoutResponse(layout) - encodeJSON(w, http.StatusOK, res, s.Logger) -} diff --git a/chronograf/server/layout_test.go b/chronograf/server/layout_test.go deleted file mode 100644 index c2c4103703e..00000000000 --- a/chronograf/server/layout_test.go +++ /dev/null @@ -1,186 +0,0 @@ -package server_test - -import ( - "context" - "encoding/json" - "net/http/httptest" - "net/url" - "strings" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/mocks" - "github.com/influxdata/influxdb/v2/chronograf/server" -) - -func Test_Layouts(t *testing.T) { - layoutTests := []struct { - name string - expected chronograf.Layout - allLayouts []chronograf.Layout - focusedApp string // should filter all layouts to this app only - shouldErr bool - }{ - { - "empty layout", - chronograf.Layout{}, - []chronograf.Layout{}, - "", - false, - }, - { - "several layouts", - chronograf.Layout{ - ID: "d20a21c8-69f1-4780-90fe-e69f5e4d138c", - Application: "influxdb", - Measurement: "influxdb", - }, - []chronograf.Layout{ - chronograf.Layout{ - ID: "d20a21c8-69f1-4780-90fe-e69f5e4d138c", - Application: "influxdb", - Measurement: "influxdb", - }, - }, - "", - false, - }, - { - "filtered app", - chronograf.Layout{ - ID: "d20a21c8-69f1-4780-90fe-e69f5e4d138c", - Application: "influxdb", - Measurement: "influxdb", - }, - []chronograf.Layout{ - chronograf.Layout{ - ID: "d20a21c8-69f1-4780-90fe-e69f5e4d138c", - Application: "influxdb", - Measurement: "influxdb", - }, - chronograf.Layout{ - ID: "b020101b-ea6b-4c8c-9f0e-db0ba501f4ef", - Application: "chronograf", - Measurement: "chronograf", - }, - }, - "influxdb", - false, - }, - { - "axis zero values", - chronograf.Layout{ - ID: "d20a21c8-69f1-4780-90fe-e69f5e4d138c", - Application: "influxdb", - Measurement: "influxdb", - Cells: []chronograf.Cell{ - { - X: 0, - Y: 0, - W: 4, - H: 4, - I: "3b0e646b-2ca3-4df2-95a5-fd80915459dd", - Name: "A Graph", - CellColors: []chronograf.CellColor{}, - Axes: map[string]chronograf.Axis{ - "x": chronograf.Axis{ - Bounds: []string{}, - }, - "y": chronograf.Axis{ - Bounds: []string{}, - }, - "y2": chronograf.Axis{ - Bounds: []string{}, - }, - }, - }, - }, - }, - []chronograf.Layout{ - chronograf.Layout{ - ID: "d20a21c8-69f1-4780-90fe-e69f5e4d138c", - Application: "influxdb", - Measurement: "influxdb", - Cells: []chronograf.Cell{ - { - X: 0, - Y: 0, - W: 4, - H: 4, - I: "3b0e646b-2ca3-4df2-95a5-fd80915459dd", - CellColors: []chronograf.CellColor{}, - Name: "A Graph", - 
}, - }, - }, - }, - "", - false, - }, - } - - for _, test := range layoutTests { - test := test - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - // setup mock chronograf.Service and mock logger - lg := &mocks.TestLogger{} - svc := server.Service{ - Store: &mocks.Store{LayoutsStore: &mocks.LayoutsStore{ - AllF: func(ctx context.Context) ([]chronograf.Layout, error) { - if len(test.allLayouts) == 0 { - return []chronograf.Layout{ - test.expected, - }, nil - } else { - return test.allLayouts, nil - } - }, - }, - }, - Logger: lg, - } - - // setup mock request and response - rr := httptest.NewRecorder() - reqURL := url.URL{ - Path: "/chronograf/v1/layouts", - } - params := reqURL.Query() - - // add query params required by test - if test.focusedApp != "" { - params.Add("app", test.focusedApp) - } - - // re-inject query params - reqURL.RawQuery = params.Encode() - - req := httptest.NewRequest("GET", reqURL.RequestURI(), strings.NewReader("")) - - // invoke handler for layouts endpoint - svc.Layouts(rr, req) - - // create a throwaway frame to unwrap Layouts - respFrame := struct { - Layouts []struct { - chronograf.Layout - Link interface{} `json:"-"` - } `json:"layouts"` - }{} - - // decode resp into respFrame - resp := rr.Result() - if err := json.NewDecoder(resp.Body).Decode(&respFrame); err != nil { - t.Fatalf("%q - Error unmarshalling JSON: err: %s", test.name, err.Error()) - } - - // compare actual and expected - if !cmp.Equal(test.expected, respFrame.Layouts[0].Layout) { - t.Fatalf("%q - Expected layouts to be equal: diff:\n\t%s", test.name, cmp.Diff(test.expected, respFrame.Layouts[0].Layout)) - } - }) - } -} diff --git a/chronograf/server/links.go b/chronograf/server/links.go deleted file mode 100644 index acfdfd7cf50..00000000000 --- a/chronograf/server/links.go +++ /dev/null @@ -1,59 +0,0 @@ -package server - -import ( - "errors" - "net/url" -) - -type getFluxLinksResponse struct { - AST string `json:"ast"` - Self string `json:"self"` - Suggestions string `json:"suggestions"` -} - -type getConfigLinksResponse struct { - Self string `json:"self"` // Location of the whole global application configuration - Auth string `json:"auth"` // Location of the auth section of the global application configuration -} - -type getOrganizationConfigLinksResponse struct { - Self string `json:"self"` // Location of the organization configuration - LogViewer string `json:"logViewer"` // Location of the organization-specific log viewer configuration -} - -type getExternalLinksResponse struct { - StatusFeed *string `json:"statusFeed,omitempty"` // Location of the a JSON Feed for client's Status page News Feed - CustomLinks []CustomLink `json:"custom,omitempty"` // Any custom external links for client's User menu -} - -// CustomLink is a handler that returns a custom link to be used in server's routes response, within ExternalLinks -type CustomLink struct { - Name string `json:"name"` - URL string `json:"url"` -} - -// NewCustomLinks transforms `--custom-link` CLI flag data or `CUSTOM_LINKS` ENV -// var data into a data structure that the Chronograf client will expect -func NewCustomLinks(links map[string]string) ([]CustomLink, error) { - customLinks := make([]CustomLink, 0, len(links)) - for name, link := range links { - if name == "" { - return nil, errors.New("customLink missing key for Name") - } - if link == "" { - return nil, errors.New("customLink missing value for URL") - } - _, err := url.Parse(link) - if err != nil { - return nil, err - } - - customLink := CustomLink{ - Name: name, - URL: 
link, - } - customLinks = append(customLinks, customLink) - } - - return customLinks, nil -} diff --git a/chronograf/server/links_test.go b/chronograf/server/links_test.go deleted file mode 100644 index 0ff6835fd6b..00000000000 --- a/chronograf/server/links_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package server - -import ( - "reflect" - "testing" -) - -func TestNewCustomLinks(t *testing.T) { - tests := []struct { - name string - args map[string]string - want []CustomLink - wantErr bool - }{ - { - name: "Unknown error in NewCustomLinks", - args: map[string]string{ - "cubeapple": "https://cube.apple", - }, - want: []CustomLink{ - { - Name: "cubeapple", - URL: "https://cube.apple", - }, - }, - }, - { - name: "CustomLink missing Name", - args: map[string]string{ - "": "https://cube.apple", - }, - wantErr: true, - }, - { - name: "CustomLink missing URL", - args: map[string]string{ - "cubeapple": "", - }, - wantErr: true, - }, - { - name: "Missing protocol scheme", - args: map[string]string{ - "cubeapple": ":k%8a#", - }, - wantErr: true, - }, - } - - for _, tt := range tests { - got, err := NewCustomLinks(tt.args) - if (err != nil) != tt.wantErr { - t.Errorf("%q. NewCustomLinks() error = %v, wantErr %v", tt.name, err, tt.wantErr) - continue - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("%q. NewCustomLinks() = %v, want %v", tt.name, got, tt.want) - } - } -} diff --git a/chronograf/server/logger.go b/chronograf/server/logger.go deleted file mode 100644 index cb88bf06066..00000000000 --- a/chronograf/server/logger.go +++ /dev/null @@ -1,63 +0,0 @@ -package server - -import ( - "net/http" - "time" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -// statusWriterFlusher captures the status header of an http.ResponseWriter -// and is a flusher -type statusWriter struct { - http.ResponseWriter - Flusher http.Flusher - status int -} - -func (w *statusWriter) WriteHeader(status int) { - w.status = status - w.ResponseWriter.WriteHeader(status) -} - -func (w *statusWriter) Status() int { return w.status } - -// Flush is here because the underlying HTTP chunked transfer response writer -// to implement http.Flusher. Without it data is silently buffered. This -// was discovered when proxying kapacitor chunked logs. -func (w *statusWriter) Flush() { - if w.Flusher != nil { - w.Flusher.Flush() - } -} - -// Logger is middleware that logs the request -func Logger(logger chronograf.Logger, next http.Handler) http.Handler { - fn := func(w http.ResponseWriter, r *http.Request) { - now := time.Now() - logger.WithField("component", "server"). - WithField("remote_addr", r.RemoteAddr). - WithField("method", r.Method). - WithField("url", r.URL). - Debug("Request") - - sw := &statusWriter{ - ResponseWriter: w, - } - if f, ok := w.(http.Flusher); ok { - sw.Flusher = f - } - next.ServeHTTP(sw, r) - later := time.Now() - elapsed := later.Sub(now) - - logger. - WithField("component", "server"). - WithField("remote_addr", r.RemoteAddr). - WithField("method", r.Method). - WithField("response_time", elapsed.String()). - WithField("status", sw.Status()). 
- Info("Response: ", http.StatusText(sw.Status())) - } - return http.HandlerFunc(fn) -} diff --git a/chronograf/server/logout.go b/chronograf/server/logout.go deleted file mode 100644 index dd7c2cabe47..00000000000 --- a/chronograf/server/logout.go +++ /dev/null @@ -1,24 +0,0 @@ -package server - -import ( - "net/http" - "path" -) - -// Logout chooses the correct provider logout route and redirects to it -func Logout(nextURL, basepath string, routes AuthRoutes) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - principal, err := getPrincipal(ctx) - if err != nil { - http.Redirect(w, r, path.Join(basepath, nextURL), http.StatusTemporaryRedirect) - return - } - route, ok := routes.Lookup(principal.Issuer) - if !ok { - http.Redirect(w, r, path.Join(basepath, nextURL), http.StatusTemporaryRedirect) - return - } - http.Redirect(w, r, route.Logout, http.StatusTemporaryRedirect) - } -} diff --git a/chronograf/server/mapping.go b/chronograf/server/mapping.go deleted file mode 100644 index 134e9f6a8fa..00000000000 --- a/chronograf/server/mapping.go +++ /dev/null @@ -1,264 +0,0 @@ -package server - -import ( - "context" - - "encoding/json" - "fmt" - "net/http" - "strings" - - "github.com/bouk/httprouter" - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/oauth2" -) - -func (s *Service) mapPrincipalToSuperAdmin(p oauth2.Principal) bool { - if p.Issuer != "auth0" { - return false - } - - groups := strings.Split(p.Group, ",") - superAdmin := false - for _, group := range groups { - if group != "" && group == s.SuperAdminProviderGroups.auth0 { - superAdmin = true - break - } - } - return superAdmin -} - -func (s *Service) mapPrincipalToRoles(ctx context.Context, p oauth2.Principal) ([]chronograf.Role, error) { - mappings, err := s.Store.Mappings(ctx).All(ctx) - if err != nil { - return nil, err - } - roles := []chronograf.Role{} -MappingsLoop: - for _, mapping := range mappings { - if applyMapping(mapping, p) { - org, err := s.Store.Organizations(ctx).Get(ctx, chronograf.OrganizationQuery{ID: &mapping.Organization}) - if err != nil { - continue MappingsLoop - } - - for _, role := range roles { - if role.Organization == org.ID { - continue MappingsLoop - } - } - roles = append(roles, chronograf.Role{Organization: org.ID, Name: org.DefaultRole}) - } - } - - return roles, nil -} - -func applyMapping(m chronograf.Mapping, p oauth2.Principal) bool { - switch m.Provider { - case chronograf.MappingWildcard, p.Issuer: - default: - return false - } - - switch m.Scheme { - case chronograf.MappingWildcard, "oauth2": - default: - return false - } - - if m.ProviderOrganization == chronograf.MappingWildcard { - return true - } - - groups := strings.Split(p.Group, ",") - - return matchGroup(m.ProviderOrganization, groups) -} - -func matchGroup(match string, groups []string) bool { - for _, group := range groups { - if match == group { - return true - } - } - - return false -} - -type mappingsRequest chronograf.Mapping - -// Valid determines if a mapping request is valid -func (m *mappingsRequest) Valid() error { - if m.Provider == "" { - return fmt.Errorf("mapping must specify provider") - } - if m.Scheme == "" { - return fmt.Errorf("mapping must specify scheme") - } - if m.ProviderOrganization == "" { - return fmt.Errorf("mapping must specify group") - } - - return nil -} - -type mappingResponse struct { - Links selfLinks `json:"links"` - chronograf.Mapping -} - -func newMappingResponse(m 
chronograf.Mapping) *mappingResponse { - - return &mappingResponse{ - Links: selfLinks{ - Self: fmt.Sprintf("/chronograf/v1/mappings/%s", m.ID), - }, - Mapping: m, - } -} - -type mappingsResponse struct { - Links selfLinks `json:"links"` - Mappings []*mappingResponse `json:"mappings"` -} - -func newMappingsResponse(ms []chronograf.Mapping) *mappingsResponse { - mappings := []*mappingResponse{} - for _, m := range ms { - mappings = append(mappings, newMappingResponse(m)) - } - return &mappingsResponse{ - Links: selfLinks{ - Self: "/chronograf/v1/mappings", - }, - Mappings: mappings, - } -} - -// Mappings retrieves all mappings -func (s *Service) Mappings(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - mappings, err := s.Store.Mappings(ctx).All(ctx) - if err != nil { - Error(w, http.StatusInternalServerError, "failed to retrieve mappings from database", s.Logger) - return - } - - res := newMappingsResponse(mappings) - - encodeJSON(w, http.StatusOK, res, s.Logger) -} - -// NewMapping adds a new mapping -func (s *Service) NewMapping(w http.ResponseWriter, r *http.Request) { - var req mappingsRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - invalidJSON(w, s.Logger) - return - } - - if err := req.Valid(); err != nil { - invalidData(w, err, s.Logger) - return - } - - ctx := r.Context() - - // validate that the organization exists - if !s.organizationExists(ctx, req.Organization) { - invalidData(w, fmt.Errorf("organization does not exist"), s.Logger) - return - } - - mapping := &chronograf.Mapping{ - Organization: req.Organization, - Scheme: req.Scheme, - Provider: req.Provider, - ProviderOrganization: req.ProviderOrganization, - } - - m, err := s.Store.Mappings(ctx).Add(ctx, mapping) - if err != nil { - Error(w, http.StatusInternalServerError, "failed to add mapping to database", s.Logger) - return - } - - cu := newMappingResponse(*m) - location(w, cu.Links.Self) - encodeJSON(w, http.StatusCreated, cu, s.Logger) -} - -// UpdateMapping updates a mapping -func (s *Service) UpdateMapping(w http.ResponseWriter, r *http.Request) { - var req mappingsRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - invalidJSON(w, s.Logger) - return - } - - if err := req.Valid(); err != nil { - invalidData(w, err, s.Logger) - return - } - - ctx := r.Context() - - // validate that the organization exists - if !s.organizationExists(ctx, req.Organization) { - invalidData(w, fmt.Errorf("organization does not exist"), s.Logger) - return - } - - mapping := &chronograf.Mapping{ - ID: req.ID, - Organization: req.Organization, - Scheme: req.Scheme, - Provider: req.Provider, - ProviderOrganization: req.ProviderOrganization, - } - - err := s.Store.Mappings(ctx).Update(ctx, mapping) - if err != nil { - Error(w, http.StatusInternalServerError, "failed to update mapping in database", s.Logger) - return - } - - cu := newMappingResponse(*mapping) - location(w, cu.Links.Self) - encodeJSON(w, http.StatusOK, cu, s.Logger) -} - -// RemoveMapping removes a mapping -func (s *Service) RemoveMapping(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - id := httprouter.GetParamFromContext(ctx, "id") - - m, err := s.Store.Mappings(ctx).Get(ctx, id) - if err == chronograf.ErrMappingNotFound { - Error(w, http.StatusNotFound, err.Error(), s.Logger) - return - } - - if err != nil { - Error(w, http.StatusInternalServerError, "failed to retrieve mapping from database", s.Logger) - return - } - - if err := s.Store.Mappings(ctx).Delete(ctx, m); err != nil { - Error(w, 
http.StatusInternalServerError, "failed to remove mapping from database", s.Logger) - return - } - - w.WriteHeader(http.StatusNoContent) -} - -func (s *Service) organizationExists(ctx context.Context, orgID string) bool { - if _, err := s.Store.Organizations(ctx).Get(ctx, chronograf.OrganizationQuery{ID: &orgID}); err != nil { - return false - } - - return true -} diff --git a/chronograf/server/mapping_test.go b/chronograf/server/mapping_test.go deleted file mode 100644 index c2f026e193c..00000000000 --- a/chronograf/server/mapping_test.go +++ /dev/null @@ -1,356 +0,0 @@ -package server - -import ( - "bytes" - "context" - "encoding/json" - "io/ioutil" - "net/http/httptest" - "testing" - - "github.com/bouk/httprouter" - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/mocks" - "github.com/influxdata/influxdb/v2/chronograf/roles" -) - -func TestMappings_All(t *testing.T) { - type fields struct { - MappingsStore chronograf.MappingsStore - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - wants wants - }{ - { - name: "get all mappings", - fields: fields{ - MappingsStore: &mocks.MappingsStore{ - AllF: func(ctx context.Context) ([]chronograf.Mapping, error) { - return []chronograf.Mapping{ - { - Organization: "0", - Provider: chronograf.MappingWildcard, - Scheme: chronograf.MappingWildcard, - ProviderOrganization: chronograf.MappingWildcard, - }, - }, nil - }, - }, - }, - wants: wants{ - statusCode: 200, - contentType: "application/json", - body: `{"links":{"self":"/chronograf/v1/mappings"},"mappings":[{"links":{"self":"/chronograf/v1/mappings/"},"id":"","organizationId":"0","provider":"*","scheme":"*","providerOrganization":"*"}]}`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := &Service{ - Store: &mocks.Store{ - MappingsStore: tt.fields.MappingsStore, - }, - Logger: &chronograf.NoopLogger{}, - } - - w := httptest.NewRecorder() - r := httptest.NewRequest("GET", "http://any.url", nil) - s.Mappings(w, r) - - resp := w.Result() - content := resp.Header.Get("Content-Type") - body, _ := ioutil.ReadAll(resp.Body) - - if resp.StatusCode != tt.wants.statusCode { - t.Errorf("%q. Mappings() = %v, want %v", tt.name, resp.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. Mappings() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if eq, _ := jsonEqual(string(body), tt.wants.body); tt.wants.body != "" && !eq { - t.Errorf("%q. 
Mappings() = \n***%v***\n,\nwant\n***%v***", tt.name, string(body), tt.wants.body) - } - }) - } -} - -func TestMappings_Add(t *testing.T) { - type fields struct { - MappingsStore chronograf.MappingsStore - OrganizationsStore chronograf.OrganizationsStore - } - type args struct { - mapping *chronograf.Mapping - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "create new mapping", - fields: fields{ - OrganizationsStore: &mocks.OrganizationsStore{ - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - Name: "The Gnarly Default", - DefaultRole: roles.ViewerRoleName, - }, nil - }, - }, - MappingsStore: &mocks.MappingsStore{ - AddF: func(ctx context.Context, m *chronograf.Mapping) (*chronograf.Mapping, error) { - m.ID = "0" - return m, nil - }, - }, - }, - args: args{ - mapping: &chronograf.Mapping{ - Organization: "0", - Provider: "*", - Scheme: "*", - ProviderOrganization: "*", - }, - }, - wants: wants{ - statusCode: 201, - contentType: "application/json", - body: `{"links":{"self":"/chronograf/v1/mappings/0"},"id":"0","organizationId":"0","provider":"*","scheme":"*","providerOrganization":"*"}`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := &Service{ - Store: &mocks.Store{ - MappingsStore: tt.fields.MappingsStore, - OrganizationsStore: tt.fields.OrganizationsStore, - }, - Logger: &chronograf.NoopLogger{}, - } - - w := httptest.NewRecorder() - r := httptest.NewRequest("GET", "http://any.url", nil) - - buf, _ := json.Marshal(tt.args.mapping) - r.Body = ioutil.NopCloser(bytes.NewReader(buf)) - - s.NewMapping(w, r) - - resp := w.Result() - content := resp.Header.Get("Content-Type") - body, _ := ioutil.ReadAll(resp.Body) - - if resp.StatusCode != tt.wants.statusCode { - t.Errorf("%q. Add() = %v, want %v", tt.name, resp.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. Add() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if eq, _ := jsonEqual(string(body), tt.wants.body); tt.wants.body != "" && !eq { - t.Errorf("%q. 
Add() = \n***%v***\n,\nwant\n***%v***", tt.name, string(body), tt.wants.body) - } - }) - } -} - -func TestMappings_Update(t *testing.T) { - type fields struct { - MappingsStore chronograf.MappingsStore - OrganizationsStore chronograf.OrganizationsStore - } - type args struct { - mapping *chronograf.Mapping - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "update new mapping", - fields: fields{ - OrganizationsStore: &mocks.OrganizationsStore{ - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - Name: "The Gnarly Default", - DefaultRole: roles.ViewerRoleName, - }, nil - }, - }, - MappingsStore: &mocks.MappingsStore{ - UpdateF: func(ctx context.Context, m *chronograf.Mapping) error { - return nil - }, - }, - }, - args: args{ - mapping: &chronograf.Mapping{ - ID: "1", - Organization: "0", - Provider: "*", - Scheme: "*", - ProviderOrganization: "*", - }, - }, - wants: wants{ - statusCode: 200, - contentType: "application/json", - body: `{"links":{"self":"/chronograf/v1/mappings/1"},"id":"1","organizationId":"0","provider":"*","scheme":"*","providerOrganization":"*"}`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := &Service{ - Store: &mocks.Store{ - MappingsStore: tt.fields.MappingsStore, - OrganizationsStore: tt.fields.OrganizationsStore, - }, - Logger: &chronograf.NoopLogger{}, - } - - w := httptest.NewRecorder() - r := httptest.NewRequest("GET", "http://any.url", nil) - - buf, _ := json.Marshal(tt.args.mapping) - r.Body = ioutil.NopCloser(bytes.NewReader(buf)) - r = r.WithContext(httprouter.WithParams( - context.Background(), - httprouter.Params{ - { - Key: "id", - Value: tt.args.mapping.ID, - }, - })) - - s.UpdateMapping(w, r) - - resp := w.Result() - content := resp.Header.Get("Content-Type") - body, _ := ioutil.ReadAll(resp.Body) - - if resp.StatusCode != tt.wants.statusCode { - t.Errorf("%q. Add() = %v, want %v", tt.name, resp.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. Add() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if eq, _ := jsonEqual(string(body), tt.wants.body); tt.wants.body != "" && !eq { - t.Errorf("%q. 
Add() = \n***%v***\n,\nwant\n***%v***", tt.name, string(body), tt.wants.body) - } - }) - } -} - -func TestMappings_Remove(t *testing.T) { - type fields struct { - MappingsStore chronograf.MappingsStore - } - type args struct { - id string - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "remove mapping", - fields: fields{ - MappingsStore: &mocks.MappingsStore{ - GetF: func(ctx context.Context, id string) (*chronograf.Mapping, error) { - return &chronograf.Mapping{ - ID: "1", - Organization: "0", - Provider: "*", - Scheme: "*", - ProviderOrganization: "*", - }, nil - }, - DeleteF: func(ctx context.Context, m *chronograf.Mapping) error { - return nil - }, - }, - }, - args: args{}, - wants: wants{ - statusCode: 204, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := &Service{ - Store: &mocks.Store{ - MappingsStore: tt.fields.MappingsStore, - }, - Logger: &chronograf.NoopLogger{}, - } - - w := httptest.NewRecorder() - r := httptest.NewRequest("GET", "http://any.url", nil) - - r = r.WithContext(httprouter.WithParams( - context.Background(), - httprouter.Params{ - { - Key: "id", - Value: tt.args.id, - }, - })) - - s.RemoveMapping(w, r) - - resp := w.Result() - content := resp.Header.Get("Content-Type") - body, _ := ioutil.ReadAll(resp.Body) - - if resp.StatusCode != tt.wants.statusCode { - t.Errorf("%q. Remove() = %v, want %v", tt.name, resp.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. Remove() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if eq, _ := jsonEqual(string(body), tt.wants.body); tt.wants.body != "" && !eq { - t.Errorf("%q. Remove() = \n***%v***\n,\nwant\n***%v***", tt.name, string(body), tt.wants.body) - } - }) - } -} diff --git a/chronograf/server/me.go b/chronograf/server/me.go deleted file mode 100644 index 6a1f6d6601a..00000000000 --- a/chronograf/server/me.go +++ /dev/null @@ -1,400 +0,0 @@ -package server - -import ( - "encoding/json" - "fmt" - "net/http" - "sort" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/oauth2" - "github.com/influxdata/influxdb/v2/chronograf/organizations" - "golang.org/x/net/context" -) - -type meLinks struct { - Self string `json:"self"` // Self link mapping to this resource -} - -type meResponse struct { - *chronograf.User - Links meLinks `json:"links"` - Organizations []chronograf.Organization `json:"organizations"` - CurrentOrganization *chronograf.Organization `json:"currentOrganization,omitempty"` -} - -type noAuthMeResponse struct { - Links meLinks `json:"links"` -} - -func newNoAuthMeResponse() noAuthMeResponse { - return noAuthMeResponse{ - Links: meLinks{ - Self: "/chronograf/v1/me", - }, - } -} - -// If new user response is nil, return an empty meResponse because it -// indicates authentication is not needed -func newMeResponse(usr *chronograf.User, org string) meResponse { - base := "/chronograf/v1" - name := "me" - if usr != nil { - base = fmt.Sprintf("/chronograf/v1/organizations/%s/users", org) - name = PathEscape(fmt.Sprintf("%d", usr.ID)) - } - - return meResponse{ - User: usr, - Links: meLinks{ - Self: fmt.Sprintf("%s/%s", base, name), - }, - } -} - -// TODO: This Scheme value is hard-coded temporarily since we only currently -// support OAuth2. 
This hard-coding should be removed whenever we add -// support for other authentication schemes. -func getScheme(ctx context.Context) (string, error) { - return "oauth2", nil -} - -func getPrincipal(ctx context.Context) (oauth2.Principal, error) { - principal, ok := ctx.Value(oauth2.PrincipalKey).(oauth2.Principal) - if !ok { - return oauth2.Principal{}, fmt.Errorf("token not found") - } - - return principal, nil -} - -func getValidPrincipal(ctx context.Context) (oauth2.Principal, error) { - p, err := getPrincipal(ctx) - if err != nil { - return p, err - } - if p.Subject == "" { - return oauth2.Principal{}, fmt.Errorf("token not found") - } - if p.Issuer == "" { - return oauth2.Principal{}, fmt.Errorf("token not found") - } - return p, nil -} - -type meRequest struct { - // Organization is the OrganizationID - Organization string `json:"organization"` -} - -// UpdateMe changes the user's current organization on the JWT and responds -// with the same semantics as Me -func (s *Service) UpdateMe(auth oauth2.Authenticator) func(http.ResponseWriter, *http.Request) { - return func(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - serverCtx := serverContext(ctx) - principal, err := auth.Validate(ctx, r) - if err != nil { - s.Logger.Error(fmt.Sprintf("Invalid principal: %v", err)) - Error(w, http.StatusForbidden, "invalid principal", s.Logger) - return - } - var req meRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - invalidJSON(w, s.Logger) - return - } - - // validate that the organization exists - org, err := s.Store.Organizations(serverCtx).Get(serverCtx, chronograf.OrganizationQuery{ID: &req.Organization}) - if err != nil { - Error(w, http.StatusBadRequest, err.Error(), s.Logger) - return - } - - // validate that user belongs to organization - ctx = context.WithValue(ctx, organizations.ContextKey, req.Organization) - - p, err := getValidPrincipal(ctx) - if err != nil { - invalidData(w, err, s.Logger) - return - } - if p.Organization == "" { - defaultOrg, err := s.Store.Organizations(serverCtx).DefaultOrganization(serverCtx) - if err != nil { - unknownErrorWithMessage(w, err, s.Logger) - return - } - p.Organization = defaultOrg.ID - } - scheme, err := getScheme(ctx) - if err != nil { - invalidData(w, err, s.Logger) - return - } - _, err = s.Store.Users(ctx).Get(ctx, chronograf.UserQuery{ - Name: &p.Subject, - Provider: &p.Issuer, - Scheme: &scheme, - }) - if err == chronograf.ErrUserNotFound { - // If the user was not found, check to see if they are a super admin. If - // they are, add them to the organization. - u, err := s.Store.Users(serverCtx).Get(serverCtx, chronograf.UserQuery{ - Name: &p.Subject, - Provider: &p.Issuer, - Scheme: &scheme, - }) - if err != nil { - Error(w, http.StatusForbidden, err.Error(), s.Logger) - return - } - - if !u.SuperAdmin { - // Since a user is not a part of this organization and not a super admin, - // we should tell them that they are Forbidden (403) from accessing this resource - Error(w, http.StatusForbidden, chronograf.ErrUserNotFound.Error(), s.Logger) - return - } - - // If the user is a super admin give them an admin role in the - // requested organization. 
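getValidPrincipal, deleted above, treats a principal with an empty Subject or Issuer as an absent token. The same rule, isolated as a compact sketch with a local stand-in for oauth2.Principal:

```go
package main

import (
	"errors"
	"fmt"
)

// principal carries the two fields the deleted getValidPrincipal checks.
type principal struct {
	Subject string
	Issuer  string
}

// validate applies the same rule as the deleted helper: both subject
// and issuer must be non-empty, otherwise the token is treated as missing.
func validate(p principal) error {
	if p.Subject == "" || p.Issuer == "" {
		return errors.New("token not found")
	}
	return nil
}

func main() {
	fmt.Println(validate(principal{}))                                // token not found
	fmt.Println(validate(principal{Subject: "me", Issuer: "github"})) // <nil>
}
```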
- u.Roles = append(u.Roles, chronograf.Role{ - Organization: org.ID, - Name: org.DefaultRole, - }) - if err := s.Store.Users(serverCtx).Update(serverCtx, u); err != nil { - unknownErrorWithMessage(w, err, s.Logger) - return - } - } else if err != nil { - Error(w, http.StatusBadRequest, err.Error(), s.Logger) - return - } - - // TODO: change to principal.CurrentOrganization - principal.Organization = req.Organization - - if err := auth.Authorize(ctx, w, principal); err != nil { - Error(w, http.StatusInternalServerError, err.Error(), s.Logger) - return - } - - ctx = context.WithValue(ctx, oauth2.PrincipalKey, principal) - - s.Me(w, r.WithContext(ctx)) - } -} - -// Me does a findOrCreate based on the username in the context -func (s *Service) Me(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - if !s.UseAuth { - // If there's no authentication, return an empty user - res := newNoAuthMeResponse() - encodeJSON(w, http.StatusOK, res, s.Logger) - return - } - - p, err := getValidPrincipal(ctx) - if err != nil { - invalidData(w, err, s.Logger) - return - } - scheme, err := getScheme(ctx) - if err != nil { - invalidData(w, err, s.Logger) - return - } - - ctx = context.WithValue(ctx, organizations.ContextKey, p.Organization) - serverCtx := serverContext(ctx) - - defaultOrg, err := s.Store.Organizations(serverCtx).DefaultOrganization(serverCtx) - if err != nil { - unknownErrorWithMessage(w, err, s.Logger) - return - } - - if p.Organization == "" { - p.Organization = defaultOrg.ID - } - - usr, err := s.Store.Users(serverCtx).Get(serverCtx, chronograf.UserQuery{ - Name: &p.Subject, - Provider: &p.Issuer, - Scheme: &scheme, - }) - if err != nil && err != chronograf.ErrUserNotFound { - unknownErrorWithMessage(w, err, s.Logger) - return - } - - // user exists - if usr != nil { - superAdmin := s.mapPrincipalToSuperAdmin(p) - if superAdmin && !usr.SuperAdmin { - usr.SuperAdmin = superAdmin - err := s.Store.Users(serverCtx).Update(serverCtx, usr) - if err != nil { - unknownErrorWithMessage(w, err, s.Logger) - return - } - } - - currentOrg, err := s.Store.Organizations(serverCtx).Get(serverCtx, chronograf.OrganizationQuery{ID: &p.Organization}) - if err == chronograf.ErrOrganizationNotFound { - // The intent is to force the user to go through another auth flow - Error(w, http.StatusForbidden, "user's current organization was not found", s.Logger) - return - } - if err != nil { - unknownErrorWithMessage(w, err, s.Logger) - return - } - - orgs, err := s.usersOrganizations(serverCtx, usr) - if err != nil { - unknownErrorWithMessage(w, err, s.Logger) - return - } - - res := newMeResponse(usr, currentOrg.ID) - res.Organizations = orgs - res.CurrentOrganization = currentOrg - encodeJSON(w, http.StatusOK, res, s.Logger) - return - } - - // Because we didn't find a user, make a new one - user := &chronograf.User{ - Name: p.Subject, - Provider: p.Issuer, - // TODO: This Scheme value is hard-coded temporarily since we only currently - // support OAuth2. This hard-coding should be removed whenever we add - // support for other authentication schemes. - Scheme: scheme, - // TODO(desa): this needs a better name - SuperAdmin: s.newUsersAreSuperAdmin(), - } - - superAdmin := s.mapPrincipalToSuperAdmin(p) - if superAdmin { - user.SuperAdmin = superAdmin - } - - roles, err := s.mapPrincipalToRoles(serverCtx, p) - if err != nil { - Error(w, http.StatusInternalServerError, err.Error(), s.Logger) - return - } - - if !superAdmin && len(roles) == 0 { - Error(w, http.StatusForbidden, "This Chronograf is private.
To gain access, you must be explicitly added by an administrator.", s.Logger) - return - } - - // If the user is a superadmin, give them a role in the default organization - if user.SuperAdmin { - hasDefaultOrgRole := false - for _, role := range roles { - if role.Organization == defaultOrg.ID { - hasDefaultOrgRole = true - break - } - } - if !hasDefaultOrgRole { - roles = append(roles, chronograf.Role{ - Name: defaultOrg.DefaultRole, - Organization: defaultOrg.ID, - }) - } - } - - user.Roles = roles - - newUser, err := s.Store.Users(serverCtx).Add(serverCtx, user) - if err != nil { - msg := fmt.Errorf("error storing user %s: %v", user.Name, err) - unknownErrorWithMessage(w, msg, s.Logger) - return - } - - orgs, err := s.usersOrganizations(serverCtx, newUser) - if err != nil { - unknownErrorWithMessage(w, err, s.Logger) - return - } - currentOrg, err := s.Store.Organizations(serverCtx).Get(serverCtx, chronograf.OrganizationQuery{ID: &p.Organization}) - if err != nil { - unknownErrorWithMessage(w, err, s.Logger) - return - } - res := newMeResponse(newUser, currentOrg.ID) - res.Organizations = orgs - res.CurrentOrganization = currentOrg - encodeJSON(w, http.StatusOK, res, s.Logger) -} - -func (s *Service) firstUser() bool { - serverCtx := serverContext(context.Background()) - numUsers, err := s.Store.Users(serverCtx).Num(serverCtx) - if err != nil { - return false - } - - return numUsers == 0 -} -func (s *Service) newUsersAreSuperAdmin() bool { - // It's not necessary to enforce that the first user is superAdmin here, since - // superAdminNewUsers defaults to true, but there's nothing else in the - // application that dictates that it must be true. - // So for that reason, we kept this here for now. We've discussed the - // future possibility of allowing users to override default values via CLI and - // this case could possibly happen then. 
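The branch just above guarantees that a superadmin always ends up with a role in the default organization. The same check, isolated as a small sketch with a local type mirroring chronograf.Role:

```go
package main

import "fmt"

// role mirrors chronograf.Role for this sketch.
type role struct {
	Name         string
	Organization string
}

// ensureDefaultOrgRole mirrors the loop in the deleted Me handler: if a
// superadmin has no role in the default organization, one is appended
// using that organization's default role name.
func ensureDefaultOrgRole(roles []role, defaultOrgID, defaultRole string) []role {
	for _, r := range roles {
		if r.Organization == defaultOrgID {
			return roles
		}
	}
	return append(roles, role{Name: defaultRole, Organization: defaultOrgID})
}

func main() {
	got := ensureDefaultOrgRole(nil, "0", "viewer")
	fmt.Printf("%+v\n", got) // [{Name:viewer Organization:0}]
}
```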
- if s.firstUser() { - return true - } - serverCtx := serverContext(context.Background()) - cfg, err := s.Store.Config(serverCtx).Get(serverCtx) - if err != nil { - return false - } - return cfg.Auth.SuperAdminNewUsers -} - -func (s *Service) usersOrganizations(ctx context.Context, u *chronograf.User) ([]chronograf.Organization, error) { - if u == nil { - // TODO(desa): better error - return nil, fmt.Errorf("user was nil") - } - - orgIDs := map[string]bool{} - for _, role := range u.Roles { - orgIDs[role.Organization] = true - } - - orgs := []chronograf.Organization{} - for orgID := range orgIDs { - org, err := s.Store.Organizations(ctx).Get(ctx, chronograf.OrganizationQuery{ID: &orgID}) - - // There can be race conditions between deleting a organization and the me query - if err == chronograf.ErrOrganizationNotFound { - continue - } - - // Any other error should cause an error to be returned - if err != nil { - return nil, err - } - orgs = append(orgs, *org) - } - - sort.Slice(orgs, func(i, j int) bool { - return orgs[i].ID < orgs[j].ID - }) - - return orgs, nil -} diff --git a/chronograf/server/me_test.go b/chronograf/server/me_test.go deleted file mode 100644 index 8848b477ed7..00000000000 --- a/chronograf/server/me_test.go +++ /dev/null @@ -1,1455 +0,0 @@ -package server - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "testing" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/mocks" - "github.com/influxdata/influxdb/v2/chronograf/oauth2" - "github.com/influxdata/influxdb/v2/chronograf/roles" -) - -func TestService_Me(t *testing.T) { - type fields struct { - UsersStore chronograf.UsersStore - OrganizationsStore chronograf.OrganizationsStore - MappingsStore chronograf.MappingsStore - ConfigStore chronograf.ConfigStore - SuperAdminProviderGroups superAdminProviderGroups - Logger chronograf.Logger - UseAuth bool - } - type args struct { - w *httptest.ResponseRecorder - r *http.Request - } - tests := []struct { - name string - fields fields - args args - principal oauth2.Principal - wantStatus int - wantContentType string - wantBody string - }{ - { - name: "Existing user - not member of any organization", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest("GET", "http://example.com/foo", nil), - }, - fields: fields{ - UseAuth: true, - Logger: &chronograf.NoopLogger{}, - ConfigStore: &mocks.ConfigStore{ - Config: &chronograf.Config{ - Auth: chronograf.AuthConfig{ - SuperAdminNewUsers: false, - }, - }, - }, - MappingsStore: &mocks.MappingsStore{ - AllF: func(ctx context.Context) ([]chronograf.Mapping, error) { - return []chronograf.Mapping{ - { - Organization: "0", - Provider: chronograf.MappingWildcard, - Scheme: chronograf.MappingWildcard, - ProviderOrganization: chronograf.MappingWildcard, - }, - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - Name: "Default", - DefaultRole: roles.ViewerRoleName, - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - switch *q.ID { - case "0": - return &chronograf.Organization{ - ID: "0", - Name: "Default", - DefaultRole: roles.ViewerRoleName, - }, nil - case "1": - return &chronograf.Organization{ - ID: "1", - Name: "The Bad Place", - }, nil - } - return nil, nil - }, - }, - 
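usersOrganizations, shown above, dedupes the organization IDs drawn from the user's roles and sorts the result so /me responses are stable across calls, silently skipping organizations deleted by a concurrent request. The core of that, as a self-contained sketch:

```go
package main

import (
	"fmt"
	"sort"
)

// dedupeOrgIDs mirrors the first half of the deleted usersOrganizations:
// collect the distinct organization IDs from a user's roles, then sort
// them so the response ordering is deterministic.
func dedupeOrgIDs(roleOrgs []string) []string {
	seen := map[string]bool{}
	for _, id := range roleOrgs {
		seen[id] = true
	}
	ids := make([]string, 0, len(seen))
	for id := range seen {
		ids = append(ids, id)
	}
	sort.Strings(ids)
	return ids
}

func main() {
	fmt.Println(dedupeOrgIDs([]string{"1337", "0", "1337"})) // [0 1337]
}
```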
UsersStore: &mocks.UsersStore{ - NumF: func(ctx context.Context) (int, error) { - // This function gets to verify that there is at least one first user - return 1, nil - }, - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return &chronograf.User{ - Name: "me", - Provider: "github", - Scheme: "oauth2", - }, nil - }, - UpdateF: func(ctx context.Context, u *chronograf.User) error { - return nil - }, - }, - }, - principal: oauth2.Principal{ - Subject: "me", - Issuer: "github", - }, - wantStatus: http.StatusOK, - wantContentType: "application/json", - wantBody: `{"name":"me","roles":null,"provider":"github","scheme":"oauth2","links":{"self":"/chronograf/v1/organizations/0/users/0"},"organizations":[],"currentOrganization":{"id":"0","name":"Default","defaultRole":"viewer"}}`, - }, - { - name: "Existing superadmin - not member of any organization", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest("GET", "http://example.com/foo", nil), - }, - fields: fields{ - UseAuth: true, - Logger: &chronograf.NoopLogger{}, - MappingsStore: &mocks.MappingsStore{ - AllF: func(ctx context.Context) ([]chronograf.Mapping, error) { - return []chronograf.Mapping{}, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - Name: "Default", - DefaultRole: roles.ViewerRoleName, - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - switch *q.ID { - case "0": - return &chronograf.Organization{ - ID: "0", - Name: "Default", - DefaultRole: roles.ViewerRoleName, - }, nil - case "1": - return &chronograf.Organization{ - ID: "1", - Name: "The Bad Place", - }, nil - } - return nil, nil - }, - }, - UsersStore: &mocks.UsersStore{ - NumF: func(ctx context.Context) (int, error) { - // This function gets to verify that there is at least one first user - return 1, nil - }, - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return &chronograf.User{ - Name: "me", - Provider: "github", - Scheme: "oauth2", - SuperAdmin: true, - }, nil - }, - UpdateF: func(ctx context.Context, u *chronograf.User) error { - return nil - }, - }, - }, - principal: oauth2.Principal{ - Subject: "me", - Issuer: "github", - }, - wantStatus: http.StatusOK, - wantContentType: "application/json", - wantBody: `{"name":"me","roles":null,"provider":"github","scheme":"oauth2","superAdmin":true,"links":{"self":"/chronograf/v1/organizations/0/users/0"},"organizations":[],"currentOrganization":{"id":"0","name":"Default","defaultRole":"viewer"}}`, - }, - { - name: "Existing user - organization doesn't exist", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest("GET", "http://example.com/foo", nil), - }, - fields: fields{ - UseAuth: true, - Logger: &chronograf.NoopLogger{}, - MappingsStore: &mocks.MappingsStore{ - AllF: func(ctx context.Context) ([]chronograf.Mapping, error) { - return []chronograf.Mapping{}, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return 
&chronograf.Organization{ - ID: "0", - Name: "Default", - DefaultRole: roles.ViewerRoleName, - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - switch *q.ID { - case "0": - return &chronograf.Organization{ - ID: "0", - Name: "Default", - DefaultRole: roles.ViewerRoleName, - }, nil - } - return nil, chronograf.ErrOrganizationNotFound - }, - }, - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return &chronograf.User{ - Name: "me", - Provider: "github", - Scheme: "oauth2", - }, nil - }, - UpdateF: func(ctx context.Context, u *chronograf.User) error { - return nil - }, - }, - }, - principal: oauth2.Principal{ - Subject: "me", - Issuer: "github", - Organization: "1", - }, - wantStatus: http.StatusForbidden, - wantContentType: "application/json", - wantBody: `{"code":403,"message":"user's current organization was not found"}`, - }, - { - name: "default mapping applies to new user", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest("GET", "http://example.com/foo", nil), - }, - fields: fields{ - UseAuth: true, - Logger: &chronograf.NoopLogger{}, - ConfigStore: &mocks.ConfigStore{ - Config: &chronograf.Config{ - Auth: chronograf.AuthConfig{ - SuperAdminNewUsers: true, - }, - }, - }, - MappingsStore: &mocks.MappingsStore{ - AllF: func(ctx context.Context) ([]chronograf.Mapping, error) { - return []chronograf.Mapping{ - { - Organization: "0", - Provider: chronograf.MappingWildcard, - Scheme: chronograf.MappingWildcard, - ProviderOrganization: chronograf.MappingWildcard, - }, - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - Name: "The Gnarly Default", - DefaultRole: roles.ViewerRoleName, - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - Name: "The Gnarly Default", - DefaultRole: roles.ViewerRoleName, - }, nil - }, - AllF: func(ctx context.Context) ([]chronograf.Organization, error) { - return []chronograf.Organization{ - chronograf.Organization{ - ID: "0", - Name: "The Gnarly Default", - DefaultRole: roles.ViewerRoleName, - }, - }, nil - }, - }, - UsersStore: &mocks.UsersStore{ - NumF: func(ctx context.Context) (int, error) { - // This function gets to verify that there is at least one first user - return 1, nil - }, - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return nil, chronograf.ErrUserNotFound - }, - AddF: func(ctx context.Context, u *chronograf.User) (*chronograf.User, error) { - return u, nil - }, - UpdateF: func(ctx context.Context, u *chronograf.User) error { - return nil - }, - }, - }, - principal: oauth2.Principal{ - Subject: "secret", - Issuer: "auth0", - }, - wantStatus: http.StatusOK, - wantContentType: "application/json", - wantBody: 
`{"name":"secret","superAdmin":true,"roles":[{"name":"viewer","organization":"0"}],"provider":"auth0","scheme":"oauth2","links":{"self":"/chronograf/v1/organizations/0/users/0"},"organizations":[{"id":"0","name":"The Gnarly Default","defaultRole":"viewer"}],"currentOrganization":{"id":"0","name":"The Gnarly Default","defaultRole":"viewer"}}`, - }, - { - name: "New user - New users not super admin, not first user", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest("GET", "http://example.com/foo", nil), - }, - fields: fields{ - UseAuth: true, - Logger: &chronograf.NoopLogger{}, - ConfigStore: &mocks.ConfigStore{ - Config: &chronograf.Config{ - Auth: chronograf.AuthConfig{ - SuperAdminNewUsers: false, - }, - }, - }, - MappingsStore: &mocks.MappingsStore{ - AllF: func(ctx context.Context) ([]chronograf.Mapping, error) { - return []chronograf.Mapping{ - { - Organization: "0", - Provider: chronograf.MappingWildcard, - Scheme: chronograf.MappingWildcard, - ProviderOrganization: chronograf.MappingWildcard, - }, - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - Name: "The Gnarly Default", - DefaultRole: roles.ViewerRoleName, - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - Name: "The Gnarly Default", - DefaultRole: roles.ViewerRoleName, - }, nil - }, - AllF: func(ctx context.Context) ([]chronograf.Organization, error) { - return []chronograf.Organization{ - chronograf.Organization{ - ID: "0", - Name: "The Gnarly Default", - DefaultRole: roles.ViewerRoleName, - }, - }, nil - }, - }, - UsersStore: &mocks.UsersStore{ - NumF: func(ctx context.Context) (int, error) { - // This function gets to verify that there is at least one first user - return 1, nil - }, - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return nil, chronograf.ErrUserNotFound - }, - AddF: func(ctx context.Context, u *chronograf.User) (*chronograf.User, error) { - return u, nil - }, - UpdateF: func(ctx context.Context, u *chronograf.User) error { - return nil - }, - }, - }, - principal: oauth2.Principal{ - Subject: "secret", - Issuer: "auth0", - }, - wantStatus: http.StatusOK, - wantContentType: "application/json", - wantBody: `{"name":"secret","roles":[{"name":"viewer","organization":"0"}],"provider":"auth0","scheme":"oauth2","links":{"self":"/chronograf/v1/organizations/0/users/0"},"organizations":[{"id":"0","name":"The Gnarly Default","defaultRole":"viewer"}],"currentOrganization":{"id":"0","name":"The Gnarly Default","defaultRole":"viewer"}}`, - }, - { - name: "New user - New users not super admin, first user", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest("GET", "http://example.com/foo", nil), - }, - fields: fields{ - UseAuth: true, - Logger: &chronograf.NoopLogger{}, - ConfigStore: &mocks.ConfigStore{ - Config: &chronograf.Config{ - Auth: chronograf.AuthConfig{ - SuperAdminNewUsers: false, - }, - }, - }, - MappingsStore: &mocks.MappingsStore{ - AllF: func(ctx context.Context) ([]chronograf.Mapping, error) { - return []chronograf.Mapping{ - { - Organization: "0", - Provider: chronograf.MappingWildcard, - Scheme: chronograf.MappingWildcard, - 
ProviderOrganization: chronograf.MappingWildcard, - }, - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - Name: "The Gnarly Default", - DefaultRole: roles.ViewerRoleName, - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - Name: "The Gnarly Default", - DefaultRole: roles.ViewerRoleName, - }, nil - }, - AllF: func(ctx context.Context) ([]chronograf.Organization, error) { - return []chronograf.Organization{ - chronograf.Organization{ - ID: "0", - Name: "The Gnarly Default", - DefaultRole: roles.ViewerRoleName, - }, - }, nil - }, - }, - UsersStore: &mocks.UsersStore{ - NumF: func(ctx context.Context) (int, error) { - // This function gets to verify that there is at least one first user - return 0, nil - }, - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return nil, chronograf.ErrUserNotFound - }, - AddF: func(ctx context.Context, u *chronograf.User) (*chronograf.User, error) { - return u, nil - }, - UpdateF: func(ctx context.Context, u *chronograf.User) error { - return nil - }, - }, - }, - principal: oauth2.Principal{ - Subject: "secret", - Issuer: "auth0", - }, - wantStatus: http.StatusOK, - wantContentType: "application/json", - wantBody: `{"name":"secret","superAdmin":true,"roles":[{"name":"viewer","organization":"0"}],"provider":"auth0","scheme":"oauth2","links":{"self":"/chronograf/v1/organizations/0/users/0"},"organizations":[{"id":"0","name":"The Gnarly Default","defaultRole":"viewer"}],"currentOrganization":{"id":"0","name":"The Gnarly Default","defaultRole":"viewer"}}`, - }, - { - name: "Error adding user", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest("GET", "http://example.com/foo", nil), - }, - fields: fields{ - UseAuth: true, - ConfigStore: &mocks.ConfigStore{ - Config: &chronograf.Config{ - Auth: chronograf.AuthConfig{ - SuperAdminNewUsers: false, - }, - }, - }, - MappingsStore: &mocks.MappingsStore{ - AllF: func(ctx context.Context) ([]chronograf.Mapping, error) { - return []chronograf.Mapping{}, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - Name: "The Bad Place", - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - Name: "The Bad Place", - }, nil - }, - AllF: func(ctx context.Context) ([]chronograf.Organization, error) { - return []chronograf.Organization{ - chronograf.Organization{ - ID: "0", - Name: "The Bad Place", - DefaultRole: roles.ViewerRoleName, - }, - }, nil - }, - }, - UsersStore: &mocks.UsersStore{ - NumF: func(ctx context.Context) (int, error) { - // This function gets to verify that there is at least one first user - return 1, nil - }, - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - return nil, chronograf.ErrUserNotFound - }, - AddF: func(ctx context.Context, u *chronograf.User) (*chronograf.User, error) { - return nil, fmt.Errorf("why Heavy?") - }, - UpdateF: func(ctx context.Context, u 
*chronograf.User) error { - return nil - }, - }, - Logger: &chronograf.NoopLogger{}, - }, - principal: oauth2.Principal{ - Subject: "secret", - Issuer: "heroku", - }, - wantStatus: http.StatusForbidden, - wantContentType: "application/json", - wantBody: `{"code":403,"message":"This Chronograf is private. To gain access, you must be explicitly added by an administrator."}`, - }, - { - name: "No Auth", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest("GET", "http://example.com/foo", nil), - }, - fields: fields{ - UseAuth: false, - ConfigStore: &mocks.ConfigStore{ - Config: &chronograf.Config{ - Auth: chronograf.AuthConfig{ - SuperAdminNewUsers: false, - }, - }, - }, - Logger: &chronograf.NoopLogger{}, - }, - wantStatus: http.StatusOK, - wantContentType: "application/json", - wantBody: `{"links":{"self":"/chronograf/v1/me"}}`, - }, - { - name: "Empty Principal", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest("GET", "http://example.com/foo", nil), - }, - fields: fields{ - UseAuth: true, - ConfigStore: &mocks.ConfigStore{ - Config: &chronograf.Config{ - Auth: chronograf.AuthConfig{ - SuperAdminNewUsers: false, - }, - }, - }, - Logger: &chronograf.NoopLogger{}, - }, - wantStatus: http.StatusUnprocessableEntity, - principal: oauth2.Principal{ - Subject: "", - Issuer: "", - }, - }, - { - name: "new user - Chronograf is private", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest("GET", "http://example.com/foo", nil), - }, - fields: fields{ - UseAuth: true, - Logger: &chronograf.NoopLogger{}, - ConfigStore: mocks.ConfigStore{ - Config: &chronograf.Config{ - Auth: chronograf.AuthConfig{ - SuperAdminNewUsers: false, - }, - }, - }, - MappingsStore: &mocks.MappingsStore{ - AllF: func(ctx context.Context) ([]chronograf.Mapping, error) { - return []chronograf.Mapping{}, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - Name: "The Bad Place", - DefaultRole: roles.MemberRoleName, - }, nil - }, - }, - UsersStore: &mocks.UsersStore{ - NumF: func(ctx context.Context) (int, error) { - // This function gets to verify that there is at least one first user - return 1, nil - }, - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return nil, chronograf.ErrUserNotFound - }, - AddF: func(ctx context.Context, u *chronograf.User) (*chronograf.User, error) { - return u, nil - }, - UpdateF: func(ctx context.Context, u *chronograf.User) error { - return nil - }, - }, - }, - principal: oauth2.Principal{ - Subject: "secret", - Issuer: "auth0", - }, - wantStatus: http.StatusForbidden, - wantContentType: "application/json", - wantBody: `{"code":403,"message":"This Chronograf is private. 
To gain access, you must be explicitly added by an administrator."}`, - }, - { - name: "new user - Chronograf is private, user is in auth0 superadmin group", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest("GET", "http://example.com/foo", nil), - }, - fields: fields{ - UseAuth: true, - SuperAdminProviderGroups: superAdminProviderGroups{ - auth0: "example", - }, - Logger: &chronograf.NoopLogger{}, - ConfigStore: mocks.ConfigStore{ - Config: &chronograf.Config{ - Auth: chronograf.AuthConfig{ - SuperAdminNewUsers: false, - }, - }, - }, - MappingsStore: &mocks.MappingsStore{ - AllF: func(ctx context.Context) ([]chronograf.Mapping, error) { - return []chronograf.Mapping{}, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - Name: "The Bad Place", - DefaultRole: roles.MemberRoleName, - }, nil - }, - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - Name: "The Bad Place", - DefaultRole: roles.MemberRoleName, - }, nil - }, - }, - UsersStore: &mocks.UsersStore{ - NumF: func(ctx context.Context) (int, error) { - // This function gets to verify that there is at least one first user - return 1, nil - }, - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return nil, chronograf.ErrUserNotFound - }, - AddF: func(ctx context.Context, u *chronograf.User) (*chronograf.User, error) { - return u, nil - }, - UpdateF: func(ctx context.Context, u *chronograf.User) error { - return nil - }, - }, - }, - principal: oauth2.Principal{ - Subject: "secret", - Issuer: "auth0", - Group: "not_example,example", - }, - wantStatus: http.StatusOK, - wantContentType: "application/json", - wantBody: `{"name":"secret","roles":[{"name":"member","organization":"0"}],"provider":"auth0","scheme":"oauth2","superAdmin":true,"links":{"self":"/chronograf/v1/organizations/0/users/0"},"organizations":[{"id":"0","name":"The Bad Place","defaultRole":"member"}],"currentOrganization":{"id":"0","name":"The Bad Place","defaultRole":"member"}}`, - }, - { - name: "new user - Chronograf is private, user is not in auth0 superadmin group", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest("GET", "http://example.com/foo", nil), - }, - fields: fields{ - UseAuth: true, - SuperAdminProviderGroups: superAdminProviderGroups{ - auth0: "example", - }, - Logger: &chronograf.NoopLogger{}, - ConfigStore: mocks.ConfigStore{ - Config: &chronograf.Config{ - Auth: chronograf.AuthConfig{ - SuperAdminNewUsers: false, - }, - }, - }, - MappingsStore: &mocks.MappingsStore{ - AllF: func(ctx context.Context) ([]chronograf.Mapping, error) { - return []chronograf.Mapping{}, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - Name: "The Bad Place", - DefaultRole: roles.MemberRoleName, - }, nil - }, - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - Name: "The Bad Place", - DefaultRole: roles.MemberRoleName, - }, nil - }, - }, - UsersStore: &mocks.UsersStore{ - NumF: func(ctx 
context.Context) (int, error) { - // This function gets to verify that there is at least one first user - return 1, nil - }, - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return nil, chronograf.ErrUserNotFound - }, - AddF: func(ctx context.Context, u *chronograf.User) (*chronograf.User, error) { - return u, nil - }, - UpdateF: func(ctx context.Context, u *chronograf.User) error { - return nil - }, - }, - }, - principal: oauth2.Principal{ - Subject: "secret", - Issuer: "auth0", - Group: "not_example", - }, - wantStatus: http.StatusForbidden, - wantContentType: "application/json", - wantBody: `{"code":403,"message":"This Chronograf is private. To gain access, you must be explicitly added by an administrator."}`, - }, - { - name: "new user - Chronograf is not private, user is in auth0 superadmin group", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest("GET", "http://example.com/foo", nil), - }, - fields: fields{ - UseAuth: true, - SuperAdminProviderGroups: superAdminProviderGroups{ - auth0: "example", - }, - Logger: &chronograf.NoopLogger{}, - ConfigStore: mocks.ConfigStore{ - Config: &chronograf.Config{ - Auth: chronograf.AuthConfig{ - SuperAdminNewUsers: false, - }, - }, - }, - MappingsStore: &mocks.MappingsStore{ - AllF: func(ctx context.Context) ([]chronograf.Mapping, error) { - return []chronograf.Mapping{ - { - Organization: "0", - Provider: chronograf.MappingWildcard, - Scheme: chronograf.MappingWildcard, - ProviderOrganization: chronograf.MappingWildcard, - }, - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - Name: "The Bad Place", - DefaultRole: roles.MemberRoleName, - }, nil - }, - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - Name: "The Bad Place", - DefaultRole: roles.MemberRoleName, - }, nil - }, - }, - UsersStore: &mocks.UsersStore{ - NumF: func(ctx context.Context) (int, error) { - // This function gets to verify that there is at least one first user - return 1, nil - }, - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return nil, chronograf.ErrUserNotFound - }, - AddF: func(ctx context.Context, u *chronograf.User) (*chronograf.User, error) { - return u, nil - }, - UpdateF: func(ctx context.Context, u *chronograf.User) error { - return nil - }, - }, - }, - principal: oauth2.Principal{ - Subject: "secret", - Issuer: "auth0", - Group: "example", - }, - wantStatus: http.StatusOK, - wantContentType: "application/json", - wantBody: `{"name":"secret","roles":[{"name":"member","organization":"0"}],"provider":"auth0","scheme":"oauth2","superAdmin":true,"links":{"self":"/chronograf/v1/organizations/0/users/0"},"organizations":[{"id":"0","name":"The Bad Place","defaultRole":"member"}],"currentOrganization":{"id":"0","name":"The Bad Place","defaultRole":"member"}}`, - }, - { - name: "new user - Chronograf is not private (has a fully open wildcard mapping to an org), user is not in auth0 superadmin group", - args: args{ - w: 
httptest.NewRecorder(), - r: httptest.NewRequest("GET", "http://example.com/foo", nil), - }, - fields: fields{ - UseAuth: true, - SuperAdminProviderGroups: superAdminProviderGroups{ - auth0: "example", - }, - Logger: &chronograf.NoopLogger{}, - ConfigStore: mocks.ConfigStore{ - Config: &chronograf.Config{ - Auth: chronograf.AuthConfig{ - SuperAdminNewUsers: false, - }, - }, - }, - MappingsStore: &mocks.MappingsStore{ - AllF: func(ctx context.Context) ([]chronograf.Mapping, error) { - return []chronograf.Mapping{ - { - Organization: "0", - Provider: chronograf.MappingWildcard, - Scheme: chronograf.MappingWildcard, - ProviderOrganization: chronograf.MappingWildcard, - }, - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - Name: "The Bad Place", - DefaultRole: roles.MemberRoleName, - }, nil - }, - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - Name: "The Bad Place", - DefaultRole: roles.MemberRoleName, - }, nil - }, - }, - UsersStore: &mocks.UsersStore{ - NumF: func(ctx context.Context) (int, error) { - // This function gets to verify that there is at least one first user - return 1, nil - }, - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return nil, chronograf.ErrUserNotFound - }, - AddF: func(ctx context.Context, u *chronograf.User) (*chronograf.User, error) { - return u, nil - }, - UpdateF: func(ctx context.Context, u *chronograf.User) error { - return nil - }, - }, - }, - principal: oauth2.Principal{ - Subject: "secret", - Issuer: "auth0", - Group: "not_example", - }, - wantStatus: http.StatusOK, - wantContentType: "application/json", - wantBody: `{"name":"secret","roles":[{"name":"member","organization":"0"}],"provider":"auth0","scheme":"oauth2","links":{"self":"/chronograf/v1/organizations/0/users/0"},"organizations":[{"id":"0","name":"The Bad Place","defaultRole":"member"}],"currentOrganization":{"id":"0","name":"The Bad Place","defaultRole":"member"}}`, - }, - { - name: "Existing user - Chronograf is not private, user doesn't have SuperAdmin status, user is in auth0 superadmin group", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest("GET", "http://example.com/foo", nil), - }, - fields: fields{ - UseAuth: true, - SuperAdminProviderGroups: superAdminProviderGroups{ - auth0: "example", - }, - Logger: &chronograf.NoopLogger{}, - ConfigStore: mocks.ConfigStore{ - Config: &chronograf.Config{}, - }, - MappingsStore: &mocks.MappingsStore{ - AllF: func(ctx context.Context) ([]chronograf.Mapping, error) { - return []chronograf.Mapping{ - { - Organization: "0", - Provider: chronograf.MappingWildcard, - Scheme: chronograf.MappingWildcard, - ProviderOrganization: chronograf.MappingWildcard, - }, - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - Name: "The Bad Place", - DefaultRole: roles.MemberRoleName, - }, nil - }, - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - Name: "The Bad Place", - 
DefaultRole: roles.MemberRoleName, - }, nil - }, - }, - UsersStore: &mocks.UsersStore{ - NumF: func(ctx context.Context) (int, error) { - // This function gets to verify that there is at least one first user - return 1, nil - }, - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return &chronograf.User{ - Name: "secret", - Provider: "auth0", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: roles.MemberRoleName, - Organization: "0", - }, - }, - }, nil - }, - AddF: func(ctx context.Context, u *chronograf.User) (*chronograf.User, error) { - return u, nil - }, - UpdateF: func(ctx context.Context, u *chronograf.User) error { - return nil - }, - }, - }, - principal: oauth2.Principal{ - Subject: "secret", - Issuer: "auth0", - Group: "example", - }, - wantStatus: http.StatusOK, - wantContentType: "application/json", - wantBody: `{"name":"secret","roles":[{"name":"member","organization":"0"}],"provider":"auth0","scheme":"oauth2","superAdmin":true,"links":{"self":"/chronograf/v1/organizations/0/users/0"},"organizations":[{"id":"0","name":"The Bad Place","defaultRole":"member"}],"currentOrganization":{"id":"0","name":"The Bad Place","defaultRole":"member"}}`, - }, - { - name: "Existing user - Chronograf is not private, user has SuperAdmin status, user is in auth0 superadmin group", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest("GET", "http://example.com/foo", nil), - }, - fields: fields{ - UseAuth: true, - SuperAdminProviderGroups: superAdminProviderGroups{ - auth0: "example", - }, - Logger: &chronograf.NoopLogger{}, - ConfigStore: mocks.ConfigStore{ - Config: &chronograf.Config{}, - }, - MappingsStore: &mocks.MappingsStore{ - AllF: func(ctx context.Context) ([]chronograf.Mapping, error) { - return []chronograf.Mapping{ - { - Organization: "0", - Provider: chronograf.MappingWildcard, - Scheme: chronograf.MappingWildcard, - ProviderOrganization: chronograf.MappingWildcard, - }, - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - Name: "The Bad Place", - DefaultRole: roles.MemberRoleName, - }, nil - }, - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - Name: "The Bad Place", - DefaultRole: roles.MemberRoleName, - }, nil - }, - }, - UsersStore: &mocks.UsersStore{ - NumF: func(ctx context.Context) (int, error) { - // This function gets to verify that there is at least one first user - return 1, nil - }, - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return &chronograf.User{ - Name: "secret", - Provider: "auth0", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: roles.MemberRoleName, - Organization: "0", - }, - }, - SuperAdmin: true, - }, nil - }, - AddF: func(ctx context.Context, u *chronograf.User) (*chronograf.User, error) { - return u, nil - }, - UpdateF: func(ctx context.Context, u *chronograf.User) error { - return nil - }, - }, - }, - principal: oauth2.Principal{ - Subject: "secret", - Issuer: "auth0", - Group: "example", - }, - 
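These fixtures pin down the provider-group rule: the principal's Group field is a comma-separated list, and any entry matching the group configured in superAdminProviderGroups grants superadmin status (so "not_example,example" qualifies while "not_example" does not). The matching helper itself is not shown in this diff, so the sketch below is an assumed reconstruction of the behaviour the test cases describe:

```go
package main

import (
	"fmt"
	"strings"
)

// inSuperAdminGroup reconstructs the rule the deleted tests exercise:
// the OAuth principal carries a comma-separated group list, and the
// user is promoted when any entry equals the group configured for the
// provider. The real helper's name and signature are not visible here.
func inSuperAdminGroup(principalGroups, configuredGroup string) bool {
	for _, g := range strings.Split(principalGroups, ",") {
		if g == configuredGroup {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(inSuperAdminGroup("not_example,example", "example")) // true
	fmt.Println(inSuperAdminGroup("not_example", "example"))         // false
}
```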
wantStatus: http.StatusOK, - wantContentType: "application/json", - wantBody: `{"name":"secret","roles":[{"name":"member","organization":"0"}],"provider":"auth0","scheme":"oauth2","superAdmin":true,"links":{"self":"/chronograf/v1/organizations/0/users/0"},"organizations":[{"id":"0","name":"The Bad Place","defaultRole":"member"}],"currentOrganization":{"id":"0","name":"The Bad Place","defaultRole":"member"}}`, - }, - } - for _, tt := range tests { - tt.args.r = tt.args.r.WithContext(context.WithValue(context.Background(), oauth2.PrincipalKey, tt.principal)) - s := &Service{ - Store: &mocks.Store{ - UsersStore: tt.fields.UsersStore, - OrganizationsStore: tt.fields.OrganizationsStore, - MappingsStore: tt.fields.MappingsStore, - ConfigStore: tt.fields.ConfigStore, - }, - Logger: tt.fields.Logger, - UseAuth: tt.fields.UseAuth, - SuperAdminProviderGroups: tt.fields.SuperAdminProviderGroups, - } - - s.Me(tt.args.w, tt.args.r) - - resp := tt.args.w.Result() - content := resp.Header.Get("Content-Type") - body, _ := ioutil.ReadAll(resp.Body) - - if resp.StatusCode != tt.wantStatus { - t.Errorf("%q. Me() = %v, want %v", tt.name, resp.StatusCode, tt.wantStatus) - } - if tt.wantContentType != "" && content != tt.wantContentType { - t.Errorf("%q. Me() = %v, want %v", tt.name, content, tt.wantContentType) - } - if tt.wantBody == "" { - continue - } - if eq, err := jsonEqual(tt.wantBody, string(body)); err != nil || !eq { - t.Errorf("%q. Me() = \n***%v***\n,\nwant\n***%v***", tt.name, string(body), tt.wantBody) - } - } -} - -func TestService_UpdateMe(t *testing.T) { - type fields struct { - UsersStore chronograf.UsersStore - OrganizationsStore chronograf.OrganizationsStore - Logger chronograf.Logger - UseAuth bool - } - type args struct { - w *httptest.ResponseRecorder - r *http.Request - meRequest *meRequest - auth mocks.Authenticator - } - tests := []struct { - name string - fields fields - args args - principal oauth2.Principal - wantStatus int - wantContentType string - wantBody string - }{ - { - name: "Set the current User's organization", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest("GET", "http://example.com/foo", nil), - meRequest: &meRequest{ - Organization: "1337", - }, - auth: mocks.Authenticator{}, - }, - fields: fields{ - UseAuth: true, - Logger: &chronograf.NoopLogger{}, - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return &chronograf.User{ - Name: "me", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: roles.AdminRoleName, - Organization: "1337", - }, - }, - }, nil - }, - UpdateF: func(ctx context.Context, u *chronograf.User) error { - return nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - Name: "Default", - DefaultRole: roles.AdminRoleName, - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - if q.ID == nil { - return nil, fmt.Errorf("invalid organization query: missing ID") - } - switch *q.ID { - case "0": - return &chronograf.Organization{ - ID: "0", - Name: "Default", - DefaultRole: roles.AdminRoleName, - }, nil - case "1337": - return &chronograf.Organization{ - ID: "1337", - Name: "The 
ShillBillThrilliettas", - }, nil - } - return nil, nil - }, - }, - }, - principal: oauth2.Principal{ - Subject: "me", - Issuer: "github", - }, - wantStatus: http.StatusOK, - wantContentType: "application/json", - wantBody: `{"name":"me","roles":[{"name":"admin","organization":"1337"}],"provider":"github","scheme":"oauth2","links":{"self":"/chronograf/v1/organizations/1337/users/0"},"organizations":[{"id":"1337","name":"The ShillBillThrilliettas"}],"currentOrganization":{"id":"1337","name":"The ShillBillThrilliettas"}}`, - }, - { - name: "Change the current User's organization", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest("GET", "http://example.com/foo", nil), - meRequest: &meRequest{ - Organization: "1337", - }, - auth: mocks.Authenticator{}, - }, - fields: fields{ - UseAuth: true, - Logger: &chronograf.NoopLogger{}, - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return &chronograf.User{ - Name: "me", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: roles.AdminRoleName, - Organization: "1337", - }, - }, - }, nil - }, - UpdateF: func(ctx context.Context, u *chronograf.User) error { - return nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - Name: "Default", - DefaultRole: roles.EditorRoleName, - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - if q.ID == nil { - return nil, fmt.Errorf("invalid organization query: missing ID") - } - switch *q.ID { - case "1337": - return &chronograf.Organization{ - ID: "1337", - Name: "The ThrillShilliettos", - }, nil - case "0": - return &chronograf.Organization{ - ID: "0", - Name: "Default", - DefaultRole: roles.EditorRoleName, - }, nil - } - return nil, nil - }, - }, - }, - principal: oauth2.Principal{ - Subject: "me", - Issuer: "github", - Organization: "1338", - }, - wantStatus: http.StatusOK, - wantContentType: "application/json", - wantBody: `{"name":"me","roles":[{"name":"admin","organization":"1337"}],"provider":"github","scheme":"oauth2","links":{"self":"/chronograf/v1/organizations/1337/users/0"},"organizations":[{"id":"1337","name":"The ThrillShilliettos"}],"currentOrganization":{"id":"1337","name":"The ThrillShilliettos"}}`, - }, - { - name: "Unable to find requested user in valid organization", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest("GET", "http://example.com/foo", nil), - meRequest: &meRequest{ - Organization: "1337", - }, - auth: mocks.Authenticator{}, - }, - fields: fields{ - UseAuth: true, - Logger: &chronograf.NoopLogger{}, - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return &chronograf.User{ - Name: "me", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: roles.AdminRoleName, - Organization: "1338", - }, - }, - }, nil - }, - UpdateF: func(ctx context.Context, u *chronograf.User) error { - return nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - 
DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - if q.ID == nil { - return nil, fmt.Errorf("invalid organization query: missing ID") - } - return &chronograf.Organization{ - ID: "1337", - Name: "The ShillBillThrilliettas", - }, nil - }, - }, - }, - principal: oauth2.Principal{ - Subject: "me", - Issuer: "github", - Organization: "1338", - }, - wantStatus: http.StatusForbidden, - wantContentType: "application/json", - wantBody: `{"code":403,"message":"user not found"}`, - }, - { - name: "Unable to find requested organization", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest("GET", "http://example.com/foo", nil), - meRequest: &meRequest{ - Organization: "1337", - }, - auth: mocks.Authenticator{}, - }, - fields: fields{ - UseAuth: true, - Logger: &chronograf.NoopLogger{}, - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - if q.Name == nil || q.Provider == nil || q.Scheme == nil { - return nil, fmt.Errorf("invalid user query: missing Name, Provider, and/or Scheme") - } - return &chronograf.User{ - Name: "me", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: roles.AdminRoleName, - Organization: "1337", - }, - }, - }, nil - }, - UpdateF: func(ctx context.Context, u *chronograf.User) error { - return nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "0", - }, nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - return nil, chronograf.ErrOrganizationNotFound - }, - }, - }, - principal: oauth2.Principal{ - Subject: "me", - Issuer: "github", - Organization: "1338", - }, - wantStatus: http.StatusBadRequest, - wantContentType: "application/json", - wantBody: `{"code":400,"message":"organization not found"}`, - }, - } - for _, tt := range tests { - tt.args.r = tt.args.r.WithContext(context.WithValue(context.Background(), oauth2.PrincipalKey, tt.principal)) - s := &Service{ - Store: &Store{ - UsersStore: tt.fields.UsersStore, - OrganizationsStore: tt.fields.OrganizationsStore, - }, - Logger: tt.fields.Logger, - UseAuth: tt.fields.UseAuth, - } - - buf, _ := json.Marshal(tt.args.meRequest) - tt.args.r.Body = ioutil.NopCloser(bytes.NewReader(buf)) - tt.args.auth.Principal = tt.principal - - s.UpdateMe(&tt.args.auth)(tt.args.w, tt.args.r) - - resp := tt.args.w.Result() - content := resp.Header.Get("Content-Type") - body, _ := ioutil.ReadAll(resp.Body) - - if resp.StatusCode != tt.wantStatus { - t.Errorf("%q. UpdateMe() = %v, want %v", tt.name, resp.StatusCode, tt.wantStatus) - } - if tt.wantContentType != "" && content != tt.wantContentType { - t.Errorf("%q. UpdateMe() = %v, want %v", tt.name, content, tt.wantContentType) - } - if eq, err := jsonEqual(tt.wantBody, string(body)); err != nil || !eq { - t.Errorf("%q. 
UpdateMe() = \n***%v***\n,\nwant\n***%v***", tt.name, string(body), tt.wantBody) - } - } -} diff --git a/chronograf/server/middle.go b/chronograf/server/middle.go deleted file mode 100644 index b6ba8afc2e1..00000000000 --- a/chronograf/server/middle.go +++ /dev/null @@ -1,57 +0,0 @@ -package server - -import ( - "net/http" - - "github.com/bouk/httprouter" - "github.com/influxdata/influxdb/v2/chronograf" -) - -// RouteMatchesPrincipal checks that the organization on context matches the organization -// in the route. -func RouteMatchesPrincipal( - store DataStore, - useAuth bool, - logger chronograf.Logger, - next http.HandlerFunc, -) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - if !useAuth { - next(w, r) - return - } - - log := logger. - WithField("component", "org_match"). - WithField("remote_addr", r.RemoteAddr). - WithField("method", r.Method). - WithField("url", r.URL) - - orgID := httprouter.GetParamFromContext(ctx, "oid") - p, err := getValidPrincipal(ctx) - if err != nil { - log.Error("Failed to retrieve principal from context") - Error(w, http.StatusForbidden, "User is not authorized", logger) - return - } - - if p.Organization == "" { - defaultOrg, err := store.Organizations(ctx).DefaultOrganization(ctx) - if err != nil { - log.Error("Failed to look up default organization") - Error(w, http.StatusForbidden, "User is not authorized", logger) - return - } - p.Organization = defaultOrg.ID - } - - if orgID != p.Organization { - log.Error("Route organization does not match the organization on principal") - Error(w, http.StatusForbidden, "User is not authorized", logger) - return - } - - next(w, r) - } -} diff --git a/chronograf/server/middle_test.go b/chronograf/server/middle_test.go deleted file mode 100644 index 12dbb007129..00000000000 --- a/chronograf/server/middle_test.go +++ /dev/null @@ -1,195 +0,0 @@ -package server - -import ( - "context" - "net/http" - "net/http/httptest" - "testing" - - "github.com/bouk/httprouter" - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/mocks" - "github.com/influxdata/influxdb/v2/chronograf/oauth2" -) - -func TestRouteMatchesPrincipal(t *testing.T) { - type fields struct { - OrganizationsStore chronograf.OrganizationsStore - Logger chronograf.Logger - } - type args struct { - useAuth bool - principal *oauth2.Principal - routerParams *httprouter.Params - } - type wants struct { - matches bool - } - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "route matches request params", - fields: fields{ - Logger: &chronograf.NoopLogger{}, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "default", - }, nil - }, - }, - }, - args: args{ - useAuth: true, - principal: &oauth2.Principal{ - Subject: "user", - Issuer: "github", - Organization: "default", - }, - routerParams: &httprouter.Params{ - { - Key: "oid", - Value: "default", - }, - }, - }, - wants: wants{ - matches: true, - }, - }, - { - name: "route does not match request params", - fields: fields{ - Logger: &chronograf.NoopLogger{}, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "default", - }, nil - }, - }, - }, - args: args{ - useAuth: true, - principal: 
&oauth2.Principal{ - Subject: "user", - Issuer: "github", - Organization: "default", - }, - routerParams: &httprouter.Params{ - { - Key: "oid", - Value: "other", - }, - }, - }, - wants: wants{ - matches: false, - }, - }, - { - name: "missing principal", - fields: fields{ - Logger: &chronograf.NoopLogger{}, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "default", - }, nil - }, - }, - }, - args: args{ - useAuth: true, - principal: nil, - routerParams: &httprouter.Params{ - { - Key: "oid", - Value: "other", - }, - }, - }, - wants: wants{ - matches: false, - }, - }, - { - name: "not using auth", - fields: fields{ - Logger: &chronograf.NoopLogger{}, - OrganizationsStore: &mocks.OrganizationsStore{ - DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "default", - }, nil - }, - }, - }, - args: args{ - useAuth: false, - principal: &oauth2.Principal{ - Subject: "user", - Issuer: "github", - Organization: "default", - }, - routerParams: &httprouter.Params{ - { - Key: "oid", - Value: "other", - }, - }, - }, - wants: wants{ - matches: true, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - store := &mocks.Store{ - OrganizationsStore: tt.fields.OrganizationsStore, - } - var matches bool - next := func(w http.ResponseWriter, r *http.Request) { - matches = true - } - fn := RouteMatchesPrincipal( - store, - tt.args.useAuth, - tt.fields.Logger, - next, - ) - - w := httptest.NewRecorder() - url := "http://any.url" - r := httptest.NewRequest( - "GET", - url, - nil, - ) - if tt.args.routerParams != nil { - r = r.WithContext(httprouter.WithParams(r.Context(), *tt.args.routerParams)) - } - if tt.args.principal == nil { - r = r.WithContext(context.WithValue(r.Context(), oauth2.PrincipalKey, nil)) - } else { - r = r.WithContext(context.WithValue(r.Context(), oauth2.PrincipalKey, *tt.args.principal)) - } - fn(w, r) - - if matches != tt.wants.matches { - t.Errorf("%q. RouteMatchesPrincipal() = %v, expected %v", tt.name, matches, tt.wants.matches) - } - - if !matches && w.Code != http.StatusForbidden { - t.Errorf("%q. RouteMatchesPrincipal() Status Code = %v, expected %v", tt.name, w.Code, http.StatusForbidden) - } - - }) - } -} diff --git a/chronograf/server/mountable_router.go b/chronograf/server/mountable_router.go deleted file mode 100644 index 1ae275cfdf6..00000000000 --- a/chronograf/server/mountable_router.go +++ /dev/null @@ -1,59 +0,0 @@ -package server - -import ( - "net/http" - libpath "path" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -var _ chronograf.Router = &MountableRouter{} - -// MountableRouter is an implementation of a chronograf.Router which supports -// prefixing each route of a Delegated chronograf.Router with a prefix. 
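A minimal sketch of that prefixing idea, with net/http's ServeMux standing in for the chronograf.Router delegate; prefixedMux and handle are hypothetical names used only for illustration:

package main

import (
	"fmt"
	"net/http"
	libpath "path"
)

type prefixedMux struct {
	prefix string
	mux    *http.ServeMux
}

// handle joins the fixed prefix onto the route before delegating, the same
// move each MountableRouter method below makes via libpath.Join.
func (p *prefixedMux) handle(route string, h http.HandlerFunc) {
	// libpath.Join also collapses duplicate slashes, so "/chronograf" and
	// "//biff" still yield "/chronograf/biff".
	p.mux.HandleFunc(libpath.Join(p.prefix, route), h)
}

func main() {
	p := &prefixedMux{prefix: "/chronograf", mux: http.NewServeMux()}
	p.handle("/biff", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "ok")
	})
	// GET /chronograf/biff now matches; a bare GET /biff returns 404.
	_ = http.ListenAndServe(":8080", p.mux)
}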
-type MountableRouter struct { - Prefix string - Delegate chronograf.Router -} - -// DELETE defines a route responding to a DELETE request that will be prefixed -// with the configured route prefix -func (mr *MountableRouter) DELETE(path string, handler http.HandlerFunc) { - mr.Delegate.DELETE(libpath.Join(mr.Prefix, path), handler) -} - -// GET defines a route responding to a GET request that will be prefixed -// with the configured route prefix -func (mr *MountableRouter) GET(path string, handler http.HandlerFunc) { - mr.Delegate.GET(libpath.Join(mr.Prefix, path), handler) -} - -// POST defines a route responding to a POST request that will be prefixed -// with the configured route prefix -func (mr *MountableRouter) POST(path string, handler http.HandlerFunc) { - mr.Delegate.POST(libpath.Join(mr.Prefix, path), handler) -} - -// PUT defines a route responding to a PUT request that will be prefixed -// with the configured route prefix -func (mr *MountableRouter) PUT(path string, handler http.HandlerFunc) { - mr.Delegate.PUT(libpath.Join(mr.Prefix, path), handler) -} - -// PATCH defines a route responding to a PATCH request that will be prefixed -// with the configured route prefix -func (mr *MountableRouter) PATCH(path string, handler http.HandlerFunc) { - mr.Delegate.PATCH(libpath.Join(mr.Prefix, path), handler) -} - -// Handler defines a prefixed route responding to a request type specified in -// the method parameter -func (mr *MountableRouter) Handler(method string, path string, handler http.Handler) { - mr.Delegate.Handler(method, libpath.Join(mr.Prefix, path), handler) -} - -// ServeHTTP is an implementation of http.Handler which delegates to the -// configured Delegate's implementation of http.Handler -func (mr *MountableRouter) ServeHTTP(rw http.ResponseWriter, r *http.Request) { - mr.Delegate.ServeHTTP(rw, r) -} diff --git a/chronograf/server/mountable_router_test.go b/chronograf/server/mountable_router_test.go deleted file mode 100644 index 2eec8a0593a..00000000000 --- a/chronograf/server/mountable_router_test.go +++ /dev/null @@ -1,240 +0,0 @@ -package server_test - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/http/httptest" - "strings" - "testing" - - "github.com/bouk/httprouter" - "github.com/influxdata/influxdb/v2/chronograf/server" -) - -func Test_MountableRouter_MountsRoutesUnderPrefix(t *testing.T) { - t.Parallel() - - mr := &server.MountableRouter{ - Prefix: "/chronograf", - Delegate: httprouter.New(), - } - - expected := "Hello?! McFly?! Anybody in there?!" - mr.GET("/biff", http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - fmt.Fprint(rw, expected) - })) - - ts := httptest.NewServer(mr) - defer ts.Close() - - resp, err := http.Get(ts.URL + "/chronograf/biff") - if err != nil { - t.Fatal("Unexpected error fetching from mounted router: err:", err) - } - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatal("Unexpected error decoding response body: err:", err) - } - - if resp.StatusCode != http.StatusOK { - t.Fatal("Expected 200 but received", resp.StatusCode) - } - - if string(body) != expected { - t.Fatalf("Unexpected response body: Want: \"%s\". Got: \"%s\"", expected, string(body)) - } -} - -func Test_MountableRouter_PrefixesPosts(t *testing.T) { - t.Parallel() - - mr := &server.MountableRouter{ - Prefix: "/chronograf", - Delegate: httprouter.New(), - } - - expected := "Great Scott!" 
- actual := make([]byte, len(expected)) - mr.POST("/doc", http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - defer r.Body.Close() - if _, err := io.ReadFull(r.Body, actual); err != nil { - rw.WriteHeader(http.StatusInternalServerError) - } else { - rw.WriteHeader(http.StatusOK) - } - })) - - ts := httptest.NewServer(mr) - defer ts.Close() - - resp, err := http.Post(ts.URL+"/chronograf/doc", "text/plain", strings.NewReader(expected)) - if err != nil { - t.Fatal("Unexpected error posting to mounted router: err:", err) - } - - if resp.StatusCode != http.StatusOK { - t.Fatal("Expected 200 but received", resp.StatusCode) - } - - if string(actual) != expected { - t.Fatalf("Unexpected request body: Want: \"%s\". Got: \"%s\"", expected, string(actual)) - } -} - -func Test_MountableRouter_PrefixesPuts(t *testing.T) { - t.Parallel() - - mr := &server.MountableRouter{ - Prefix: "/chronograf", - Delegate: httprouter.New(), - } - - expected := "Great Scott!" - actual := make([]byte, len(expected)) - mr.PUT("/doc", http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - defer r.Body.Close() - if _, err := io.ReadFull(r.Body, actual); err != nil { - rw.WriteHeader(http.StatusInternalServerError) - } else { - rw.WriteHeader(http.StatusOK) - } - })) - - ts := httptest.NewServer(mr) - defer ts.Close() - - req := httptest.NewRequest(http.MethodPut, ts.URL+"/chronograf/doc", strings.NewReader(expected)) - req.Header.Set("Content-Type", "text/plain; charset=utf-8") - req.Header.Set("Content-Length", fmt.Sprintf("%d", len(expected))) - req.RequestURI = "" - - client := http.Client{} - resp, err := client.Do(req) - if err != nil { - t.Fatal("Unexpected error posting to mounted router: err:", err) - } - - if resp.StatusCode != http.StatusOK { - t.Fatal("Expected 200 but received", resp.StatusCode) - } - - if string(actual) != expected { - t.Fatalf("Unexpected request body: Want: \"%s\". 
Got: \"%s\"", expected, string(actual)) - } -} - -func Test_MountableRouter_PrefixesDeletes(t *testing.T) { - t.Parallel() - - mr := &server.MountableRouter{ - Prefix: "/chronograf", - Delegate: httprouter.New(), - } - - mr.DELETE("/proto1985", http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - rw.WriteHeader(http.StatusNoContent) - })) - - ts := httptest.NewServer(mr) - defer ts.Close() - - req := httptest.NewRequest(http.MethodDelete, ts.URL+"/chronograf/proto1985", nil) - req.RequestURI = "" - - client := http.Client{} - resp, err := client.Do(req) - if err != nil { - t.Fatal("Unexpected error sending request to mounted router: err:", err) - } - - if resp.StatusCode != http.StatusNoContent { - t.Fatal("Expected 204 but received", resp.StatusCode) - } -} - -func Test_MountableRouter_PrefixesPatches(t *testing.T) { - t.Parallel() - - type Character struct { - Name string - Items []string - } - - mr := &server.MountableRouter{ - Prefix: "/chronograf", - Delegate: httprouter.New(), - } - - biff := Character{"biff", []string{"sports almanac"}} - mr.PATCH("/1955", http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - defer r.Body.Close() - c := Character{} - err := json.NewDecoder(r.Body).Decode(&c) - if err != nil { - rw.WriteHeader(http.StatusBadRequest) - } else { - biff.Items = c.Items - rw.WriteHeader(http.StatusOK) - } - })) - - ts := httptest.NewServer(mr) - defer ts.Close() - - r, w := io.Pipe() - go func() { - _ = json.NewEncoder(w).Encode(Character{"biff", []string{}}) - w.Close() - }() - - req := httptest.NewRequest(http.MethodPatch, ts.URL+"/chronograf/1955", r) - req.RequestURI = "" - - client := http.Client{} - resp, err := client.Do(req) - if err != nil { - t.Fatal("Unexpected error sending request to mounted router: err:", err) - } - - if resp.StatusCode != http.StatusOK { - t.Fatal("Expected 200 but received", resp.StatusCode) - } - - if len(biff.Items) != 0 { - t.Fatal("Failed to alter history, biff still has the sports almanac") - } -} - -func Test_MountableRouter_PrefixesHandler(t *testing.T) { - t.Parallel() - - mr := &server.MountableRouter{ - Prefix: "/chronograf", - Delegate: httprouter.New(), - } - - mr.Handler(http.MethodGet, "/recklessAmountOfPower", http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - rw.WriteHeader(http.StatusOK) - rw.Write([]byte("1.21 Gigawatts!")) - })) - - ts := httptest.NewServer(mr) - defer ts.Close() - - req := httptest.NewRequest(http.MethodGet, ts.URL+"/chronograf/recklessAmountOfPower", nil) - req.RequestURI = "" - - client := http.Client{} - resp, err := client.Do(req) - if err != nil { - t.Fatal("Unexpected error sending request to mounted router: err:", err) - } - - if resp.StatusCode != http.StatusOK { - t.Fatal("Expected 200 but received", resp.StatusCode) - } -} diff --git a/chronograf/server/mux.go b/chronograf/server/mux.go deleted file mode 100644 index 9200c6bbd39..00000000000 --- a/chronograf/server/mux.go +++ /dev/null @@ -1,407 +0,0 @@ -package server - -import ( - "encoding/json" - "fmt" - "net/http" - "path" - "strconv" - "strings" - - _ "net/http/pprof" - - "github.com/NYTimes/gziphandler" - "github.com/bouk/httprouter" - jhttprouter "github.com/influxdata/httprouter" - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/oauth2" - "github.com/influxdata/influxdb/v2/chronograf/roles" -) - -const ( - // JSONType the mimetype for a json request - JSONType = "application/json" -) - -// MuxOpts 
are the options for the router. Mostly related to auth. -type MuxOpts struct { - Logger chronograf.Logger - Develop bool // Develop loads assets from filesystem instead of bindata - Basepath string // URL path prefix under which all chronograf routes will be mounted - UseAuth bool // UseAuth turns on Github OAuth and JWT - Auth oauth2.Authenticator // Auth is used to authenticate and authorize - ProviderFuncs []func(func(oauth2.Provider, oauth2.Mux)) - StatusFeedURL string // JSON Feed URL for the client Status page News Feed - CustomLinks map[string]string // Any custom external links for client's User menu - PprofEnabled bool // Mount pprof routes for profiling -} - -// NewMux attaches all the route handlers; the handler returned serves chronograf. -func NewMux(opts MuxOpts, service Service) http.Handler { - hr := httprouter.New() - - /* React Application */ - assets := Assets(AssetsOpts{ - Develop: opts.Develop, - Logger: opts.Logger, - }) - - // Prefix any URLs found in the React assets with any configured basepath - prefixedAssets := NewDefaultURLPrefixer(opts.Basepath, assets, opts.Logger) - - // Compress the assets with gzip if an accepted encoding - compressed := gziphandler.GzipHandler(prefixedAssets) - - // The react application handles all the routing if the server does not - // know about the route. This means that we never have unknown routes on - // the server. - hr.NotFound = compressed - - var router chronograf.Router = hr - - // Set route prefix for all routes if basepath is present - if opts.Basepath != "" { - router = &MountableRouter{ - Prefix: opts.Basepath, - Delegate: hr, - } - - // The assets handler is always unaware of basepaths, so the - // basepath needs to always be removed before sending requests to it - hr.NotFound = http.StripPrefix(opts.Basepath, hr.NotFound) - } - - EnsureMember := func(next http.HandlerFunc) http.HandlerFunc { - return AuthorizedUser( - service.Store, - opts.UseAuth, - roles.MemberRoleName, - opts.Logger, - next, - ) - } - _ = EnsureMember - EnsureViewer := func(next http.HandlerFunc) http.HandlerFunc { - return AuthorizedUser( - service.Store, - opts.UseAuth, - roles.ViewerRoleName, - opts.Logger, - next, - ) - } - EnsureEditor := func(next http.HandlerFunc) http.HandlerFunc { - return AuthorizedUser( - service.Store, - opts.UseAuth, - roles.EditorRoleName, - opts.Logger, - next, - ) - } - EnsureAdmin := func(next http.HandlerFunc) http.HandlerFunc { - return AuthorizedUser( - service.Store, - opts.UseAuth, - roles.AdminRoleName, - opts.Logger, - next, - ) - } - EnsureSuperAdmin := func(next http.HandlerFunc) http.HandlerFunc { - return AuthorizedUser( - service.Store, - opts.UseAuth, - roles.SuperAdminStatus, - opts.Logger, - next, - ) - } - - rawStoreAccess := func(next http.HandlerFunc) http.HandlerFunc { - return RawStoreAccess(opts.Logger, next) - } - - ensureOrgMatches := func(next http.HandlerFunc) http.HandlerFunc { - return RouteMatchesPrincipal( - service.Store, - opts.UseAuth, - opts.Logger, - next, - ) - } - - if opts.PprofEnabled { - // add profiling routes - router.GET("/debug/pprof/:thing", http.DefaultServeMux.ServeHTTP) - } - - /* Documentation */ - router.GET("/swagger.json", Spec()) - router.GET("/docs", Redoc("/swagger.json")) - - /* API */ - // Organizations - router.GET("/chronograf/v1/organizations", EnsureAdmin(service.Organizations)) - router.POST("/chronograf/v1/organizations", EnsureSuperAdmin(service.NewOrganization)) - - router.GET("/chronograf/v1/organizations/:oid", EnsureAdmin(service.OrganizationID)) - 
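// The Ensure* wrappers used throughout these registrations share one
// decorator shape: take the next handler, return a handler that enforces a
// minimum role before delegating. A minimal standalone sketch of that shape
// (requireRole is a hypothetical stand-in for AuthorizedUser, and the
// X-Role header is an illustrative role source, not chronograf's):
//
//	func requireRole(role string, next http.HandlerFunc) http.HandlerFunc {
//		return func(w http.ResponseWriter, r *http.Request) {
//			if r.Header.Get("X-Role") != role {
//				http.Error(w, "User is not authorized", http.StatusForbidden)
//				return
//			}
//			next(w, r)
//		}
//	}
//
// Composition then nests outside-in, e.g. EnsureAdmin(ensureOrgMatches(h))
// checks the admin role first and the organization match second.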
router.PATCH("/chronograf/v1/organizations/:oid", EnsureSuperAdmin(service.UpdateOrganization)) - router.DELETE("/chronograf/v1/organizations/:oid", EnsureSuperAdmin(service.RemoveOrganization)) - - // Mappings - router.GET("/chronograf/v1/mappings", EnsureSuperAdmin(service.Mappings)) - router.POST("/chronograf/v1/mappings", EnsureSuperAdmin(service.NewMapping)) - - router.PUT("/chronograf/v1/mappings/:id", EnsureSuperAdmin(service.UpdateMapping)) - router.DELETE("/chronograf/v1/mappings/:id", EnsureSuperAdmin(service.RemoveMapping)) - - // Source Proxy to Influx; Has gzip compression around the handler - influx := gziphandler.GzipHandler(http.HandlerFunc(EnsureViewer(service.Influx))) - router.Handler("POST", "/chronograf/v1/sources/:id/proxy", influx) - - // Write proxies line protocol write requests to InfluxDB - router.POST("/chronograf/v1/sources/:id/write", EnsureViewer(service.Write)) - - // Queries is used to analyze a specific queries and does not create any - // resources. It's a POST because Queries are POSTed to InfluxDB, but this - // only modifies InfluxDB resources with certain metaqueries, e.g. DROP DATABASE. - // - // Admins should ensure that the InfluxDB source as the proper permissions - // intended for Chronograf Users with the Viewer Role type. - router.POST("/chronograf/v1/sources/:id/queries", EnsureViewer(service.Queries)) - - // Annotations are user-defined events associated with this source - router.GET("/chronograf/v1/sources/:id/annotations", EnsureViewer(service.Annotations)) - router.POST("/chronograf/v1/sources/:id/annotations", EnsureEditor(service.NewAnnotation)) - router.GET("/chronograf/v1/sources/:id/annotations/:aid", EnsureViewer(service.Annotation)) - router.DELETE("/chronograf/v1/sources/:id/annotations/:aid", EnsureEditor(service.RemoveAnnotation)) - router.PATCH("/chronograf/v1/sources/:id/annotations/:aid", EnsureEditor(service.UpdateAnnotation)) - - // All possible permissions for users in this source - router.GET("/chronograf/v1/sources/:id/permissions", EnsureViewer(service.Permissions)) - - // Services are resources that chronograf proxies to - router.GET("/chronograf/v1/sources/:id/services", EnsureViewer(service.Services)) - router.POST("/chronograf/v1/sources/:id/services", EnsureEditor(service.NewService)) - router.GET("/chronograf/v1/sources/:id/services/:kid", EnsureViewer(service.ServiceID)) - router.PATCH("/chronograf/v1/sources/:id/services/:kid", EnsureEditor(service.UpdateService)) - router.DELETE("/chronograf/v1/sources/:id/services/:kid", EnsureEditor(service.RemoveService)) - - // Service Proxy - router.GET("/chronograf/v1/sources/:id/services/:kid/proxy", EnsureViewer(service.ProxyGet)) - router.POST("/chronograf/v1/sources/:id/services/:kid/proxy", EnsureEditor(service.ProxyPost)) - router.PATCH("/chronograf/v1/sources/:id/services/:kid/proxy", EnsureEditor(service.ProxyPatch)) - router.DELETE("/chronograf/v1/sources/:id/services/:kid/proxy", EnsureEditor(service.ProxyDelete)) - - // Layouts - router.GET("/chronograf/v1/layouts", EnsureViewer(service.Layouts)) - router.GET("/chronograf/v1/layouts/:id", EnsureViewer(service.LayoutsID)) - - // Users associated with Chronograf - router.GET("/chronograf/v1/me", service.Me) - - // Set current chronograf organization the user is logged into - router.PUT("/chronograf/v1/me", service.UpdateMe(opts.Auth)) - - // TODO(desa): what to do about admin's being able to set superadmin - router.GET("/chronograf/v1/organizations/:oid/users", EnsureAdmin(ensureOrgMatches(service.Users))) - 
router.POST("/chronograf/v1/organizations/:oid/users", EnsureAdmin(ensureOrgMatches(service.NewUser))) - - router.GET("/chronograf/v1/organizations/:oid/users/:id", EnsureAdmin(ensureOrgMatches(service.UserID))) - router.DELETE("/chronograf/v1/organizations/:oid/users/:id", EnsureAdmin(ensureOrgMatches(service.RemoveUser))) - router.PATCH("/chronograf/v1/organizations/:oid/users/:id", EnsureAdmin(ensureOrgMatches(service.UpdateUser))) - - router.GET("/chronograf/v1/users", EnsureSuperAdmin(rawStoreAccess(service.Users))) - router.POST("/chronograf/v1/users", EnsureSuperAdmin(rawStoreAccess(service.NewUser))) - - router.GET("/chronograf/v1/users/:id", EnsureSuperAdmin(rawStoreAccess(service.UserID))) - router.DELETE("/chronograf/v1/users/:id", EnsureSuperAdmin(rawStoreAccess(service.RemoveUser))) - router.PATCH("/chronograf/v1/users/:id", EnsureSuperAdmin(rawStoreAccess(service.UpdateUser))) - - // Dashboards - router.GET("/chronograf/v1/dashboards", EnsureViewer(service.Dashboards)) - router.POST("/chronograf/v1/dashboards", EnsureEditor(service.NewDashboard)) - - router.GET("/chronograf/v1/dashboards/:id", EnsureViewer(service.DashboardID)) - router.DELETE("/chronograf/v1/dashboards/:id", EnsureEditor(service.RemoveDashboard)) - router.PUT("/chronograf/v1/dashboards/:id", EnsureEditor(service.ReplaceDashboard)) - router.PATCH("/chronograf/v1/dashboards/:id", EnsureEditor(service.UpdateDashboard)) - // Dashboard Cells - router.GET("/chronograf/v1/dashboards/:id/cells", EnsureViewer(service.DashboardCells)) - router.POST("/chronograf/v1/dashboards/:id/cells", EnsureEditor(service.NewDashboardCell)) - - router.GET("/chronograf/v1/dashboards/:id/cells/:cid", EnsureViewer(service.DashboardCellID)) - router.DELETE("/chronograf/v1/dashboards/:id/cells/:cid", EnsureEditor(service.RemoveDashboardCell)) - router.PUT("/chronograf/v1/dashboards/:id/cells/:cid", EnsureEditor(service.ReplaceDashboardCell)) - // Dashboard Templates - router.GET("/chronograf/v1/dashboards/:id/templates", EnsureViewer(service.Templates)) - router.POST("/chronograf/v1/dashboards/:id/templates", EnsureEditor(service.NewTemplate)) - - router.GET("/chronograf/v1/dashboards/:id/templates/:tid", EnsureViewer(service.TemplateID)) - router.DELETE("/chronograf/v1/dashboards/:id/templates/:tid", EnsureEditor(service.RemoveTemplate)) - router.PUT("/chronograf/v1/dashboards/:id/templates/:tid", EnsureEditor(service.ReplaceTemplate)) - - // Databases - router.GET("/chronograf/v1/sources/:id/dbs", EnsureViewer(service.GetDatabases)) - router.POST("/chronograf/v1/sources/:id/dbs", EnsureEditor(service.NewDatabase)) - - router.DELETE("/chronograf/v1/sources/:id/dbs/:db", EnsureEditor(service.DropDatabase)) - - // Retention Policies - router.GET("/chronograf/v1/sources/:id/dbs/:db/rps", EnsureViewer(service.RetentionPolicies)) - router.POST("/chronograf/v1/sources/:id/dbs/:db/rps", EnsureEditor(service.NewRetentionPolicy)) - - router.PUT("/chronograf/v1/sources/:id/dbs/:db/rps/:rp", EnsureEditor(service.UpdateRetentionPolicy)) - router.DELETE("/chronograf/v1/sources/:id/dbs/:db/rps/:rp", EnsureEditor(service.DropRetentionPolicy)) - - // Measurements - router.GET("/chronograf/v1/sources/:id/dbs/:db/measurements", EnsureViewer(service.Measurements)) - - // Global application config for Chronograf - router.GET("/chronograf/v1/config", EnsureSuperAdmin(service.Config)) - router.GET("/chronograf/v1/config/auth", EnsureSuperAdmin(service.AuthConfig)) - router.PUT("/chronograf/v1/config/auth", EnsureSuperAdmin(service.ReplaceAuthConfig)) - - // 
Organization config settings for Chronograf - router.GET("/chronograf/v1/org_config", EnsureViewer(service.OrganizationConfig)) - router.GET("/chronograf/v1/org_config/logviewer", EnsureViewer(service.OrganizationLogViewerConfig)) - router.PUT("/chronograf/v1/org_config/logviewer", EnsureEditor(service.ReplaceOrganizationLogViewerConfig)) - - router.GET("/chronograf/v1/env", EnsureViewer(service.Environment)) - - allRoutes := &AllRoutes{ - Logger: opts.Logger, - StatusFeed: opts.StatusFeedURL, - CustomLinks: opts.CustomLinks, - } - - getPrincipal := func(r *http.Request) oauth2.Principal { - p, _ := HasAuthorizedToken(opts.Auth, r) - return p - } - allRoutes.GetPrincipal = getPrincipal - router.Handler("GET", "/chronograf/v1/", allRoutes) - - var out http.Handler - - /* Authentication */ - if opts.UseAuth { - // Encapsulate the router with OAuth2 - var auth http.Handler - auth, allRoutes.AuthRoutes = AuthAPI(opts, router) - allRoutes.LogoutLink = path.Join(opts.Basepath, "/oauth/logout") - - // Create middleware that redirects to the appropriate provider logout - router.GET("/oauth/logout", Logout("/", opts.Basepath, allRoutes.AuthRoutes)) - out = Logger(opts.Logger, FlushingHandler(auth)) - } else { - out = Logger(opts.Logger, FlushingHandler(router)) - } - - return out -} - -// AuthAPI adds the OAuth routes if auth is enabled. -func AuthAPI(opts MuxOpts, router chronograf.Router) (http.Handler, AuthRoutes) { - routes := AuthRoutes{} - for _, pf := range opts.ProviderFuncs { - pf(func(p oauth2.Provider, m oauth2.Mux) { - urlName := PathEscape(strings.ToLower(p.Name())) - - loginPath := path.Join("/oauth", urlName, "login") - logoutPath := path.Join("/oauth", urlName, "logout") - callbackPath := path.Join("/oauth", urlName, "callback") - - router.Handler("GET", loginPath, m.Login()) - router.Handler("GET", logoutPath, m.Logout()) - router.Handler("GET", callbackPath, m.Callback()) - routes = append(routes, AuthRoute{ - Name: p.Name(), - Label: strings.Title(p.Name()), - // AuthRoutes are content served to the page. When Basepath is set, all - // content served to the page will be prefixed with the basepath. Since - // these routes are consumed by JS, they need the basepath set to - // traverse a proxy correctly - Login: path.Join(opts.Basepath, loginPath), - Logout: path.Join(opts.Basepath, logoutPath), - Callback: path.Join(opts.Basepath, callbackPath), - }) - }) - } - - rootPath := path.Join(opts.Basepath, "/chronograf/v1") - logoutPath := path.Join(opts.Basepath, "/oauth/logout") - - tokenMiddleware := AuthorizedToken(opts.Auth, opts.Logger, router) - // Wrap the API with token validation middleware. - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - cleanPath := path.Clean(r.URL.Path) // compare ignoring path garbage, trailing slashes, etc. 
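// A standalone rendering of the dispatch condition that follows (a sketch;
// the helper name needsToken is hypothetical and not part of mux.go):
//
//	func needsToken(urlPath, rootPath, logoutPath string) bool {
//		cleanPath := path.Clean(urlPath)
//		return (strings.HasPrefix(cleanPath, rootPath) && len(cleanPath) > len(rootPath)) ||
//			cleanPath == logoutPath
//	}
//
// With rootPath "/chronograf/v1" and logoutPath "/oauth/logout":
//
//	needsToken("/chronograf/v1/me", ...)         // true: strictly under the API root
//	needsToken("/chronograf/v1", ...)            // false: the root itself is served without token checks
//	needsToken("/chronograf/v1/../v1/me//", ...) // true: path.Clean normalizes before comparing
//	needsToken("/oauth/logout", ...)             // true: logout always goes through validation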
- if (strings.HasPrefix(cleanPath, rootPath) && len(cleanPath) > len(rootPath)) || cleanPath == logoutPath { - tokenMiddleware.ServeHTTP(w, r) - return - } - router.ServeHTTP(w, r) - }), routes -} - -func encodeJSON(w http.ResponseWriter, status int, v interface{}, logger chronograf.Logger) { - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(status) - if err := json.NewEncoder(w).Encode(v); err != nil { - unknownErrorWithMessage(w, err, logger) - } -} - -// Error writes a JSON message -func Error(w http.ResponseWriter, code int, msg string, logger chronograf.Logger) { - e := ErrorMessage{ - Code: code, - Message: msg, - } - b, err := json.Marshal(e) - if err != nil { - code = http.StatusInternalServerError - b = []byte(`{"code": 500, "message":"server_error"}`) - } - - logger. - WithField("component", "server"). - WithField("http_status", code). - Error("Error message ", msg) - w.Header().Set("Content-Type", JSONType) - w.WriteHeader(code) - _, _ = w.Write(b) -} - -func invalidData(w http.ResponseWriter, err error, logger chronograf.Logger) { - Error(w, http.StatusUnprocessableEntity, fmt.Sprintf("%v", err), logger) -} - -func invalidJSON(w http.ResponseWriter, logger chronograf.Logger) { - Error(w, http.StatusBadRequest, "unparsable JSON", logger) -} - -func unknownErrorWithMessage(w http.ResponseWriter, err error, logger chronograf.Logger) { - Error(w, http.StatusInternalServerError, fmt.Sprintf("unknown error: %v", err), logger) -} - -func notFound(w http.ResponseWriter, id interface{}, logger chronograf.Logger) { - Error(w, http.StatusNotFound, fmt.Sprintf("ID %v not found", id), logger) -} - -func paramID(key string, r *http.Request) (int, error) { - ctx := r.Context() - param := jhttprouter.ParamsFromContext(ctx).ByName(key) - id, err := strconv.Atoi(param) - if err != nil { - return -1, fmt.Errorf("error converting ID %s", param) - } - return id, nil -} - -func paramStr(key string, r *http.Request) (string, error) { - ctx := r.Context() - param := jhttprouter.ParamsFromContext(ctx).ByName(key) - return param, nil -} diff --git a/chronograf/server/org_config.go b/chronograf/server/org_config.go deleted file mode 100644 index 1ea43943902..00000000000 --- a/chronograf/server/org_config.go +++ /dev/null @@ -1,180 +0,0 @@ -package server - -import ( - "encoding/json" - "fmt" - "net/http" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -type organizationConfigLinks struct { - Self string `json:"self"` // Self link mapping to this resource - LogViewer string `json:"logViewer"` // LogViewer link to the organization log viewer config endpoint -} - -type organizationConfigResponse struct { - Links organizationConfigLinks `json:"links"` - chronograf.OrganizationConfig -} - -func newOrganizationConfigResponse(c chronograf.OrganizationConfig) *organizationConfigResponse { - return &organizationConfigResponse{ - Links: organizationConfigLinks{ - Self: "/chronograf/v1/org_config", - LogViewer: "/chronograf/v1/org_config/logviewer", - }, - OrganizationConfig: c, - } -} - -type logViewerConfigResponse struct { - Links selfLinks `json:"links"` - chronograf.LogViewerConfig -} - -func newLogViewerConfigResponse(c chronograf.LogViewerConfig) *logViewerConfigResponse { - return &logViewerConfigResponse{ - Links: selfLinks{ - Self: "/chronograf/v1/org_config/logviewer", - }, - LogViewerConfig: c, - } -} - -// OrganizationConfig retrieves the organization-wide config settings -func (s *Service) OrganizationConfig(w http.ResponseWriter, r *http.Request) { - ctx := 
r.Context() - - orgID, ok := hasOrganizationContext(ctx) - if !ok { - Error(w, http.StatusBadRequest, "Organization not found on context", s.Logger) - return - } - - config, err := s.Store.OrganizationConfig(ctx).FindOrCreate(ctx, orgID) - if err != nil { - Error(w, http.StatusBadRequest, err.Error(), s.Logger) - return - } - - res := newOrganizationConfigResponse(*config) - encodeJSON(w, http.StatusOK, res, s.Logger) -} - -// OrganizationLogViewerConfig retrieves the log viewer UI section of the organization config -// This uses a FindOrCreate function to ensure that any new organizations have -// default organization config values, without having to associate organization creation with -// organization config creation. -func (s *Service) OrganizationLogViewerConfig(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - orgID, ok := hasOrganizationContext(ctx) - if !ok { - Error(w, http.StatusBadRequest, "Organization not found on context", s.Logger) - return - } - - config, err := s.Store.OrganizationConfig(ctx).FindOrCreate(ctx, orgID) - if err != nil { - Error(w, http.StatusBadRequest, err.Error(), s.Logger) - return - } - - res := newLogViewerConfigResponse(config.LogViewer) - encodeJSON(w, http.StatusOK, res, s.Logger) -} - -// ReplaceOrganizationLogViewerConfig replaces the log viewer UI section of the organization config -func (s *Service) ReplaceOrganizationLogViewerConfig(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - orgID, ok := hasOrganizationContext(ctx) - if !ok { - Error(w, http.StatusBadRequest, "Organization not found on context", s.Logger) - return - } - - var logViewerConfig chronograf.LogViewerConfig - if err := json.NewDecoder(r.Body).Decode(&logViewerConfig); err != nil { - invalidJSON(w, s.Logger) - return - } - if err := validLogViewerConfig(logViewerConfig); err != nil { - Error(w, http.StatusBadRequest, err.Error(), s.Logger) - return - } - - config, err := s.Store.OrganizationConfig(ctx).FindOrCreate(ctx, orgID) - if err != nil { - Error(w, http.StatusBadRequest, err.Error(), s.Logger) - return - } - config.LogViewer = logViewerConfig - if err := s.Store.OrganizationConfig(ctx).Put(ctx, config); err != nil { - unknownErrorWithMessage(w, err, s.Logger) - return - } - - res := newLogViewerConfigResponse(config.LogViewer) - encodeJSON(w, http.StatusOK, res, s.Logger) -} - -// validLogViewerConfig ensures that the request body log viewer UI config is valid -// to be valid, it must: not be empty, have at least one column, not have multiple -// columns with the same name or position value, each column must have a visibility -// of either "visible" or "hidden" and if a column is of type severity, it must have -// at least one severity format of type icon, text, or both -func validLogViewerConfig(c chronograf.LogViewerConfig) error { - if len(c.Columns) == 0 { - return fmt.Errorf("invalid log viewer config: must have at least 1 column") - } - - nameMatcher := map[string]bool{} - positionMatcher := map[int32]bool{} - - for _, clm := range c.Columns { - iconCount := 0 - textCount := 0 - visibility := 0 - - // check that each column has a unique value for the name and position properties - if _, ok := nameMatcher[clm.Name]; ok { - return fmt.Errorf("invalid log viewer config: Duplicate column name %s", clm.Name) - } - nameMatcher[clm.Name] = true - if _, ok := positionMatcher[clm.Position]; ok { - return fmt.Errorf("invalid log viewer config: Multiple columns with same position value") - } - positionMatcher[clm.Position] = true - - for _, e 
:= range clm.Encodings { - if e.Type == "visibility" { - visibility++ - if !(e.Value == "visible" || e.Value == "hidden") { - return fmt.Errorf("invalid log viewer config: invalid visibility in column %s", clm.Name) - } - } - - if clm.Name == "severity" { - if e.Value == "icon" { - iconCount++ - } else if e.Value == "text" { - textCount++ - } - } - } - - if visibility != 1 { - return fmt.Errorf("invalid log viewer config: missing visibility encoding in column %s", clm.Name) - } - - if clm.Name == "severity" { - if iconCount+textCount == 0 || iconCount > 1 || textCount > 1 { - return fmt.Errorf("invalid log viewer config: invalid number of severity format encodings in column %s", clm.Name) - } - } - } - - return nil -} diff --git a/chronograf/server/org_config_test.go b/chronograf/server/org_config_test.go deleted file mode 100644 index 3ec842fefd4..00000000000 --- a/chronograf/server/org_config_test.go +++ /dev/null @@ -1,1076 +0,0 @@ -package server - -import ( - "bytes" - "context" - "encoding/json" - "io/ioutil" - "net/http/httptest" - "testing" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/mocks" - "github.com/influxdata/influxdb/v2/chronograf/organizations" -) - -func TestOrganizationConfig(t *testing.T) { - type args struct { - organizationID string - } - type fields struct { - organizationConfigStore chronograf.OrganizationConfigStore - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - args args - fields fields - wants wants - }{ - { - name: "Get organization configuration", - args: args{ - organizationID: "default", - }, - fields: fields{ - organizationConfigStore: &mocks.OrganizationConfigStore{ - FindOrCreateF: func(ctx context.Context, orgID string) (*chronograf.OrganizationConfig, error) { - switch orgID { - case "default": - return &chronograf.OrganizationConfig{ - OrganizationID: "default", - LogViewer: chronograf.LogViewerConfig{ - Columns: []chronograf.LogViewerColumn{ - { - Name: "time", - Position: 0, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "hidden", - }, - }, - }, - { - Name: "severity", - Position: 1, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - { - Type: "label", - Value: "icon", - }, - { - Type: "label", - Value: "text", - }, - }, - }, - { - Name: "timestamp", - Position: 2, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - }, - }, - { - Name: "message", - Position: 3, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - }, - }, - { - Name: "facility", - Position: 4, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - }, - }, - { - Name: "procid", - Position: 5, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - { - Type: "displayName", - Value: "Proc ID", - }, - }, - }, - { - Name: "appname", - Position: 6, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "visible", - }, - { - Type: "displayName", - Value: "Application", - }, - }, - }, - { - Name: "host", - Position: 7, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "visible", - }, - }, - }, - }, - }, - }, nil - default: - return nil, chronograf.ErrOrganizationConfigNotFound - } - }, - }, - }, - wants: wants{ - statusCode: 200, - contentType: 
"application/json", - body: `{"links":{"self":"/chronograf/v1/org_config","logViewer":"/chronograf/v1/org_config/logviewer"},"organization":"default","logViewer":{"columns":[{"name":"time","position":0,"encodings":[{"type":"visibility","value":"hidden"}]},{"name":"severity","position":1,"encodings":[{"type":"visibility","value":"visible"},{"type":"label","value":"icon"},{"type":"label","value":"text"}]},{"name":"timestamp","position":2,"encodings":[{"type":"visibility","value":"visible"}]},{"name":"message","position":3,"encodings":[{"type":"visibility","value":"visible"}]},{"name":"facility","position":4,"encodings":[{"type":"visibility","value":"visible"}]},{"name":"procid","position":5,"encodings":[{"type":"visibility","value":"visible"},{"type":"displayName","value":"Proc ID"}]},{"name":"appname","position":6,"encodings":[{"type":"visibility","value":"visible"},{"type":"displayName","value":"Application"}]},{"name":"host","position":7,"encodings":[{"type":"visibility","value":"visible"}]}]}}`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := &Service{ - Store: &mocks.Store{ - OrganizationConfigStore: tt.fields.organizationConfigStore, - }, - Logger: &chronograf.NoopLogger{}, - } - - w := httptest.NewRecorder() - r := httptest.NewRequest("GET", "http://any.url", nil) - ctx := context.WithValue(r.Context(), organizations.ContextKey, tt.args.organizationID) - r = r.WithContext(ctx) - - s.OrganizationConfig(w, r) - - resp := w.Result() - content := resp.Header.Get("Content-Type") - body, _ := ioutil.ReadAll(resp.Body) - - if resp.StatusCode != tt.wants.statusCode { - t.Errorf("%q. OrganizationConfig() = %v, want %v", tt.name, resp.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. OrganizationConfig() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if eq, _ := jsonEqual(string(body), tt.wants.body); tt.wants.body != "" && !eq { - t.Errorf("%q. 
OrganizationConfig() = \n***%v***\n,\nwant\n***%v***", tt.name, string(body), tt.wants.body) - } - }) - } -} - -func TestLogViewerOrganizationConfig(t *testing.T) { - type args struct { - organizationID string - } - type fields struct { - organizationConfigStore chronograf.OrganizationConfigStore - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - args args - fields fields - wants wants - }{ - { - name: "Get log viewer configuration", - args: args{ - organizationID: "default", - }, - fields: fields{ - organizationConfigStore: &mocks.OrganizationConfigStore{ - FindOrCreateF: func(ctx context.Context, orgID string) (*chronograf.OrganizationConfig, error) { - switch orgID { - case "default": - return &chronograf.OrganizationConfig{ - LogViewer: chronograf.LogViewerConfig{ - Columns: []chronograf.LogViewerColumn{ - { - Name: "severity", - Position: 0, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "color", - Value: "emergency", - Name: "ruby", - }, - { - Type: "color", - Value: "info", - Name: "rainforest", - }, - { - Type: "displayName", - Value: "Log Severity", - }, - }, - }, - }, - }, - }, nil - default: - return nil, chronograf.ErrOrganizationConfigNotFound - } - }, - }, - }, - wants: wants{ - statusCode: 200, - contentType: "application/json", - body: `{"links":{"self":"/chronograf/v1/org_config/logviewer"},"columns":[{"name":"severity","position":0,"encodings":[{"type":"color","value":"emergency","name":"ruby"},{"type":"color","value":"info","name":"rainforest"},{"type":"displayName","value":"Log Severity"}]}]}`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := &Service{ - Store: &mocks.Store{ - OrganizationConfigStore: tt.fields.organizationConfigStore, - }, - Logger: &chronograf.NoopLogger{}, - } - - w := httptest.NewRecorder() - r := httptest.NewRequest("GET", "http://any.url", nil) - ctx := context.WithValue(r.Context(), organizations.ContextKey, tt.args.organizationID) - r = r.WithContext(ctx) - - s.OrganizationLogViewerConfig(w, r) - - resp := w.Result() - content := resp.Header.Get("Content-Type") - body, _ := ioutil.ReadAll(resp.Body) - - if resp.StatusCode != tt.wants.statusCode { - t.Errorf("%q. LogViewerOrganizationConfig() = %v, want %v", tt.name, resp.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. LogViewerOrganizationConfig() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if eq, _ := jsonEqual(string(body), tt.wants.body); tt.wants.body != "" && !eq { - t.Errorf("%q. 
LogViewerOrganizationConfig() = \n***%v***\n,\nwant\n***%v***", tt.name, string(body), tt.wants.body) - } - }) - } -} - -func TestReplaceLogViewerOrganizationConfig(t *testing.T) { - type fields struct { - organizationConfigStore chronograf.OrganizationConfigStore - } - type args struct { - payload interface{} // expects JSON serializable struct - organizationID string - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "Set log viewer configuration", - fields: fields{ - organizationConfigStore: &mocks.OrganizationConfigStore{ - FindOrCreateF: func(ctx context.Context, orgID string) (*chronograf.OrganizationConfig, error) { - switch orgID { - case "1337": - return &chronograf.OrganizationConfig{ - LogViewer: chronograf.LogViewerConfig{ - Columns: []chronograf.LogViewerColumn{ - { - Name: "severity", - Position: 0, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "color", - Value: "info", - Name: "rainforest", - }, - { - Type: "visibility", - Value: "visible", - }, - { - Type: "label", - Value: "icon", - }, - }, - }, - }, - }, - }, nil - default: - return nil, chronograf.ErrOrganizationConfigNotFound - } - }, - PutF: func(ctx context.Context, target *chronograf.OrganizationConfig) error { - return nil - }, - }, - }, - args: args{ - payload: chronograf.LogViewerConfig{ - Columns: []chronograf.LogViewerColumn{ - { - Name: "severity", - Position: 1, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "color", - Value: "info", - Name: "pineapple", - }, - { - Type: "color", - Value: "emergency", - Name: "ruby", - }, - { - Type: "visibility", - Value: "visible", - }, - { - Type: "label", - Value: "icon", - }, - }, - }, - { - Name: "messages", - Position: 0, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "displayName", - Value: "Log Messages", - }, - { - Type: "visibility", - Value: "visible", - }, - }, - }, - }, - }, - organizationID: "1337", - }, - wants: wants{ - statusCode: 200, - contentType: "application/json", - body: `{"links":{"self":"/chronograf/v1/org_config/logviewer"},"columns":[{"name":"severity","position":1,"encodings":[{"type":"color","value":"info","name":"pineapple"},{"type":"color","value":"emergency","name":"ruby"},{"type":"visibility","value":"visible"},{"type":"label","value":"icon"}]},{"name":"messages","position":0,"encodings":[{"type":"displayName","value":"Log Messages"},{"type":"visibility","value":"visible"}]}]}`, - }, - }, - { - name: "Set invalid log viewer configuration – empty", - fields: fields{ - organizationConfigStore: &mocks.OrganizationConfigStore{ - FindOrCreateF: func(ctx context.Context, orgID string) (*chronograf.OrganizationConfig, error) { - switch orgID { - case "1337": - return &chronograf.OrganizationConfig{ - LogViewer: chronograf.LogViewerConfig{ - Columns: []chronograf.LogViewerColumn{ - { - Name: "severity", - Position: 0, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "color", - Value: "info", - Name: "rainforest", - }, - { - Type: "label", - Value: "icon", - }, - { - Type: "visibility", - Value: "visible", - }, - }, - }, - }, - }, - }, nil - default: - return nil, chronograf.ErrOrganizationConfigNotFound - } - }, - PutF: func(ctx context.Context, target *chronograf.OrganizationConfig) error { - return nil - }, - }, - }, - args: args{ - payload: chronograf.LogViewerConfig{ - Columns: []chronograf.LogViewerColumn{}, - }, - organizationID: "1337", - }, - wants: wants{ - statusCode: 400, - contentType: 
"application/json", - body: `{"code":400,"message":"invalid log viewer config: must have at least 1 column"}`, - }, - }, - { - name: "Set invalid log viewer configuration - duplicate column name", - fields: fields{ - organizationConfigStore: &mocks.OrganizationConfigStore{ - FindOrCreateF: func(ctx context.Context, orgID string) (*chronograf.OrganizationConfig, error) { - switch orgID { - case "1337": - return &chronograf.OrganizationConfig{ - LogViewer: chronograf.LogViewerConfig{ - Columns: []chronograf.LogViewerColumn{ - { - Name: "procid", - Position: 0, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "hidden", - }, - }, - }, - }, - }, - }, nil - default: - return nil, chronograf.ErrOrganizationConfigNotFound - } - }, - PutF: func(ctx context.Context, target *chronograf.OrganizationConfig) error { - return nil - }, - }, - }, - args: args{ - payload: chronograf.LogViewerConfig{ - Columns: []chronograf.LogViewerColumn{ - { - Name: "procid", - Position: 0, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "hidden", - }, - }, - }, - { - Name: "procid", - Position: 1, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "hidden", - }, - }, - }, - }, - }, - organizationID: "1337", - }, - wants: wants{ - statusCode: 400, - contentType: "application/json", - body: `{"code":400,"message":"invalid log viewer config: Duplicate column name procid"}`, - }, - }, - { - name: "Set invalid log viewer configuration - multiple columns with same position value", - fields: fields{ - organizationConfigStore: &mocks.OrganizationConfigStore{ - FindOrCreateF: func(ctx context.Context, orgID string) (*chronograf.OrganizationConfig, error) { - switch orgID { - case "1337": - return &chronograf.OrganizationConfig{ - LogViewer: chronograf.LogViewerConfig{ - Columns: []chronograf.LogViewerColumn{ - { - Name: "procid", - Position: 0, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "hidden", - }, - }, - }, - }, - }, - }, nil - default: - return nil, chronograf.ErrOrganizationConfigNotFound - } - }, - PutF: func(ctx context.Context, target *chronograf.OrganizationConfig) error { - return nil - }, - }, - }, - args: args{ - payload: chronograf.LogViewerConfig{ - Columns: []chronograf.LogViewerColumn{ - { - Name: "procid", - Position: 0, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "hidden", - }, - }, - }, - { - Name: "timestamp", - Position: 0, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "hidden", - }, - }, - }, - }, - }, - organizationID: "1337", - }, - wants: wants{ - statusCode: 400, - contentType: "application/json", - body: `{"code":400,"message":"invalid log viewer config: Multiple columns with same position value"}`, - }, - }, - { - name: "Set invalid log viewer configuration – no visibility", - fields: fields{ - organizationConfigStore: &mocks.OrganizationConfigStore{ - FindOrCreateF: func(ctx context.Context, orgID string) (*chronograf.OrganizationConfig, error) { - switch orgID { - case "1337": - return &chronograf.OrganizationConfig{ - LogViewer: chronograf.LogViewerConfig{ - Columns: []chronograf.LogViewerColumn{ - { - Name: "severity", - Position: 0, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "color", - Value: "info", - Name: "rainforest", - }, - { - Type: "label", - Value: "icon", - }, - }, - }, - }, - }, - }, nil - default: - return nil, chronograf.ErrOrganizationConfigNotFound - } - }, - PutF: func(ctx context.Context, 
target *chronograf.OrganizationConfig) error { - return nil - }, - }, - }, - args: args{ - payload: chronograf.LogViewerConfig{ - Columns: []chronograf.LogViewerColumn{ - { - Name: "severity", - Position: 1, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "color", - Value: "info", - Name: "pineapple", - }, - { - Type: "color", - Value: "emergency", - Name: "ruby", - }, - { - Type: "label", - Value: "icon", - }, - }, - }, - }, - }, - organizationID: "1337", - }, - wants: wants{ - statusCode: 400, - contentType: "application/json", - body: `{"code":400,"message":"invalid log viewer config: missing visibility encoding in column severity"}`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := &Service{ - Store: &mocks.Store{ - OrganizationConfigStore: tt.fields.organizationConfigStore, - }, - Logger: &chronograf.NoopLogger{}, - } - - w := httptest.NewRecorder() - r := httptest.NewRequest("GET", "http://any.url", nil) - ctx := context.WithValue(r.Context(), organizations.ContextKey, tt.args.organizationID) - r = r.WithContext(ctx) - buf, _ := json.Marshal(tt.args.payload) - r.Body = ioutil.NopCloser(bytes.NewReader(buf)) - - s.ReplaceOrganizationLogViewerConfig(w, r) - - resp := w.Result() - content := resp.Header.Get("Content-Type") - body, _ := ioutil.ReadAll(resp.Body) - - if resp.StatusCode != tt.wants.statusCode { - t.Errorf("%q. ReplaceLogViewerOrganizationConfig() = %v, want %v", tt.name, resp.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. ReplaceLogViewerOrganizationConfig() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if eq, _ := jsonEqual(string(body), tt.wants.body); tt.wants.body != "" && !eq { - t.Errorf("%q. 
ReplaceLogViewerOrganizationConfig() = \n***%v***\n,\nwant\n***%v***", tt.name, string(body), tt.wants.body) - } - }) - } -} - -func Test_validLogViewerConfig(t *testing.T) { - type args struct { - LogViewer chronograf.LogViewerConfig - } - - tests := []struct { - name string - args args - wantErr bool - }{ - { - name: "cannot have 0 columns", - args: args{ - LogViewer: chronograf.LogViewerConfig{ - Columns: nil, - }, - }, - wantErr: true, - }, - { - name: "can have 1 column", - args: args{ - LogViewer: chronograf.LogViewerConfig{ - Columns: []chronograf.LogViewerColumn{ - { - Name: "timestamp", - Position: 2, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - }, - }, - }, - }, - }, - wantErr: false, - }, - { - name: "can have more than 1 column", - args: args{ - LogViewer: chronograf.LogViewerConfig{ - Columns: []chronograf.LogViewerColumn{ - { - Name: "timestamp", - Position: 2, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - }, - }, - { - Name: "message", - Position: 3, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - }, - }, - { - Name: "facility", - Position: 4, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - }, - }, - }, - }, - }, - wantErr: false, - }, - { - name: "cannot have multiple columns with the same name value", - args: args{ - LogViewer: chronograf.LogViewerConfig{ - Columns: []chronograf.LogViewerColumn{ - { - Name: "timestamp", - Position: 2, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - }, - }, - { - Name: "timestamp", - Position: 3, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - }, - }, - }, - }, - }, - wantErr: true, - }, - { - name: "cannot have multiple columns with the same position value", - args: args{ - LogViewer: chronograf.LogViewerConfig{ - Columns: []chronograf.LogViewerColumn{ - { - Name: "timestamp", - Position: 2, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - }, - }, - { - Name: "message", - Position: 2, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - }, - }, - }, - }, - }, - wantErr: true, - }, - { - name: "each column must have a visibility encoding value of either 'visible' or 'hidden'", - args: args{ - LogViewer: chronograf.LogViewerConfig{ - Columns: []chronograf.LogViewerColumn{ - { - Name: "timestamp", - Position: 2, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "bob", - }, - }, - }, - { - Name: "message", - Position: 3, - Encodings: []chronograf.ColumnEncoding{ - - { - Type: "visibility", - Value: "visible", - }, - }, - }, - }, - }, - }, - wantErr: true, - }, - { - name: "severity column can have 1 of each icon and text label encoding", - args: args{ - LogViewer: chronograf.LogViewerConfig{ - Columns: []chronograf.LogViewerColumn{ - { - Name: "severity", - Position: 0, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "visible", - }, - { - Type: "color", - Value: "info", - Name: "rainforest", - }, - { - Type: "label", - Value: "icon", - }, - { - Type: "label", - Value: "text", - }, - }, - }, - }, - }, - }, - wantErr: false, - }, - { - name: "severity column can have 1 icon label encoding", - args: args{ - LogViewer: chronograf.LogViewerConfig{ - Columns: []chronograf.LogViewerColumn{ - { - Name: "severity", - 
Position: 0, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "visible", - }, - { - Type: "color", - Value: "info", - Name: "rainforest", - }, - { - Type: "label", - Value: "icon", - }, - }, - }, - }, - }, - }, - wantErr: false, - }, - { - name: "severity column can have 1 text label encoding", - args: args{ - LogViewer: chronograf.LogViewerConfig{ - Columns: []chronograf.LogViewerColumn{ - { - Name: "severity", - Position: 0, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "visible", - }, - { - Type: "color", - Value: "info", - Name: "rainforest", - }, - { - Type: "label", - Value: "text", - }, - }, - }, - }, - }, - }, - wantErr: false, - }, - { - name: "severity column cannot have 0 label encodings", - args: args{ - LogViewer: chronograf.LogViewerConfig{ - Columns: []chronograf.LogViewerColumn{ - { - Name: "severity", - Position: 0, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "visible", - }, - { - Type: "color", - Value: "info", - Name: "rainforest", - }, - }, - }, - }, - }, - }, - wantErr: true, - }, - { - name: "severity column cannot have more than 1 icon label encoding", - args: args{ - LogViewer: chronograf.LogViewerConfig{ - Columns: []chronograf.LogViewerColumn{ - { - Name: "severity", - Position: 0, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "visible", - }, - { - Type: "color", - Value: "info", - Name: "rainforest", - }, - { - Type: "label", - Value: "icon", - }, - { - Type: "label", - Value: "icon", - }, - }, - }, - }, - }, - }, - wantErr: true, - }, - { - name: "severity column cannot have more than 1 text label encoding", - args: args{ - LogViewer: chronograf.LogViewerConfig{ - Columns: []chronograf.LogViewerColumn{ - { - Name: "severity", - Position: 0, - Encodings: []chronograf.ColumnEncoding{ - { - Type: "visibility", - Value: "visible", - }, - { - Type: "color", - Value: "info", - Name: "rainforest", - }, - { - Type: "label", - Value: "text", - }, - { - Type: "label", - Value: "text", - }, - }, - }, - }, - }, - }, - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := validLogViewerConfig(tt.args.LogViewer) - - if (tt.wantErr && got == nil) || (!tt.wantErr && got != nil) { - t.Errorf("%q. 
validLogViewerConfig().\ngot: %v\nwantErr: %v", tt.name, got, tt.wantErr) - } - }) - } -} diff --git a/chronograf/server/organizations.go b/chronograf/server/organizations.go deleted file mode 100644 index 01f2cb8ce97..00000000000 --- a/chronograf/server/organizations.go +++ /dev/null @@ -1,232 +0,0 @@ -package server - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - - "github.com/bouk/httprouter" - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/organizations" - "github.com/influxdata/influxdb/v2/chronograf/roles" -) - -type organizationRequest struct { - Name string `json:"name"` - DefaultRole string `json:"defaultRole"` -} - -func (r *organizationRequest) ValidCreate() error { - if r.Name == "" { - return fmt.Errorf("name required on Chronograf Organization request body") - } - - return r.ValidDefaultRole() -} - -func (r *organizationRequest) ValidUpdate() error { - if r.Name == "" && r.DefaultRole == "" { - return fmt.Errorf("no fields to update") - } - - if r.DefaultRole != "" { - return r.ValidDefaultRole() - } - - return nil -} - -func (r *organizationRequest) ValidDefaultRole() error { - if r.DefaultRole == "" { - r.DefaultRole = roles.MemberRoleName - } - - switch r.DefaultRole { - case roles.MemberRoleName, roles.ViewerRoleName, roles.EditorRoleName, roles.AdminRoleName: - return nil - default: - return fmt.Errorf("default role must be member, viewer, editor, or admin") - } -} - -type organizationResponse struct { - Links selfLinks `json:"links"` - chronograf.Organization -} - -func newOrganizationResponse(o *chronograf.Organization) *organizationResponse { - if o == nil { - o = &chronograf.Organization{} - } - return &organizationResponse{ - Organization: *o, - Links: selfLinks{ - Self: fmt.Sprintf("/chronograf/v1/organizations/%s", o.ID), - }, - } -} - -type organizationsResponse struct { - Links selfLinks `json:"links"` - Organizations []*organizationResponse `json:"organizations"` -} - -func newOrganizationsResponse(orgs []chronograf.Organization) *organizationsResponse { - orgsResp := make([]*organizationResponse, len(orgs)) - for i, org := range orgs { - orgsResp[i] = newOrganizationResponse(&org) - } - return &organizationsResponse{ - Organizations: orgsResp, - Links: selfLinks{ - Self: "/chronograf/v1/organizations", - }, - } -} - -// Organizations retrieves all organizations from store -func (s *Service) Organizations(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - orgs, err := s.Store.Organizations(ctx).All(ctx) - if err != nil { - Error(w, http.StatusBadRequest, err.Error(), s.Logger) - return - } - - res := newOrganizationsResponse(orgs) - encodeJSON(w, http.StatusOK, res, s.Logger) -} - -// NewOrganization adds a new organization to store -func (s *Service) NewOrganization(w http.ResponseWriter, r *http.Request) { - var req organizationRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - invalidJSON(w, s.Logger) - return - } - - if err := req.ValidCreate(); err != nil { - invalidData(w, err, s.Logger) - return - } - - ctx := r.Context() - org := &chronograf.Organization{ - Name: req.Name, - DefaultRole: req.DefaultRole, - } - - res, err := s.Store.Organizations(ctx).Add(ctx, org) - if err != nil { - Error(w, http.StatusBadRequest, err.Error(), s.Logger) - return - } - - // Now that the organization was created, add the user - // making the request to the organization - user, ok := hasUserContext(ctx) - if !ok { - // Best attempt at 
cleaning up the organization if there were any errors - _ = s.Store.Organizations(ctx).Delete(ctx, res) - Error(w, http.StatusInternalServerError, "failed to retrieve user from context", s.Logger) - return - } - - user.Roles = []chronograf.Role{ - { - Organization: res.ID, - Name: roles.AdminRoleName, - }, - } - - orgCtx := context.WithValue(ctx, organizations.ContextKey, res.ID) - _, err = s.Store.Users(orgCtx).Add(orgCtx, user) - if err != nil { - // Best attempt at cleaning up the organization if there were any errors adding user to org - _ = s.Store.Organizations(ctx).Delete(ctx, res) - s.Logger.Error("failed to add user to organization", err.Error()) - Error(w, http.StatusInternalServerError, "failed to add user to organization", s.Logger) - return - } - - co := newOrganizationResponse(res) - location(w, co.Links.Self) - encodeJSON(w, http.StatusCreated, co, s.Logger) -} - -// OrganizationID retrieves an organization with ID from store -func (s *Service) OrganizationID(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - id := httprouter.GetParamFromContext(ctx, "oid") - - org, err := s.Store.Organizations(ctx).Get(ctx, chronograf.OrganizationQuery{ID: &id}) - if err != nil { - Error(w, http.StatusBadRequest, err.Error(), s.Logger) - return - } - - res := newOrganizationResponse(org) - encodeJSON(w, http.StatusOK, res, s.Logger) -} - -// UpdateOrganization updates an organization in the organizations store -func (s *Service) UpdateOrganization(w http.ResponseWriter, r *http.Request) { - var req organizationRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - invalidJSON(w, s.Logger) - return - } - - if err := req.ValidUpdate(); err != nil { - invalidData(w, err, s.Logger) - return - } - - ctx := r.Context() - id := httprouter.GetParamFromContext(ctx, "oid") - - org, err := s.Store.Organizations(ctx).Get(ctx, chronograf.OrganizationQuery{ID: &id}) - if err != nil { - Error(w, http.StatusBadRequest, err.Error(), s.Logger) - return - } - - if req.Name != "" { - org.Name = req.Name - } - - if req.DefaultRole != "" { - org.DefaultRole = req.DefaultRole - } - - err = s.Store.Organizations(ctx).Update(ctx, org) - if err != nil { - Error(w, http.StatusBadRequest, err.Error(), s.Logger) - return - } - - res := newOrganizationResponse(org) - location(w, res.Links.Self) - encodeJSON(w, http.StatusOK, res, s.Logger) -} - -// RemoveOrganization removes an organization from the organizations store -func (s *Service) RemoveOrganization(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - id := httprouter.GetParamFromContext(ctx, "oid") - - org, err := s.Store.Organizations(ctx).Get(ctx, chronograf.OrganizationQuery{ID: &id}) - if err != nil { - Error(w, http.StatusNotFound, err.Error(), s.Logger) - return - } - if err := s.Store.Organizations(ctx).Delete(ctx, org); err != nil { - Error(w, http.StatusBadRequest, err.Error(), s.Logger) - return - } - w.WriteHeader(http.StatusNoContent) -} diff --git a/chronograf/server/organizations_test.go b/chronograf/server/organizations_test.go deleted file mode 100644 index 7392bab6d69..00000000000 --- a/chronograf/server/organizations_test.go +++ /dev/null @@ -1,726 +0,0 @@ -package server - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "testing" - - "github.com/bouk/httprouter" - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/mocks" - 
"github.com/influxdata/influxdb/v2/chronograf/roles" -) - -func TestService_OrganizationID(t *testing.T) { - type fields struct { - OrganizationsStore chronograf.OrganizationsStore - Logger chronograf.Logger - } - type args struct { - w *httptest.ResponseRecorder - r *http.Request - } - tests := []struct { - name string - fields fields - args args - id string - wantStatus int - wantContentType string - wantBody string - }{ - { - name: "Get Single Organization", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest( - "GET", - "http://any.url", // can be any valid URL as we are bypassing mux - nil, - ), - }, - fields: fields{ - Logger: &chronograf.NoopLogger{}, - OrganizationsStore: &mocks.OrganizationsStore{ - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - switch *q.ID { - case "1337": - return &chronograf.Organization{ - ID: "1337", - Name: "The Good Place", - }, nil - default: - return nil, fmt.Errorf("organization with ID %s not found", *q.ID) - } - }, - }, - }, - id: "1337", - wantStatus: http.StatusOK, - wantContentType: "application/json", - wantBody: `{"links":{"self":"/chronograf/v1/organizations/1337"},"id":"1337","name":"The Good Place"}`, - }, - { - name: "Get Single Organization", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest( - "GET", - "http://any.url", // can be any valid URL as we are bypassing mux - nil, - ), - }, - fields: fields{ - Logger: &chronograf.NoopLogger{}, - OrganizationsStore: &mocks.OrganizationsStore{ - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - switch *q.ID { - case "1337": - return &chronograf.Organization{ - ID: "1337", - Name: "The Good Place", - }, nil - default: - return nil, fmt.Errorf("organization with ID %s not found", *q.ID) - } - }, - }, - }, - id: "1337", - wantStatus: http.StatusOK, - wantContentType: "application/json", - wantBody: `{"id":"1337","name":"The Good Place","links":{"self":"/chronograf/v1/organizations/1337"}}`, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := &Service{ - Store: &mocks.Store{ - OrganizationsStore: tt.fields.OrganizationsStore, - }, - Logger: tt.fields.Logger, - } - - tt.args.r = tt.args.r.WithContext(httprouter.WithParams( - context.Background(), - httprouter.Params{ - { - Key: "oid", - Value: tt.id, - }, - })) - - s.OrganizationID(tt.args.w, tt.args.r) - - resp := tt.args.w.Result() - content := resp.Header.Get("Content-Type") - body, _ := ioutil.ReadAll(resp.Body) - - if resp.StatusCode != tt.wantStatus { - t.Errorf("%q. OrganizationID() = %v, want %v", tt.name, resp.StatusCode, tt.wantStatus) - } - if tt.wantContentType != "" && content != tt.wantContentType { - t.Errorf("%q. OrganizationID() = %v, want %v", tt.name, content, tt.wantContentType) - } - if eq, _ := jsonEqual(string(body), tt.wantBody); tt.wantBody != "" && !eq { - t.Errorf("%q. 
OrganizationID() = \n***%v***\n,\nwant\n***%v***", tt.name, string(body), tt.wantBody) - } - }) - } -} - -func TestService_Organizations(t *testing.T) { - type fields struct { - OrganizationsStore chronograf.OrganizationsStore - Logger chronograf.Logger - } - type args struct { - w *httptest.ResponseRecorder - r *http.Request - } - tests := []struct { - name string - fields fields - args args - wantStatus int - wantContentType string - wantBody string - }{ - { - name: "Get Organizations", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest( - "GET", - "http://any.url", // can be any valid URL as we are bypassing mux - nil, - ), - }, - fields: fields{ - Logger: &chronograf.NoopLogger{}, - OrganizationsStore: &mocks.OrganizationsStore{ - AllF: func(ctx context.Context) ([]chronograf.Organization, error) { - return []chronograf.Organization{ - chronograf.Organization{ - ID: "1337", - Name: "The Good Place", - }, - chronograf.Organization{ - ID: "100", - Name: "The Bad Place", - }, - }, nil - }, - }, - }, - wantStatus: http.StatusOK, - wantContentType: "application/json", - wantBody: `{"links":{"self":"/chronograf/v1/organizations"},"organizations":[{"links":{"self":"/chronograf/v1/organizations/1337"},"id":"1337","name":"The Good Place"},{"links":{"self":"/chronograf/v1/organizations/100"},"id":"100","name":"The Bad Place"}]}`, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := &Service{ - Store: &mocks.Store{ - OrganizationsStore: tt.fields.OrganizationsStore, - }, - Logger: tt.fields.Logger, - } - - s.Organizations(tt.args.w, tt.args.r) - - resp := tt.args.w.Result() - content := resp.Header.Get("Content-Type") - body, _ := ioutil.ReadAll(resp.Body) - - if resp.StatusCode != tt.wantStatus { - t.Errorf("%q. Organizations() = %v, want %v", tt.name, resp.StatusCode, tt.wantStatus) - } - if tt.wantContentType != "" && content != tt.wantContentType { - t.Errorf("%q. Organizations() = %v, want %v", tt.name, content, tt.wantContentType) - } - if eq, _ := jsonEqual(string(body), tt.wantBody); tt.wantBody != "" && !eq { - t.Errorf("%q. 
Organizations() = \n***%v***\n,\nwant\n***%v***", tt.name, string(body), tt.wantBody) - } - }) - } -} - -func TestService_UpdateOrganization(t *testing.T) { - type fields struct { - OrganizationsStore chronograf.OrganizationsStore - Logger chronograf.Logger - } - type args struct { - w *httptest.ResponseRecorder - r *http.Request - org *organizationRequest - } - tests := []struct { - name string - fields fields - args args - id string - wantStatus int - wantContentType string - wantBody string - }{ - { - name: "Update Organization name", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest( - "GET", - "http://any.url", // can be any valid URL as we are bypassing mux - nil, - ), - org: &organizationRequest{ - Name: "The Bad Place", - }, - }, - fields: fields{ - Logger: &chronograf.NoopLogger{}, - OrganizationsStore: &mocks.OrganizationsStore{ - UpdateF: func(ctx context.Context, o *chronograf.Organization) error { - return nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "1337", - Name: "The Good Place", - DefaultRole: roles.ViewerRoleName, - }, nil - }, - }, - }, - id: "1337", - wantStatus: http.StatusOK, - wantContentType: "application/json", - wantBody: `{"id":"1337","name":"The Bad Place","defaultRole":"viewer","links":{"self":"/chronograf/v1/organizations/1337"}}`, - }, - { - name: "Update Organization - nothing to update", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest( - "GET", - "http://any.url", // can be any valid URL as we are bypassing mux - nil, - ), - org: &organizationRequest{}, - }, - fields: fields{ - Logger: &chronograf.NoopLogger{}, - OrganizationsStore: &mocks.OrganizationsStore{ - UpdateF: func(ctx context.Context, o *chronograf.Organization) error { - return nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "1337", - Name: "The Good Place", - DefaultRole: roles.ViewerRoleName, - }, nil - }, - }, - }, - id: "1337", - wantStatus: http.StatusUnprocessableEntity, - wantContentType: "application/json", - wantBody: `{"code":422,"message":"no fields to update"}`, - }, - { - name: "Update Organization default role", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest( - "GET", - "http://any.url", // can be any valid URL as we are bypassing mux - nil, - ), - org: &organizationRequest{ - DefaultRole: roles.ViewerRoleName, - }, - }, - fields: fields{ - Logger: &chronograf.NoopLogger{}, - OrganizationsStore: &mocks.OrganizationsStore{ - UpdateF: func(ctx context.Context, o *chronograf.Organization) error { - return nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "1337", - Name: "The Good Place", - DefaultRole: roles.MemberRoleName, - }, nil - }, - }, - }, - id: "1337", - wantStatus: http.StatusOK, - wantContentType: "application/json", - wantBody: `{"links":{"self":"/chronograf/v1/organizations/1337"},"id":"1337","name":"The Good Place","defaultRole":"viewer"}`, - }, - { - name: "Update Organization - invalid update", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest( - "GET", - "http://any.url", // can be any valid URL as we are bypassing mux - nil, - ), - org: &organizationRequest{}, - }, - fields: fields{ - Logger: &chronograf.NoopLogger{}, - OrganizationsStore: &mocks.OrganizationsStore{ - UpdateF: 
func(ctx context.Context, o *chronograf.Organization) error { - return nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - return nil, nil - }, - }, - }, - id: "1337", - wantStatus: http.StatusUnprocessableEntity, - wantContentType: "application/json", - wantBody: `{"code":422,"message":"no fields to update"}`, - }, - { - name: "Update Organization - invalid role", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest( - "GET", - "http://any.url", // can be any valid URL as we are bypassing mux - nil, - ), - org: &organizationRequest{ - DefaultRole: "sillyrole", - }, - }, - fields: fields{ - Logger: &chronograf.NoopLogger{}, - OrganizationsStore: &mocks.OrganizationsStore{ - UpdateF: func(ctx context.Context, o *chronograf.Organization) error { - return nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - return nil, nil - }, - }, - }, - id: "1337", - wantStatus: http.StatusUnprocessableEntity, - wantContentType: "application/json", - wantBody: `{"code":422,"message":"default role must be member, viewer, editor, or admin"}`, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := &Service{ - Store: &mocks.Store{ - OrganizationsStore: tt.fields.OrganizationsStore, - }, - Logger: tt.fields.Logger, - } - - tt.args.r = tt.args.r.WithContext(httprouter.WithParams(context.Background(), - httprouter.Params{ - { - Key: "oid", - Value: tt.id, - }, - })) - - buf, _ := json.Marshal(tt.args.org) - tt.args.r.Body = ioutil.NopCloser(bytes.NewReader(buf)) - s.UpdateOrganization(tt.args.w, tt.args.r) - - resp := tt.args.w.Result() - content := resp.Header.Get("Content-Type") - body, _ := ioutil.ReadAll(resp.Body) - - if resp.StatusCode != tt.wantStatus { - t.Errorf("%q. UpdateOrganization() = %v, want %v", tt.name, resp.StatusCode, tt.wantStatus) - } - if tt.wantContentType != "" && content != tt.wantContentType { - t.Errorf("%q. UpdateOrganization() = %v, want %v", tt.name, content, tt.wantContentType) - } - if eq, _ := jsonEqual(string(body), tt.wantBody); tt.wantBody != "" && !eq { - t.Errorf("%q. 
UpdateOrganization() = \n***%v***\n,\nwant\n***%v***", tt.name, string(body), tt.wantBody) - } - }) - } -} - -func TestService_RemoveOrganization(t *testing.T) { - type fields struct { - OrganizationsStore chronograf.OrganizationsStore - Logger chronograf.Logger - } - type args struct { - w *httptest.ResponseRecorder - r *http.Request - } - tests := []struct { - name string - fields fields - args args - id string - wantStatus int - }{ - { - name: "Remove Organization", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest( - "GET", - "http://any.url", // can be any valid URL as we are bypassing mux - nil, - ), - }, - fields: fields{ - Logger: &chronograf.NoopLogger{}, - OrganizationsStore: &mocks.OrganizationsStore{ - DeleteF: func(ctx context.Context, o *chronograf.Organization) error { - return nil - }, - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - switch *q.ID { - case "1337": - return &chronograf.Organization{ - ID: "1337", - Name: "The Good Place", - }, nil - default: - return nil, fmt.Errorf("organization with ID %s not found", *q.ID) - } - }, - }, - }, - id: "1337", - wantStatus: http.StatusNoContent, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := &Service{ - Store: &mocks.Store{ - OrganizationsStore: tt.fields.OrganizationsStore, - }, - Logger: tt.fields.Logger, - } - - tt.args.r = tt.args.r.WithContext(httprouter.WithParams(context.Background(), - httprouter.Params{ - { - Key: "oid", - Value: tt.id, - }, - })) - s.RemoveOrganization(tt.args.w, tt.args.r) - - resp := tt.args.w.Result() - - if resp.StatusCode != tt.wantStatus { - t.Errorf("%q. RemoveOrganization() = %v, want %v", tt.name, resp.StatusCode, tt.wantStatus) - } - }) - } -} - -func TestService_NewOrganization(t *testing.T) { - type fields struct { - OrganizationsStore chronograf.OrganizationsStore - UsersStore chronograf.UsersStore - Logger chronograf.Logger - } - type args struct { - w *httptest.ResponseRecorder - r *http.Request - org *organizationRequest - user *chronograf.User - } - tests := []struct { - name string - fields fields - args args - wantStatus int - wantContentType string - wantBody string - }{ - { - name: "Create Organization", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest( - "GET", - "http://any.url", // can be any valid URL as we are bypassing mux - nil, - ), - user: &chronograf.User{ - ID: 1, - Name: "bobetta", - Provider: "github", - Scheme: "oauth2", - }, - org: &organizationRequest{ - Name: "The Good Place", - }, - }, - fields: fields{ - Logger: &chronograf.NoopLogger{}, - UsersStore: &mocks.UsersStore{ - AddF: func(ctx context.Context, u *chronograf.User) (*chronograf.User, error) { - return &chronograf.User{ - ID: 1, - Name: "bobetta", - Provider: "github", - Scheme: "oauth2", - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - AddF: func(ctx context.Context, o *chronograf.Organization) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "1337", - Name: "The Good Place", - }, nil - }, - }, - }, - wantStatus: http.StatusCreated, - wantContentType: "application/json", - wantBody: `{"id":"1337","name":"The Good Place","links":{"self":"/chronograf/v1/organizations/1337"}}`, - }, - { - name: "Fail to create Organization - no org name", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest( - "GET", - "http://any.url", // can be any valid URL as we are bypassing mux - nil, - ), - user: &chronograf.User{ - ID: 1, - 
Name: "bobetta", - Provider: "github", - Scheme: "oauth2", - }, - org: &organizationRequest{}, - }, - fields: fields{ - Logger: &chronograf.NoopLogger{}, - UsersStore: &mocks.UsersStore{ - AddF: func(ctx context.Context, u *chronograf.User) (*chronograf.User, error) { - return &chronograf.User{ - ID: 1, - Name: "bobetta", - Provider: "github", - Scheme: "oauth2", - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - AddF: func(ctx context.Context, o *chronograf.Organization) (*chronograf.Organization, error) { - return nil, nil - }, - }, - }, - wantStatus: http.StatusUnprocessableEntity, - wantContentType: "application/json", - wantBody: `{"code":422,"message":"name required on Chronograf Organization request body"}`, - }, - { - name: "Create Organization - no user on context", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest( - "GET", - "http://any.url", // can be any valid URL as we are bypassing mux - nil, - ), - org: &organizationRequest{ - Name: "The Good Place", - }, - }, - fields: fields{ - Logger: &chronograf.NoopLogger{}, - UsersStore: &mocks.UsersStore{ - AddF: func(ctx context.Context, u *chronograf.User) (*chronograf.User, error) { - return &chronograf.User{ - ID: 1, - Name: "bobetta", - Provider: "github", - Scheme: "oauth2", - }, nil - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - AddF: func(ctx context.Context, o *chronograf.Organization) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "1337", - Name: "The Good Place", - }, nil - }, - DeleteF: func(ctx context.Context, o *chronograf.Organization) error { - return nil - }, - }, - }, - wantStatus: http.StatusInternalServerError, - wantContentType: "application/json", - wantBody: `{"code":500,"message":"failed to retrieve user from context"}`, - }, - { - name: "Create Organization - failed to add user to organization", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest( - "GET", - "http://any.url", // can be any valid URL as we are bypassing mux - nil, - ), - org: &organizationRequest{ - Name: "The Good Place", - }, - user: &chronograf.User{ - ID: 1, - Name: "bobetta", - Provider: "github", - Scheme: "oauth2", - }, - }, - fields: fields{ - Logger: &chronograf.NoopLogger{}, - UsersStore: &mocks.UsersStore{ - AddF: func(ctx context.Context, u *chronograf.User) (*chronograf.User, error) { - return nil, fmt.Errorf("failed to add user to org") - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - AddF: func(ctx context.Context, o *chronograf.Organization) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "1337", - Name: "The Good Place", - }, nil - }, - DeleteF: func(ctx context.Context, o *chronograf.Organization) error { - return nil - }, - }, - }, - wantStatus: http.StatusInternalServerError, - wantContentType: "application/json", - wantBody: `{"code":500,"message":"failed to add user to organization"}`, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := &Service{ - Store: &mocks.Store{ - OrganizationsStore: tt.fields.OrganizationsStore, - UsersStore: tt.fields.UsersStore, - }, - Logger: tt.fields.Logger, - } - - ctx := tt.args.r.Context() - ctx = context.WithValue(ctx, UserContextKey, tt.args.user) - tt.args.r = tt.args.r.WithContext(ctx) - - buf, _ := json.Marshal(tt.args.org) - tt.args.r.Body = ioutil.NopCloser(bytes.NewReader(buf)) - s.NewOrganization(tt.args.w, tt.args.r) - - resp := tt.args.w.Result() - content := resp.Header.Get("Content-Type") - body, _ := 
ioutil.ReadAll(resp.Body) - - if resp.StatusCode != tt.wantStatus { - t.Errorf("%q. NewOrganization() = %v, want %v", tt.name, resp.StatusCode, tt.wantStatus) - } - if tt.wantContentType != "" && content != tt.wantContentType { - t.Errorf("%q. NewOrganization() = %v, want %v", tt.name, content, tt.wantContentType) - } - if eq, _ := jsonEqual(string(body), tt.wantBody); tt.wantBody != "" && !eq { - t.Errorf("%q. NewOrganization() = \n***%v***\n,\nwant\n***%v***", tt.name, string(body), tt.wantBody) - } - }) - } -} diff --git a/chronograf/server/path.go b/chronograf/server/path.go deleted file mode 100644 index c1293e3cca7..00000000000 --- a/chronograf/server/path.go +++ /dev/null @@ -1,10 +0,0 @@ -package server - -import "net/url" - -// PathEscape escapes the string so it can be safely placed inside a URL path segment. -// Change to url.PathEscape for go 1.8 -func PathEscape(str string) string { - u := &url.URL{Path: str} - return u.String() -} diff --git a/chronograf/server/permissions.go b/chronograf/server/permissions.go deleted file mode 100644 index ae7123e567e..00000000000 --- a/chronograf/server/permissions.go +++ /dev/null @@ -1,55 +0,0 @@ -package server - -import ( - "fmt" - "net/http" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -// Permissions returns all possible permissions for this source. -func (s *Service) Permissions(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - srcID, err := paramID("id", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) - return - } - - src, err := s.Store.Sources(ctx).Get(ctx, srcID) - if err != nil { - notFound(w, srcID, s.Logger) - return - } - - ts, err := s.TimeSeries(src) - if err != nil { - msg := fmt.Sprintf("unable to connect to source %d: %v", srcID, err) - Error(w, http.StatusBadRequest, msg, s.Logger) - return - } - - if err = ts.Connect(ctx, &src); err != nil { - msg := fmt.Sprintf("unable to connect to source %d: %v", srcID, err) - Error(w, http.StatusBadRequest, msg, s.Logger) - return - } - - perms := ts.Permissions(ctx) - httpAPISrcs := "/chronograf/v1/sources" - res := struct { - Permissions chronograf.Permissions `json:"permissions"` - Links map[string]string `json:"links"` // Links are URI locations related to user - }{ - Permissions: perms, - Links: map[string]string{ - "self": fmt.Sprintf("%s/%d/permissions", httpAPISrcs, srcID), - "source": fmt.Sprintf("%s/%d", httpAPISrcs, srcID), - }, - } - encodeJSON(w, http.StatusOK, res, s.Logger) -} diff --git a/chronograf/server/permissions_test.go b/chronograf/server/permissions_test.go deleted file mode 100644 index 1d0db37fa44..00000000000 --- a/chronograf/server/permissions_test.go +++ /dev/null @@ -1,114 +0,0 @@ -package server - -import ( - "bytes" - "context" - "io/ioutil" - "net/http" - "net/http/httptest" - "testing" - - "github.com/influxdata/httprouter" - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/mocks" -) - -func TestService_Permissions(t *testing.T) { - type fields struct { - SourcesStore chronograf.SourcesStore - TimeSeries TimeSeriesClient - Logger chronograf.Logger - UseAuth bool - } - type args struct { - w *httptest.ResponseRecorder - r *http.Request - } - tests := []struct { - name string - fields fields - args args - ID string - wantStatus int - wantContentType string - wantBody string - }{ - { - name: "New user for data source", - 
args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest( - "POST", - "http://server.local/chronograf/v1/sources/1", - ioutil.NopCloser( - bytes.NewReader([]byte(`{"name": "marty", "password": "the_lake"}`)))), - }, - fields: fields{ - UseAuth: true, - Logger: &chronograf.NoopLogger{}, - SourcesStore: &mocks.SourcesStore{ - GetF: func(ctx context.Context, ID int) (chronograf.Source, error) { - return chronograf.Source{ - ID: 1, - Name: "muh source", - Username: "name", - Password: "hunter2", - URL: "http://localhost:8086", - }, nil - }, - }, - TimeSeries: &mocks.TimeSeries{ - ConnectF: func(ctx context.Context, src *chronograf.Source) error { - return nil - }, - PermissionsF: func(ctx context.Context) chronograf.Permissions { - return chronograf.Permissions{ - { - Scope: chronograf.AllScope, - Allowed: chronograf.Allowances{"READ", "WRITE"}, - }, - } - }, - }, - }, - ID: "1", - wantStatus: http.StatusOK, - wantContentType: "application/json", - wantBody: `{"permissions":[{"scope":"all","allowed":["READ","WRITE"]}],"links":{"self":"/chronograf/v1/sources/1/permissions","source":"/chronograf/v1/sources/1"}} -`, - }, - } - for _, tt := range tests { - tt.args.r = tt.args.r.WithContext(context.WithValue( - context.TODO(), - httprouter.ParamsKey, - httprouter.Params{ - { - Key: "id", - Value: tt.ID, - }, - })) - h := &Service{ - Store: &mocks.Store{ - SourcesStore: tt.fields.SourcesStore, - }, - TimeSeriesClient: tt.fields.TimeSeries, - Logger: tt.fields.Logger, - UseAuth: tt.fields.UseAuth, - } - h.Permissions(tt.args.w, tt.args.r) - resp := tt.args.w.Result() - content := resp.Header.Get("Content-Type") - body, _ := ioutil.ReadAll(resp.Body) - - if resp.StatusCode != tt.wantStatus { - t.Errorf("%q. Permissions() = %v, want %v", tt.name, resp.StatusCode, tt.wantStatus) - } - if tt.wantContentType != "" && content != tt.wantContentType { - t.Errorf("%q. Permissions() = %v, want %v", tt.name, content, tt.wantContentType) - } - if tt.wantBody != "" && string(body) != tt.wantBody { - t.Errorf("%q. Permissions() = \n***%v***\n,\nwant\n***%v***", tt.name, string(body), tt.wantBody) - } - } -} diff --git a/chronograf/server/prefixing_redirector.go b/chronograf/server/prefixing_redirector.go deleted file mode 100644 index 0317ceb3993..00000000000 --- a/chronograf/server/prefixing_redirector.go +++ /dev/null @@ -1,34 +0,0 @@ -package server - -import ( - "net/http" -) - -type flushingResponseWriter struct { - http.ResponseWriter -} - -func (f *flushingResponseWriter) WriteHeader(status int) { - f.ResponseWriter.WriteHeader(status) -} - -// Flush is here because the underlying HTTP chunked transfer response writer -// needs to implement http.Flusher. Without it, data is silently buffered. This -// was discovered when proxying kapacitor chunked logs. -func (f *flushingResponseWriter) Flush() { - if flusher, ok := f.ResponseWriter.(http.Flusher); ok { - flusher.Flush() - } -} - -// FlushingHandler wraps a handler's http.ResponseWriter in a flushingResponseWriter -// so that chunked responses are flushed as described in the comment above. 
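-// A hypothetical wiring sketch (handler names illustrative, not taken from this file): -// mux := http.NewServeMux() -// srv := &http.Server{Addr: ":8888", Handler: FlushingHandler(mux)} -// _ = srv.ListenAndServe() -// With the wrapper in place, chunked proxy responses reach the client as they are -// produced instead of sitting in a buffer. 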
-func FlushingHandler(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - iw := &flushingResponseWriter{ - ResponseWriter: w, - } - next.ServeHTTP(iw, r) - }) -} diff --git a/chronograf/server/proxy.go b/chronograf/server/proxy.go deleted file mode 100644 index ad313184a06..00000000000 --- a/chronograf/server/proxy.go +++ /dev/null @@ -1,121 +0,0 @@ -package server - -import ( - "crypto/tls" - "fmt" - "net" - "net/http" - "net/http/httputil" - "net/url" - "strings" - "time" -) - -// Proxy proxies requests to services using the path query parameter. -func (s *Service) Proxy(w http.ResponseWriter, r *http.Request) { - srcID, err := paramID("id", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) - return - } - - id, err := paramID("kid", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) - return - } - - path := r.URL.Query().Get("path") - if path == "" { - Error(w, http.StatusUnprocessableEntity, "path query parameter required", s.Logger) - return - } - - ctx := r.Context() - srv, err := s.Store.Servers(ctx).Get(ctx, id) - if err != nil || srv.SrcID != srcID { - notFound(w, id, s.Logger) - return - } - - // To preserve any HTTP query arguments to the kapacitor path, - // we concat and parse them into u. - uri := singleJoiningSlash(srv.URL, path) - u, err := url.Parse(uri) - if err != nil { - msg := fmt.Sprintf("Error parsing kapacitor url: %v", err) - Error(w, http.StatusUnprocessableEntity, msg, s.Logger) - return - } - - director := func(req *http.Request) { - // Set the Host header of the original Kapacitor URL - req.Host = u.Host - req.URL = u - - // Because we are acting as a proxy, kapacitor needs to have the basic auth information set as - // a header directly - if srv.Username != "" && srv.Password != "" { - req.SetBasicAuth(srv.Username, srv.Password) - } - } - - // Without a FlushInterval the HTTP Chunked response for kapacitor logs is - // buffered and flushed every 30 seconds. - proxy := &httputil.ReverseProxy{ - Director: director, - FlushInterval: time.Second, - } - - // The connection to kapacitor is using a self-signed certificate. 
- // This transport uses the same values as http.DefaultTransport but sets - // InsecureSkipVerify. - if srv.InsecureSkipVerify { - proxy.Transport = &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - }).DialContext, - MaxIdleConns: 100, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, - } - } - proxy.ServeHTTP(w, r) -} - -// ProxyPost proxies POST to service -func (s *Service) ProxyPost(w http.ResponseWriter, r *http.Request) { - s.Proxy(w, r) -} - -// ProxyPatch proxies PATCH to service -func (s *Service) ProxyPatch(w http.ResponseWriter, r *http.Request) { - s.Proxy(w, r) -} - -// ProxyGet proxies GET to service -func (s *Service) ProxyGet(w http.ResponseWriter, r *http.Request) { - s.Proxy(w, r) -} - -// ProxyDelete proxies DELETE to service -func (s *Service) ProxyDelete(w http.ResponseWriter, r *http.Request) { - s.Proxy(w, r) -} - -func singleJoiningSlash(a, b string) string { - aslash := strings.HasSuffix(a, "/") - bslash := strings.HasPrefix(b, "/") - if aslash && bslash { - return a + b[1:] - } - if !aslash && !bslash { - return a + "/" + b - } - return a + b -} diff --git a/chronograf/server/queries.go b/chronograf/server/queries.go deleted file mode 100644 index 38ab656a33e..00000000000 --- a/chronograf/server/queries.go +++ /dev/null @@ -1,134 +0,0 @@ -package server - -import ( - "encoding/json" - "fmt" - "net/http" - "time" - - "golang.org/x/net/context" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/influx" - "github.com/influxdata/influxdb/v2/chronograf/influx/queries" -) - -// QueryRequest is a query that will be converted to a queryConfig -type QueryRequest struct { - ID string `json:"id"` - Query string `json:"query"` -} - -// QueriesRequest converts all queries to queryConfigs with the help -// of the template variables -type QueriesRequest struct { - Queries []QueryRequest `json:"queries"` - TemplateVars []chronograf.TemplateVar `json:"tempVars,omitempty"` -} - -// QueryResponse is the return result of a QueryRequest including -// the raw query, the templated query, the queryConfig and the queryAST -type QueryResponse struct { - Duration int64 `json:"durationMs"` - ID string `json:"id"` - Query string `json:"query"` - QueryConfig chronograf.QueryConfig `json:"queryConfig"` - QueryAST *queries.SelectStatement `json:"queryAST,omitempty"` - QueryTemplated *string `json:"queryTemplated,omitempty"` -} - -// QueriesResponse is the response for a QueriesRequest -type QueriesResponse struct { - Queries []QueryResponse `json:"queries"` -} - -// Queries analyzes InfluxQL to produce front-end friendly QueryConfig -func (s *Service) Queries(w http.ResponseWriter, r *http.Request) { - srcID, err := paramID("id", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) - return - } - - ctx := r.Context() - src, err := s.Store.Sources(ctx).Get(ctx, srcID) - if err != nil { - notFound(w, srcID, s.Logger) - return - } - - var req QueriesRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - invalidJSON(w, s.Logger) - return - } - res := QueriesResponse{ - Queries: make([]QueryResponse, len(req.Queries)), - } - - for i, q := range req.Queries { - qr := QueryResponse{ - ID: q.ID, - Query: q.Query, - } - - qc := 
ToQueryConfig(q.Query) - if err := s.DefaultRP(ctx, &qc, &src); err != nil { - Error(w, http.StatusBadRequest, err.Error(), s.Logger) - return - } - qc.Shifts = []chronograf.TimeShift{} - qr.QueryConfig = qc - - if stmt, err := queries.ParseSelect(q.Query); err == nil { - qr.QueryAST = stmt - } - - if dur, err := influx.ParseTime(q.Query, time.Now()); err == nil { - ms := dur.Nanoseconds() / int64(time.Millisecond) - if ms == 0 { - ms = 1 - } - - qr.Duration = ms - } - - qr.QueryConfig.ID = q.ID - res.Queries[i] = qr - } - - encodeJSON(w, http.StatusOK, res, s.Logger) -} - -// DefaultRP will add the default retention policy to the QC if one has not been specified -func (s *Service) DefaultRP(ctx context.Context, qc *chronograf.QueryConfig, src *chronograf.Source) error { - // Only need to find the default RP IFF the qc's rp is empty - if qc.RetentionPolicy != "" { - return nil - } - - // For queries without databases, measurements, or fields we will not - // be able to find an RP - if qc.Database == "" || qc.Measurement == "" || len(qc.Fields) == 0 { - return nil - } - - db := s.Databases - if err := db.Connect(ctx, src); err != nil { - return fmt.Errorf("unable to connect to source: %v", err) - } - - rps, err := db.AllRP(ctx, qc.Database) - if err != nil { - return fmt.Errorf("unable to load RPs from DB %s: %v", qc.Database, err) - } - - for _, rp := range rps { - if rp.Default { - qc.RetentionPolicy = rp.Name - return nil - } - } - - return nil -} diff --git a/chronograf/server/queries_test.go b/chronograf/server/queries_test.go deleted file mode 100644 index c8008861e46..00000000000 --- a/chronograf/server/queries_test.go +++ /dev/null @@ -1,112 +0,0 @@ -package server - -import ( - "bytes" - "context" - "net/http" - "net/http/httptest" - "testing" - - "github.com/influxdata/httprouter" - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/mocks" -) - -func TestService_Queries(t *testing.T) { - tests := []struct { - name string - SourcesStore chronograf.SourcesStore - ID string - w *httptest.ResponseRecorder - r *http.Request - want string - }{ - { - name: "bad json", - SourcesStore: &mocks.SourcesStore{ - GetF: func(ctx context.Context, ID int) (chronograf.Source, error) { - return chronograf.Source{ - ID: ID, - }, nil - }, - }, - ID: "1", - w: httptest.NewRecorder(), - r: httptest.NewRequest("POST", "/queries", bytes.NewReader([]byte(`howdy`))), - want: `{"code":400,"message":"unparsable JSON"}`, - }, - { - name: "bad id", - ID: "howdy", - w: httptest.NewRecorder(), - r: httptest.NewRequest("POST", "/queries", bytes.NewReader([]byte{})), - want: `{"code":422,"message":"error converting ID howdy"}`, - }, - { - name: "query with no template vars", - SourcesStore: &mocks.SourcesStore{ - GetF: func(ctx context.Context, ID int) (chronograf.Source, error) { - return chronograf.Source{ - ID: ID, - }, nil - }, - }, - ID: "1", - w: httptest.NewRecorder(), - r: httptest.NewRequest("POST", "/queries", bytes.NewReader([]byte(`{ - "queries": [ - { - "query": "SELECT \"pingReq\" FROM db.\"monitor\".\"httpd\" WHERE time > now() - 1m", - "id": "82b60d37-251e-4afe-ac93-ca20a3642b11" - } - ]}`))), - want: `{"queries":[{"durationMs":59999,"id":"82b60d37-251e-4afe-ac93-ca20a3642b11","query":"SELECT \"pingReq\" FROM db.\"monitor\".\"httpd\" WHERE time \u003e now() - 
1m","queryConfig":{"id":"82b60d37-251e-4afe-ac93-ca20a3642b11","database":"db","measurement":"httpd","retentionPolicy":"monitor","fields":[{"value":"pingReq","type":"field","alias":""}],"tags":{},"groupBy":{"time":"","tags":[]},"areTagsAccepted":false,"rawText":null,"range":{"upper":"","lower":"now() - 1m"},"shifts":[]},"queryAST":{"condition":{"expr":"binary","op":"\u003e","lhs":{"expr":"reference","val":"time"},"rhs":{"expr":"binary","op":"-","lhs":{"expr":"call","name":"now"},"rhs":{"expr":"literal","val":"1m","type":"duration"}}},"fields":[{"column":{"expr":"reference","val":"pingReq"}}],"sources":[{"database":"db","retentionPolicy":"monitor","name":"httpd","type":"measurement"}]}}]} -`, - }, - { - name: "query with unparsable query", - SourcesStore: &mocks.SourcesStore{ - GetF: func(ctx context.Context, ID int) (chronograf.Source, error) { - return chronograf.Source{ - ID: ID, - }, nil - }, - }, - ID: "1", - w: httptest.NewRecorder(), - r: httptest.NewRequest("POST", "/queries", bytes.NewReader([]byte(`{ - "queries": [ - { - "query": "SHOW DATABASES", - "id": "82b60d37-251e-4afe-ac93-ca20a3642b11" - } - ]}`))), - want: `{"queries":[{"durationMs":0,"id":"82b60d37-251e-4afe-ac93-ca20a3642b11","query":"SHOW DATABASES","queryConfig":{"id":"82b60d37-251e-4afe-ac93-ca20a3642b11","database":"","measurement":"","retentionPolicy":"","fields":[],"tags":{},"groupBy":{"time":"","tags":[]},"areTagsAccepted":false,"rawText":"SHOW DATABASES","range":null,"shifts":[]}}]} -`, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tt.r = tt.r.WithContext(context.WithValue( - context.TODO(), - httprouter.ParamsKey, - httprouter.Params{ - { - Key: "id", - Value: tt.ID, - }, - })) - s := &Service{ - Store: &mocks.Store{ - SourcesStore: tt.SourcesStore, - }, - Logger: &mocks.TestLogger{}, - } - s.Queries(tt.w, tt.r) - got := tt.w.Body.String() - if got != tt.want { - t.Errorf("got:\n%s\nwant:\n%s\n", got, tt.want) - } - }) - } -} diff --git a/chronograf/server/queryconfig.go b/chronograf/server/queryconfig.go deleted file mode 100644 index 6f575a688f9..00000000000 --- a/chronograf/server/queryconfig.go +++ /dev/null @@ -1,51 +0,0 @@ -package server - -import ( - "fmt" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/influx" -) - -// ToQueryConfig converts InfluxQL into queryconfigs -// If influxql cannot be represented by a full query config, then, the -// query config's raw text is set to the query. 
-func ToQueryConfig(query string) chronograf.QueryConfig { - qc, err := influx.Convert(query) - if err == nil { - return qc - } - return chronograf.QueryConfig{ - RawText: &query, - Fields: []chronograf.Field{}, - GroupBy: chronograf.GroupBy{ - Tags: []string{}, - }, - Tags: make(map[string][]string), - } -} - -var validFieldTypes = map[string]bool{ - "func": true, - "field": true, - "integer": true, - "number": true, - "regex": true, - "wildcard": true, -} - -// ValidateQueryConfig checks any query config input -func ValidateQueryConfig(q *chronograf.QueryConfig) error { - for _, fld := range q.Fields { - invalid := fmt.Errorf(`invalid field type "%s"; expected func, field, integer, number, regex, wildcard`, fld.Type) - if !validFieldTypes[fld.Type] { - return invalid - } - for _, arg := range fld.Args { - if !validFieldTypes[arg.Type] { - return invalid - } - } - } - return nil -} diff --git a/chronograf/server/queryconfig_test.go b/chronograf/server/queryconfig_test.go deleted file mode 100644 index c6ea49f8378..00000000000 --- a/chronograf/server/queryconfig_test.go +++ /dev/null @@ -1,50 +0,0 @@ -package server - -import ( - "testing" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -func TestValidateQueryConfig(t *testing.T) { - tests := []struct { - name string - q *chronograf.QueryConfig - wantErr bool - }{ - { - name: "invalid field type", - q: &chronograf.QueryConfig{ - Fields: []chronograf.Field{ - { - Type: "invalid", - }, - }, - }, - wantErr: true, - }, - { - name: "invalid field args", - q: &chronograf.QueryConfig{ - Fields: []chronograf.Field{ - { - Type: "func", - Args: []chronograf.Field{ - { - Type: "invalid", - }, - }, - }, - }, - }, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if err := ValidateQueryConfig(tt.q); (err != nil) != tt.wantErr { - t.Errorf("ValidateQueryConfig() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} diff --git a/chronograf/server/redoc.go b/chronograf/server/redoc.go deleted file mode 100644 index 4cc39bf27dc..00000000000 --- a/chronograf/server/redoc.go +++ /dev/null @@ -1,39 +0,0 @@ -package server - -import ( - "fmt" - "net/http" -) - -const index = ` - - - Chronograf API - - - - - - - - - - -` - -// Redoc serves the swagger JSON using the redoc package. -func Redoc(swagger string) http.HandlerFunc { - return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - rw.Header().Set("Content-Type", "text/html; charset=utf-8") - rw.WriteHeader(http.StatusOK) - - _, _ = rw.Write([]byte(fmt.Sprintf(index, swagger))) - }) -} diff --git a/chronograf/server/routes.go b/chronograf/server/routes.go deleted file mode 100644 index db5144fd1cc..00000000000 --- a/chronograf/server/routes.go +++ /dev/null @@ -1,122 +0,0 @@ -package server - -import ( - "fmt" - "net/http" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/oauth2" -) - -// AuthRoute is the set of routes for a single OAuth2 provider -type AuthRoute struct { - Name string `json:"name"` // Name uniquely identifies the provider - Label string `json:"label"` // Label is a user-facing string to present in the UI - Login string `json:"login"` // Login is the route to the login redirect path - Logout string `json:"logout"` // Logout is the route to the logout redirect path - Callback string `json:"callback"` // Callback is the route the provider calls to exchange the code/state -} - -// AuthRoutes contains all OAuth2 provider routes. 
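-// A lookup sketch against a hypothetical table (values mirror routes_test.go below): -// routes := AuthRoutes{{Name: "github", Label: "GitHub", Login: "/oauth/github/login"}} -// if route, ok := routes.Lookup("github"); ok { -// // route.Login == "/oauth/github/login" -// } 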
-type AuthRoutes []AuthRoute - -// Lookup searches all the routes for a specific provider -func (r *AuthRoutes) Lookup(provider string) (AuthRoute, bool) { - for _, route := range *r { - if route.Name == provider { - return route, true - } - } - return AuthRoute{}, false -} - -type getRoutesResponse struct { - Layouts string `json:"layouts"` // Location of the layouts endpoint - Users string `json:"users"` // Location of the users endpoint - AllUsers string `json:"allUsers"` // Location of the raw users endpoint - Organizations string `json:"organizations"` // Location of the organizations endpoint - Mappings string `json:"mappings"` // Location of the application mappings endpoint - Sources string `json:"sources"` // Location of the sources endpoint - Me string `json:"me"` // Location of the me endpoint - Environment string `json:"environment"` // Location of the environment endpoint - Dashboards string `json:"dashboards"` // Location of the dashboards endpoint - Config getConfigLinksResponse `json:"config"` // Location of the config endpoint and its various sections - Cells string `json:"cells"` // Location of the v2 cells - DashboardsV2 string `json:"dashboardsv2"` // Location of the v2 dashboards - Auth []AuthRoute `json:"auth"` // Location of all auth routes. - Logout *string `json:"logout,omitempty"` // Location of the logout route for all auth routes - ExternalLinks getExternalLinksResponse `json:"external"` // All external links for the client to use - OrganizationConfig getOrganizationConfigLinksResponse `json:"orgConfig"` // Location of the organization config endpoint - Flux getFluxLinksResponse `json:"flux"` -} - -// AllRoutes is a handler that returns all links to resources in Chronograf server, as well as -// external links for the client to know about, such as for JSON feeds or custom side nav buttons. -// Optionally, routes for authentication can be returned. -type AllRoutes struct { - GetPrincipal func(r *http.Request) oauth2.Principal // GetPrincipal is used to retrieve the principal on http request. - AuthRoutes []AuthRoute // Location of all auth routes. If no auth, this can be empty. - LogoutLink string // Location of the logout route for all auth routes. If no auth, this can be empty. 
- StatusFeed string // External link to the JSON Feed for the News Feed on the client's Status Page - CustomLinks map[string]string // Custom external links for client's User menu, as passed in via CLI/ENV - Logger chronograf.Logger -} - -// ServeHTTP returns all top-level routes and external links within chronograf -func (a *AllRoutes) ServeHTTP(w http.ResponseWriter, r *http.Request) { - customLinks, err := NewCustomLinks(a.CustomLinks) - if err != nil { - Error(w, http.StatusInternalServerError, err.Error(), a.Logger) - return - } - - org := "default" - if a.GetPrincipal != nil { - // If there is a principal, use its organization to populate the users routes; - // otherwise use the default organization - if p := a.GetPrincipal(r); p.Organization != "" { - org = p.Organization - } - } - - routes := getRoutesResponse{ - Sources: "/chronograf/v1/sources", - Layouts: "/chronograf/v1/layouts", - Users: fmt.Sprintf("/chronograf/v1/organizations/%s/users", org), - AllUsers: "/chronograf/v1/users", - Organizations: "/chronograf/v1/organizations", - Me: "/chronograf/v1/me", - Environment: "/chronograf/v1/env", - Mappings: "/chronograf/v1/mappings", - Dashboards: "/chronograf/v1/dashboards", - DashboardsV2: "/chronograf/v2/dashboards", - Cells: "/chronograf/v2/cells", - Config: getConfigLinksResponse{ - Self: "/chronograf/v1/config", - Auth: "/chronograf/v1/config/auth", - }, - OrganizationConfig: getOrganizationConfigLinksResponse{ - Self: "/chronograf/v1/org_config", - LogViewer: "/chronograf/v1/org_config/logviewer", - }, - Auth: make([]AuthRoute, len(a.AuthRoutes)), // We want to return at least an empty array, rather than null - ExternalLinks: getExternalLinksResponse{ - StatusFeed: &a.StatusFeed, - CustomLinks: customLinks, - }, - Flux: getFluxLinksResponse{ - Self: "/chronograf/v1/flux", - AST: "/chronograf/v1/flux/ast", - Suggestions: "/chronograf/v1/flux/suggestions", - }, - } - - // The JSON response will have no field present for the LogoutLink if there is no logout link. 
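-// For example (mirroring routes_test.go below): with LogoutLink set to "/oauth/logout", -// clients receive {"logout":"/oauth/logout", ...}; with an empty LogoutLink the -// "logout" key is absent entirely. 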
- if a.LogoutLink != "" { - routes.Logout = &a.LogoutLink - } - - copy(routes.Auth, a.AuthRoutes) - - encodeJSON(w, http.StatusOK, routes, a.Logger) -} diff --git a/chronograf/server/routes_test.go b/chronograf/server/routes_test.go deleted file mode 100644 index 5333c2dc11f..00000000000 --- a/chronograf/server/routes_test.go +++ /dev/null @@ -1,121 +0,0 @@ -package server - -import ( - "encoding/json" - "io/ioutil" - "net/http/httptest" - "testing" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -func TestAllRoutes(t *testing.T) { - logger := &chronograf.NoopLogger{} - handler := &AllRoutes{ - Logger: logger, - } - req := httptest.NewRequest("GET", "http://docbrowns-inventions.com", nil) - w := httptest.NewRecorder() - handler.ServeHTTP(w, req) - - resp := w.Result() - body, err := ioutil.ReadAll(resp.Body) - defer resp.Body.Close() - - if err != nil { - t.Error("TestAllRoutes not able to retrieve body") - } - var routes getRoutesResponse - if err := json.Unmarshal(body, &routes); err != nil { - t.Error("TestAllRoutes not able to unmarshal JSON response") - } - want := `{"dashboardsv2":"/chronograf/v2/dashboards","orgConfig":{"self":"/chronograf/v1/org_config","logViewer":"/chronograf/v1/org_config/logviewer"},"cells":"/chronograf/v2/cells","layouts":"/chronograf/v1/layouts","users":"/chronograf/v1/organizations/default/users","allUsers":"/chronograf/v1/users","organizations":"/chronograf/v1/organizations","mappings":"/chronograf/v1/mappings","sources":"/chronograf/v1/sources","me":"/chronograf/v1/me","environment":"/chronograf/v1/env","dashboards":"/chronograf/v1/dashboards","config":{"self":"/chronograf/v1/config","auth":"/chronograf/v1/config/auth"},"auth":[],"external":{"statusFeed":""},"flux":{"ast":"/chronograf/v1/flux/ast","self":"/chronograf/v1/flux","suggestions":"/chronograf/v1/flux/suggestions"}} -` - - eq, err := jsonEqual(want, string(body)) - if err != nil { - t.Fatalf("error decoding json: %v", err) - } - if !eq { - t.Errorf("TestAllRoutes\nwanted\n*%s*\ngot\n*%s*", want, string(body)) - } - -} - -func TestAllRoutesWithAuth(t *testing.T) { - logger := &chronograf.NoopLogger{} - handler := &AllRoutes{ - AuthRoutes: []AuthRoute{ - { - Name: "github", - Label: "GitHub", - Login: "/oauth/github/login", - Logout: "/oauth/github/logout", - Callback: "/oauth/github/callback", - }, - }, - LogoutLink: "/oauth/logout", - Logger: logger, - } - req := httptest.NewRequest("GET", "http://docbrowns-inventions.com", nil) - w := httptest.NewRecorder() - handler.ServeHTTP(w, req) - - resp := w.Result() - body, err := ioutil.ReadAll(resp.Body) - defer resp.Body.Close() - - if err != nil { - t.Error("TestAllRoutesWithAuth not able to retrieve body") - } - var routes getRoutesResponse - if err := json.Unmarshal(body, &routes); err != nil { - t.Error("TestAllRoutesWithAuth not able to unmarshal JSON response") - } - want := 
`{"dashboardsv2":"/chronograf/v2/dashboards","orgConfig":{"self":"/chronograf/v1/org_config","logViewer":"/chronograf/v1/org_config/logviewer"},"cells":"/chronograf/v2/cells","layouts":"/chronograf/v1/layouts","users":"/chronograf/v1/organizations/default/users","allUsers":"/chronograf/v1/users","organizations":"/chronograf/v1/organizations","mappings":"/chronograf/v1/mappings","sources":"/chronograf/v1/sources","me":"/chronograf/v1/me","environment":"/chronograf/v1/env","dashboards":"/chronograf/v1/dashboards","config":{"self":"/chronograf/v1/config","auth":"/chronograf/v1/config/auth"},"auth":[{"name":"github","label":"GitHub","login":"/oauth/github/login","logout":"/oauth/github/logout","callback":"/oauth/github/callback"}],"logout":"/oauth/logout","external":{"statusFeed":""},"flux":{"ast":"/chronograf/v1/flux/ast","self":"/chronograf/v1/flux","suggestions":"/chronograf/v1/flux/suggestions"}} -` - eq, err := jsonEqual(want, string(body)) - if err != nil { - t.Fatalf("error decoding json: %v", err) - } - if !eq { - t.Errorf("TestAllRoutesWithAuth\nwanted\n*%s*\ngot\n*%s*", want, string(body)) - } -} - -func TestAllRoutesWithExternalLinks(t *testing.T) { - statusFeedURL := "http://pineapple.life/feed.json" - customLinks := map[string]string{ - "cubeapple": "https://cube.apple", - } - logger := &chronograf.NoopLogger{} - handler := &AllRoutes{ - StatusFeed: statusFeedURL, - CustomLinks: customLinks, - Logger: logger, - } - req := httptest.NewRequest("GET", "http://docbrowns-inventions.com", nil) - w := httptest.NewRecorder() - handler.ServeHTTP(w, req) - - resp := w.Result() - body, err := ioutil.ReadAll(resp.Body) - defer resp.Body.Close() - - if err != nil { - t.Error("TestAllRoutesWithExternalLinks not able to retrieve body") - } - var routes getRoutesResponse - if err := json.Unmarshal(body, &routes); err != nil { - t.Error("TestAllRoutesWithExternalLinks not able to unmarshal JSON response") - } - want := `{"dashboardsv2":"/chronograf/v2/dashboards","orgConfig":{"self":"/chronograf/v1/org_config","logViewer":"/chronograf/v1/org_config/logviewer"},"cells":"/chronograf/v2/cells","layouts":"/chronograf/v1/layouts","users":"/chronograf/v1/organizations/default/users","allUsers":"/chronograf/v1/users","organizations":"/chronograf/v1/organizations","mappings":"/chronograf/v1/mappings","sources":"/chronograf/v1/sources","me":"/chronograf/v1/me","environment":"/chronograf/v1/env","dashboards":"/chronograf/v1/dashboards","config":{"self":"/chronograf/v1/config","auth":"/chronograf/v1/config/auth"},"auth":[],"external":{"statusFeed":"http://pineapple.life/feed.json","custom":[{"name":"cubeapple","url":"https://cube.apple"}]},"flux":{"ast":"/chronograf/v1/flux/ast","self":"/chronograf/v1/flux","suggestions":"/chronograf/v1/flux/suggestions"}} -` - eq, err := jsonEqual(want, string(body)) - if err != nil { - t.Fatalf("error decoding json: %v", err) - } - if !eq { - t.Errorf("TestAllRoutesWithExternalLinks\nwanted\n*%s*\ngot\n*%s*", want, string(body)) - } -} diff --git a/chronograf/server/server.go b/chronograf/server/server.go deleted file mode 100644 index 87c28cf8490..00000000000 --- a/chronograf/server/server.go +++ /dev/null @@ -1,572 +0,0 @@ -package server - -import ( - "context" - "crypto/tls" - "fmt" - "log" - "math/rand" - "net" - "net/http" - "net/url" - "os" - "path" - "regexp" - "runtime" - "strconv" - "time" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/bolt" - idgen 
"github.com/influxdata/influxdb/v2/chronograf/id" - "github.com/influxdata/influxdb/v2/chronograf/influx" - "github.com/influxdata/influxdb/v2/chronograf/oauth2" - client "github.com/influxdata/usage-client/v1" - flags "github.com/jessevdk/go-flags" - "github.com/tylerb/graceful" - bbolt "go.etcd.io/bbolt" -) - -var ( - startTime time.Time -) - -func init() { - startTime = time.Now().UTC() -} - -// Server for the chronograf API -type Server struct { - Host string `long:"host" description:"The IP to listen on" default:"0.0.0.0" env:"HOST"` - Port int `long:"port" description:"The port to listen on for insecure connections, defaults to a random value" default:"8888" env:"PORT"` - - PprofEnabled bool `long:"pprof-enabled" description:"Enable the /debug/pprof/* HTTP routes" env:"PPROF_ENABLED"` - - Cert flags.Filename `long:"cert" description:"Path to PEM encoded public key certificate. " env:"TLS_CERTIFICATE"` - Key flags.Filename `long:"key" description:"Path to private key associated with given certificate. " env:"TLS_PRIVATE_KEY"` - - InfluxDBURL string `long:"influxdb-url" description:"Location of your InfluxDB instance" env:"INFLUXDB_URL"` - InfluxDBUsername string `long:"influxdb-username" description:"Username for your InfluxDB instance" env:"INFLUXDB_USERNAME"` - InfluxDBPassword string `long:"influxdb-password" description:"Password for your InfluxDB instance" env:"INFLUXDB_PASSWORD"` - - KapacitorURL string `long:"kapacitor-url" description:"Location of your Kapacitor instance" env:"KAPACITOR_URL"` - KapacitorUsername string `long:"kapacitor-username" description:"Username of your Kapacitor instance" env:"KAPACITOR_USERNAME"` - KapacitorPassword string `long:"kapacitor-password" description:"Password of your Kapacitor instance" env:"KAPACITOR_PASSWORD"` - - NewSources string `long:"new-sources" description:"Config for adding a new InfluxDB source and Kapacitor server, in JSON as an array of objects, and surrounded by single quotes. E.g. --new-sources='[{\"influxdb\":{\"name\":\"Influx 1\",\"username\":\"user1\",\"password\":\"pass1\",\"url\":\"http://localhost:8086\",\"metaUrl\":\"http://metaurl.com\",\"type\":\"influx-enterprise\",\"insecureSkipVerify\":false,\"default\":true,\"telegraf\":\"telegraf\",\"sharedSecret\":\"cubeapples\"},\"kapacitor\":{\"name\":\"Kapa 1\",\"url\":\"http://localhost:9092\",\"active\":true}}]'" env:"NEW_SOURCES" hidden:"true"` - - Develop bool `short:"d" long:"develop" description:"Run server in develop mode."` - BoltPath string `short:"b" long:"bolt-path" description:"Full path to boltDB file (e.g. './chronograf-v1.db')" env:"BOLT_PATH" default:"chronograf-v1.db"` - CannedPath string `short:"c" long:"canned-path" description:"Path to directory of pre-canned application layouts (/usr/share/chronograf/canned)" env:"CANNED_PATH" default:"canned"` - ResourcesPath string `long:"resources-path" description:"Path to directory of pre-canned dashboards, sources, kapacitors, and organizations (/usr/share/chronograf/resources)" env:"RESOURCES_PATH" default:"canned"` - TokenSecret string `short:"t" long:"token-secret" description:"Secret to sign tokens" env:"TOKEN_SECRET"` - JwksURL string `long:"jwks-url" description:"URL that returns OpenID Key Discovery JWKS document." env:"JWKS_URL"` - UseIDToken bool `long:"use-id-token" description:"Enable id_token processing." 
env:"USE_ID_TOKEN"` - AuthDuration time.Duration `long:"auth-duration" default:"720h" description:"Total duration of cookie life for authentication (in hours). 0 means authentication expires on browser close." env:"AUTH_DURATION"` - - GithubClientID string `short:"i" long:"github-client-id" description:"Github Client ID for OAuth 2 support" env:"GH_CLIENT_ID"` - GithubClientSecret string `short:"s" long:"github-client-secret" description:"Github Client Secret for OAuth 2 support" env:"GH_CLIENT_SECRET"` - GithubOrgs []string `short:"o" long:"github-organization" description:"Github organization user is required to have active membership" env:"GH_ORGS" env-delim:","` - - GoogleClientID string `long:"google-client-id" description:"Google Client ID for OAuth 2 support" env:"GOOGLE_CLIENT_ID"` - GoogleClientSecret string `long:"google-client-secret" description:"Google Client Secret for OAuth 2 support" env:"GOOGLE_CLIENT_SECRET"` - GoogleDomains []string `long:"google-domains" description:"Google email domain user is required to have active membership" env:"GOOGLE_DOMAINS" env-delim:","` - PublicURL string `long:"public-url" description:"Full public URL used to access Chronograf from a web browser. Used for OAuth2 authentication. (http://localhost:8888)" env:"PUBLIC_URL"` - - HerokuClientID string `long:"heroku-client-id" description:"Heroku Client ID for OAuth 2 support" env:"HEROKU_CLIENT_ID"` - HerokuSecret string `long:"heroku-secret" description:"Heroku Secret for OAuth 2 support" env:"HEROKU_SECRET"` - HerokuOrganizations []string `long:"heroku-organization" description:"Heroku Organization Memberships a user is required to have for access to Chronograf (comma separated)" env:"HEROKU_ORGS" env-delim:","` - - GenericName string `long:"generic-name" description:"Generic OAuth2 name presented on the login page" env:"GENERIC_NAME"` - GenericClientID string `long:"generic-client-id" description:"Generic OAuth2 Client ID. Can be used own OAuth2 service." env:"GENERIC_CLIENT_ID"` - GenericClientSecret string `long:"generic-client-secret" description:"Generic OAuth2 Client Secret" env:"GENERIC_CLIENT_SECRET"` - GenericScopes []string `long:"generic-scopes" description:"Scopes requested by provider of web client." default:"user:email" env:"GENERIC_SCOPES" env-delim:","` - GenericDomains []string `long:"generic-domains" description:"Email domain users' email address to have (example.com)" env:"GENERIC_DOMAINS" env-delim:","` - GenericAuthURL string `long:"generic-auth-url" description:"OAuth 2.0 provider's authorization endpoint URL" env:"GENERIC_AUTH_URL"` - GenericTokenURL string `long:"generic-token-url" description:"OAuth 2.0 provider's token endpoint URL" env:"GENERIC_TOKEN_URL"` - GenericAPIURL string `long:"generic-api-url" description:"URL that returns OpenID UserInfo compatible information." env:"GENERIC_API_URL"` - GenericAPIKey string `long:"generic-api-key" description:"JSON lookup key into OpenID UserInfo. 
(Azure should be userPrincipalName)" default:"email" env:"GENERIC_API_KEY"` - - Auth0Domain string `long:"auth0-domain" description:"Subdomain of auth0.com used for Auth0 OAuth2 authentication" env:"AUTH0_DOMAIN"` - Auth0ClientID string `long:"auth0-client-id" description:"Auth0 Client ID for OAuth2 support" env:"AUTH0_CLIENT_ID"` - Auth0ClientSecret string `long:"auth0-client-secret" description:"Auth0 Client Secret for OAuth2 support" env:"AUTH0_CLIENT_SECRET"` - Auth0Organizations []string `long:"auth0-organizations" description:"Auth0 organizations permitted to access Chronograf (comma separated)" env:"AUTH0_ORGS" env-delim:","` - Auth0SuperAdminOrg string `long:"auth0-superadmin-org" description:"Auth0 organization from which users are automatically granted SuperAdmin status" env:"AUTH0_SUPERADMIN_ORG"` - - StatusFeedURL string `long:"status-feed-url" description:"URL of a JSON Feed to display as a News Feed on the client Status page." default:"https://www.influxdata.com/feed/json" env:"STATUS_FEED_URL"` - CustomLinks map[string]string `long:"custom-link" description:"Custom link to be added to the client User menu. Multiple links can be added by using multiple of the same flag with different 'name:url' values, or as an environment variable with comma-separated 'name:url' values. E.g. via flags: '--custom-link=InfluxData:https://www.influxdata.com --custom-link=Chronograf:https://github.com/influxdata/influxdb/chronograf'. E.g. via environment variable: 'export CUSTOM_LINKS=InfluxData:https://www.influxdata.com,Chronograf:https://github.com/influxdata/influxdb/chronograf'" env:"CUSTOM_LINKS" env-delim:","` - TelegrafSystemInterval time.Duration `long:"telegraf-system-interval" default:"1m" description:"Duration used in the GROUP BY time interval for the hosts list" env:"TELEGRAF_SYSTEM_INTERVAL"` - - ReportingDisabled bool `short:"r" long:"reporting-disabled" description:"Disable reporting of usage stats (os,arch,version,cluster_id,uptime) once every 24hr" env:"REPORTING_DISABLED"` - LogLevel string `short:"l" long:"log-level" value-name:"choice" choice:"debug" choice:"info" choice:"error" default:"info" description:"Set the logging level" env:"LOG_LEVEL"` - Basepath string `short:"p" long:"basepath" description:"A URL path prefix under which all chronograf routes will be mounted. (Note: PREFIX_ROUTES has been deprecated. 
Now, if basepath is set, all routes will be prefixed with it.)" env:"BASE_PATH"` - ShowVersion bool `short:"v" long:"version" description:"Show Chronograf version info"` - BuildInfo chronograf.BuildInfo - Listener net.Listener - handler http.Handler -} - -func provide(p oauth2.Provider, m oauth2.Mux, ok func() bool) func(func(oauth2.Provider, oauth2.Mux)) { - return func(configure func(oauth2.Provider, oauth2.Mux)) { - if ok() { - configure(p, m) - } - } -} - -// UseGithub validates the CLI parameters to enable github oauth support -func (s *Server) UseGithub() bool { - return s.TokenSecret != "" && s.GithubClientID != "" && s.GithubClientSecret != "" -} - -// UseGoogle validates the CLI parameters to enable google oauth support -func (s *Server) UseGoogle() bool { - return s.TokenSecret != "" && s.GoogleClientID != "" && s.GoogleClientSecret != "" && s.PublicURL != "" -} - -// UseHeroku validates the CLI parameters to enable heroku oauth support -func (s *Server) UseHeroku() bool { - return s.TokenSecret != "" && s.HerokuClientID != "" && s.HerokuSecret != "" -} - -// UseAuth0 validates the CLI parameters to enable Auth0 oauth support -func (s *Server) UseAuth0() bool { - return s.Auth0ClientID != "" && s.Auth0ClientSecret != "" -} - -// UseGenericOAuth2 validates the CLI parameters to enable generic oauth support -func (s *Server) UseGenericOAuth2() bool { - return s.TokenSecret != "" && s.GenericClientID != "" && - s.GenericClientSecret != "" && s.GenericAuthURL != "" && - s.GenericTokenURL != "" -} - -func (s *Server) githubOAuth(logger chronograf.Logger, auth oauth2.Authenticator) (oauth2.Provider, oauth2.Mux, func() bool) { - gh := oauth2.Github{ - ClientID: s.GithubClientID, - ClientSecret: s.GithubClientSecret, - Orgs: s.GithubOrgs, - Logger: logger, - } - jwt := oauth2.NewJWT(s.TokenSecret, s.JwksURL) - ghMux := oauth2.NewAuthMux(&gh, auth, jwt, s.Basepath, logger, s.UseIDToken) - return &gh, ghMux, s.UseGithub -} - -func (s *Server) googleOAuth(logger chronograf.Logger, auth oauth2.Authenticator) (oauth2.Provider, oauth2.Mux, func() bool) { - redirectURL := s.PublicURL + s.Basepath + "/oauth/google/callback" - google := oauth2.Google{ - ClientID: s.GoogleClientID, - ClientSecret: s.GoogleClientSecret, - Domains: s.GoogleDomains, - RedirectURL: redirectURL, - Logger: logger, - } - jwt := oauth2.NewJWT(s.TokenSecret, s.JwksURL) - goMux := oauth2.NewAuthMux(&google, auth, jwt, s.Basepath, logger, s.UseIDToken) - return &google, goMux, s.UseGoogle -} - -func (s *Server) herokuOAuth(logger chronograf.Logger, auth oauth2.Authenticator) (oauth2.Provider, oauth2.Mux, func() bool) { - heroku := oauth2.Heroku{ - ClientID: s.HerokuClientID, - ClientSecret: s.HerokuSecret, - Organizations: s.HerokuOrganizations, - Logger: logger, - } - jwt := oauth2.NewJWT(s.TokenSecret, s.JwksURL) - hMux := oauth2.NewAuthMux(&heroku, auth, jwt, s.Basepath, logger, s.UseIDToken) - return &heroku, hMux, s.UseHeroku -} - -func (s *Server) genericOAuth(logger chronograf.Logger, auth oauth2.Authenticator) (oauth2.Provider, oauth2.Mux, func() bool) { - gen := oauth2.Generic{ - PageName: s.GenericName, - ClientID: s.GenericClientID, - ClientSecret: s.GenericClientSecret, - RequiredScopes: s.GenericScopes, - Domains: s.GenericDomains, - RedirectURL: s.genericRedirectURL(), - AuthURL: s.GenericAuthURL, - TokenURL: s.GenericTokenURL, - APIURL: s.GenericAPIURL, - APIKey: s.GenericAPIKey, - Logger: logger, - } - jwt := oauth2.NewJWT(s.TokenSecret, s.JwksURL) - genMux := oauth2.NewAuthMux(&gen, auth, jwt, s.Basepath, 
logger, s.UseIDToken) - return &gen, genMux, s.UseGenericOAuth2 -} - -func (s *Server) auth0OAuth(logger chronograf.Logger, auth oauth2.Authenticator) (oauth2.Provider, oauth2.Mux, func() bool) { - redirectPath := path.Join(s.Basepath, "oauth", "auth0", "callback") - redirectURL, err := url.Parse(s.PublicURL) - if err != nil { - logger.Error("Error parsing public URL: err:", err) - return &oauth2.Auth0{}, &oauth2.AuthMux{}, func() bool { return false } - } - redirectURL.Path = redirectPath - - auth0, err := oauth2.NewAuth0(s.Auth0Domain, s.Auth0ClientID, s.Auth0ClientSecret, redirectURL.String(), s.Auth0Organizations, logger) - - jwt := oauth2.NewJWT(s.TokenSecret, s.JwksURL) - genMux := oauth2.NewAuthMux(&auth0, auth, jwt, s.Basepath, logger, s.UseIDToken) - - if err != nil { - logger.Error("Error parsing Auth0 domain: err:", err) - return &auth0, genMux, func() bool { return false } - } - return &auth0, genMux, s.UseAuth0 -} - -func (s *Server) genericRedirectURL() string { - if s.PublicURL == "" { - return "" - } - - genericName := "generic" - if s.GenericName != "" { - genericName = s.GenericName - } - - publicURL, err := url.Parse(s.PublicURL) - if err != nil { - return "" - } - - publicURL.Path = path.Join(publicURL.Path, s.Basepath, "oauth", genericName, "callback") - return publicURL.String() -} - -func (s *Server) useAuth() bool { - return s.UseGithub() || s.UseGoogle() || s.UseHeroku() || s.UseGenericOAuth2() || s.UseAuth0() -} - -func (s *Server) useTLS() bool { - return s.Cert != "" -} - -// NewListener returns an http or https listener depending on useTLS() -func (s *Server) NewListener() (net.Listener, error) { - addr := net.JoinHostPort(s.Host, strconv.Itoa(s.Port)) - if !s.useTLS() { - listener, err := net.Listen("tcp", addr) - if err != nil { - return nil, err - } - return listener, nil - } - - // If no key is specified, assume it is bundled in the cert - if s.Key == "" { - s.Key = s.Cert - } - - cert, err := tls.LoadX509KeyPair(string(s.Cert), string(s.Key)) - if err != nil { - return nil, err - } - - listener, err := tls.Listen("tcp", addr, &tls.Config{ - Certificates: []tls.Certificate{cert}, - }) - - if err != nil { - return nil, err - } - return listener, nil -} - -type builders struct { - Layouts LayoutBuilder - Sources SourcesBuilder - Kapacitors KapacitorBuilder - Dashboards DashboardBuilder - Organizations OrganizationBuilder -} - -func (s *Server) newBuilders(logger chronograf.Logger) builders { - return builders{ - Layouts: &MultiLayoutBuilder{ - Logger: logger, - UUID: &idgen.UUID{}, - CannedPath: s.CannedPath, - }, - Dashboards: &MultiDashboardBuilder{ - Logger: logger, - ID: idgen.NewTime(), - Path: s.ResourcesPath, - }, - Sources: &MultiSourceBuilder{ - InfluxDBURL: s.InfluxDBURL, - InfluxDBUsername: s.InfluxDBUsername, - InfluxDBPassword: s.InfluxDBPassword, - Logger: logger, - ID: idgen.NewTime(), - Path: s.ResourcesPath, - }, - Kapacitors: &MultiKapacitorBuilder{ - KapacitorURL: s.KapacitorURL, - KapacitorUsername: s.KapacitorUsername, - KapacitorPassword: s.KapacitorPassword, - Logger: logger, - ID: idgen.NewTime(), - Path: s.ResourcesPath, - }, - Organizations: &MultiOrganizationBuilder{ - Logger: logger, - Path: s.ResourcesPath, - }, - } -} - -// Serve starts and runs the chronograf server -func (s *Server) Serve(ctx context.Context) error { - logger := &chronograf.NoopLogger{} - _, err := NewCustomLinks(s.CustomLinks) - if err != nil { - logger. - WithField("component", "server"). - WithField("CustomLink", "invalid"). 
- Error(err) - return err - } - service := openService(ctx, s.BuildInfo, s.BoltPath, s.newBuilders(logger), logger, s.useAuth()) - service.SuperAdminProviderGroups = superAdminProviderGroups{ - auth0: s.Auth0SuperAdminOrg, - } - service.Env = chronograf.Environment{ - TelegrafSystemInterval: s.TelegrafSystemInterval, - } - - if !validBasepath(s.Basepath) { - err := fmt.Errorf("invalid basepath, must follow format \"/mybasepath\"") - logger. - WithField("component", "server"). - WithField("basepath", "invalid"). - Error(err) - return err - } - - providerFuncs := []func(func(oauth2.Provider, oauth2.Mux)){} - - auth := oauth2.NewCookieJWT(s.TokenSecret, s.AuthDuration) - providerFuncs = append(providerFuncs, provide(s.githubOAuth(logger, auth))) - providerFuncs = append(providerFuncs, provide(s.googleOAuth(logger, auth))) - providerFuncs = append(providerFuncs, provide(s.herokuOAuth(logger, auth))) - providerFuncs = append(providerFuncs, provide(s.genericOAuth(logger, auth))) - providerFuncs = append(providerFuncs, provide(s.auth0OAuth(logger, auth))) - - s.handler = NewMux(MuxOpts{ - Develop: s.Develop, - Auth: auth, - Logger: logger, - UseAuth: s.useAuth(), - ProviderFuncs: providerFuncs, - Basepath: s.Basepath, - StatusFeedURL: s.StatusFeedURL, - CustomLinks: s.CustomLinks, - }, service) - - // Add chronograf's version header to all requests - s.handler = Version(s.BuildInfo.Version, s.handler) - - if s.useTLS() { - // Add HSTS to instruct all browsers to change from http to https - s.handler = HSTS(s.handler) - } - - listener, err := s.NewListener() - if err != nil { - logger. - WithField("component", "server"). - Error(err) - return err - } - s.Listener = listener - - // Using a log writer for http server logging - w := logger.Writer() - defer w.Close() - stdLog := log.New(w, "", 0) - - // TODO: Remove graceful when changing to go 1.8 - httpServer := &graceful.Server{ - Server: &http.Server{ - ErrorLog: stdLog, - Handler: s.handler, - }, - Logger: stdLog, - TCPKeepAlive: 5 * time.Second, - } - httpServer.SetKeepAlivesEnabled(true) - - if !s.ReportingDisabled { - go reportUsageStats(s.BuildInfo, logger) - } - scheme := "http" - if s.useTLS() { - scheme = "https" - } - logger. - WithField("component", "server"). - Info("Serving chronograf at ", scheme, "://", s.Listener.Addr()) - - if err := httpServer.Serve(s.Listener); err != nil { - logger. - WithField("component", "server"). - Error(err) - return err - } - - logger. - WithField("component", "server"). 
- Info("Stopped serving chronograf at ", scheme, "://", s.Listener.Addr()) - - return nil -} - -func NewServiceV2(ctx context.Context, d *bbolt.DB) (*Service, error) { - db := bolt.NewClient() - db.WithDB(d) - - if err := db.Open(ctx, nil, chronograf.BuildInfo{}); err != nil { - return nil, err - } - - logger := &chronograf.NoopLogger{} - - return &Service{ - TimeSeriesClient: &InfluxClient{}, - Store: &DirectStore{ - LayoutsStore: db.LayoutsStore, - DashboardsStore: db.DashboardsStore, - SourcesStore: db.SourcesStore, - ServersStore: db.ServersStore, - OrganizationsStore: db.OrganizationsStore, - UsersStore: db.UsersStore, - ConfigStore: db.ConfigStore, - MappingsStore: db.MappingsStore, - OrganizationConfigStore: db.OrganizationConfigStore, - }, - // TODO(desa): what to do about logger - Logger: logger, - Databases: &influx.Client{ - Logger: logger, - }, - }, nil -} - -func openService(ctx context.Context, buildInfo chronograf.BuildInfo, boltPath string, builder builders, logger chronograf.Logger, useAuth bool) Service { - db := bolt.NewClient() - db.Path = boltPath - - if err := db.Open(ctx, logger, buildInfo, bolt.WithBackup()); err != nil { - logger. - WithField("component", "boltstore"). - Error(err) - os.Exit(1) - } - - layouts, err := builder.Layouts.Build(db.LayoutsStore) - if err != nil { - logger. - WithField("component", "LayoutsStore"). - Error("Unable to construct a MultiLayoutsStore", err) - os.Exit(1) - } - - dashboards, err := builder.Dashboards.Build(db.DashboardsStore) - if err != nil { - logger. - WithField("component", "DashboardsStore"). - Error("Unable to construct a MultiDashboardsStore", err) - os.Exit(1) - } - sources, err := builder.Sources.Build(db.SourcesStore) - if err != nil { - logger. - WithField("component", "SourcesStore"). - Error("Unable to construct a MultiSourcesStore", err) - os.Exit(1) - } - - kapacitors, err := builder.Kapacitors.Build(db.ServersStore) - if err != nil { - logger. - WithField("component", "KapacitorStore"). - Error("Unable to construct a MultiKapacitorStore", err) - os.Exit(1) - } - - organizations, err := builder.Organizations.Build(db.OrganizationsStore) - if err != nil { - logger. - WithField("component", "OrganizationsStore"). - Error("Unable to construct a MultiOrganizationStore", err) - os.Exit(1) - } - - return Service{ - TimeSeriesClient: &InfluxClient{}, - Store: &Store{ - LayoutsStore: layouts, - DashboardsStore: dashboards, - SourcesStore: sources, - ServersStore: kapacitors, - OrganizationsStore: organizations, - UsersStore: db.UsersStore, - ConfigStore: db.ConfigStore, - MappingsStore: db.MappingsStore, - OrganizationConfigStore: db.OrganizationConfigStore, - }, - Logger: logger, - UseAuth: useAuth, - Databases: &influx.Client{Logger: logger}, - } -} - -// reportUsageStats starts periodic server reporting. -func reportUsageStats(bi chronograf.BuildInfo, logger chronograf.Logger) { - rand.Seed(time.Now().UTC().UnixNano()) - serverID := strconv.FormatUint(uint64(rand.Int63()), 10) - reporter := client.New("") - values := client.Values{ - "os": runtime.GOOS, - "arch": runtime.GOARCH, - "version": bi.Version, - "cluster_id": serverID, - "uptime": time.Since(startTime).Seconds(), - } - l := logger.WithField("component", "usage"). - WithField("reporting_addr", reporter.URL). - WithField("freq", "24h"). 
- WithField("stats", "os,arch,version,cluster_id,uptime") - l.Info("Reporting usage stats") - _, _ = reporter.Save(clientUsage(values)) - - ticker := time.NewTicker(24 * time.Hour) - defer ticker.Stop() - for { - <-ticker.C - values["uptime"] = time.Since(startTime).Seconds() - l.Debug("Reporting usage stats") - go reporter.Save(clientUsage(values)) - } -} - -func clientUsage(values client.Values) *client.Usage { - return &client.Usage{ - Product: "chronograf-ng", - Data: []client.UsageData{ - { - Values: values, - }, - }, - } -} - -func validBasepath(basepath string) bool { - re := regexp.MustCompile(`(\/{1}[\w-]+)+`) - return re.ReplaceAllLiteralString(basepath, "") == "" -} diff --git a/chronograf/server/server_test.go b/chronograf/server/server_test.go deleted file mode 100644 index 9a8591cde0e..00000000000 --- a/chronograf/server/server_test.go +++ /dev/null @@ -1,75 +0,0 @@ -package server - -import ( - "context" - "net/http" - "testing" - - "github.com/bouk/httprouter" -) - -// WithContext is a helper function to cut down on boilerplate in server test files -func WithContext(ctx context.Context, r *http.Request, kv map[string]string) *http.Request { - params := make(httprouter.Params, 0, len(kv)) - for k, v := range kv { - params = append(params, httprouter.Param{ - Key: k, - Value: v, - }) - } - return r.WithContext(httprouter.WithParams(ctx, params)) -} - -func Test_validBasepath(t *testing.T) { - type args struct { - basepath string - } - tests := []struct { - name string - args args - want bool - }{ - { - name: "Basepath can be empty", - args: args{ - basepath: "", - }, - want: true, - }, - { - name: "Basepath is not empty and valid", - args: args{ - basepath: "/russ", - }, - want: true, - }, - { - name: "Basepath can include numbers, hyphens, and underscores", - args: args{ - basepath: "/3shishka-bob/-rus4s_rus-1_s-", - }, - want: true, - }, - { - name: "Basepath is not empty and invalid - no slashes", - args: args{ - basepath: "russ", - }, - want: false, - }, - { - name: "Basepath is not empty and invalid - extra slashes", - args: args{ - basepath: "//russ//", - }, - want: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := validBasepath(tt.args.basepath); got != tt.want { - t.Errorf("validBasepath() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/chronograf/server/service.go b/chronograf/server/service.go deleted file mode 100644 index c98750657ce..00000000000 --- a/chronograf/server/service.go +++ /dev/null @@ -1,60 +0,0 @@ -package server - -import ( - "context" - "strings" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/enterprise" - "github.com/influxdata/influxdb/v2/chronograf/influx" -) - -// Service handles REST calls to the persistence -type Service struct { - Store DataStore - TimeSeriesClient TimeSeriesClient - Logger chronograf.Logger - UseAuth bool - SuperAdminProviderGroups superAdminProviderGroups - Env chronograf.Environment - Databases chronograf.Databases -} - -type superAdminProviderGroups struct { - auth0 string -} - -// TimeSeriesClient returns the correct client for a time series database. 
-type TimeSeriesClient interface { - New(chronograf.Source, chronograf.Logger) (chronograf.TimeSeries, error) -} - -// ErrorMessage is the error response format for all service errors -type ErrorMessage struct { - Code int `json:"code"` - Message string `json:"message"` -} - -// TimeSeries returns a new client connected to a time series database -func (s *Service) TimeSeries(src chronograf.Source) (chronograf.TimeSeries, error) { - return s.TimeSeriesClient.New(src, s.Logger) -} - -// InfluxClient returns a new client to connect to OSS or Enterprise -type InfluxClient struct{} - -// New creates a client to connect to OSS or enterprise -func (c *InfluxClient) New(src chronograf.Source, logger chronograf.Logger) (chronograf.TimeSeries, error) { - client := &influx.Client{ - Logger: logger, - } - if err := client.Connect(context.TODO(), &src); err != nil { - return nil, err - } - if src.Type == chronograf.InfluxEnterprise && src.MetaURL != "" { - tls := strings.Contains(src.MetaURL, "https") - insecure := src.InsecureSkipVerify - return enterprise.NewClientWithTimeSeries(logger, src.MetaURL, influx.DefaultAuthorization(&src), tls, insecure, client) - } - return client, nil -} diff --git a/chronograf/server/services.go b/chronograf/server/services.go deleted file mode 100644 index 2457895fdae..00000000000 --- a/chronograf/server/services.go +++ /dev/null @@ -1,352 +0,0 @@ -package server - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/flux" -) - -type postServiceRequest struct { - Name *string `json:"name"` // User facing name of service instance; Required: true - URL *string `json:"url"` // URL for the service backend (e.g. http://localhost:9092); Required: true - Type *string `json:"type"` // Type is the kind of service (e.g. flux); Required - Username string `json:"username,omitempty"` // Username for authentication to service - Password string `json:"password,omitempty"` - InsecureSkipVerify bool `json:"insecureSkipVerify"` // InsecureSkipVerify as true means any certificate presented by the service is accepted. - Organization string `json:"organization"` // Organization is the organization ID that resource belongs to - Metadata map[string]interface{} `json:"metadata"` // Metadata is any other data that the frontend wants to store about this service -} - -func (p *postServiceRequest) Valid(defaultOrgID string) error { - if p.Name == nil || p.URL == nil { - return fmt.Errorf("name and url required") - } - - if p.Type == nil { - return fmt.Errorf("type required") - } - - if p.Organization == "" { - p.Organization = defaultOrgID - } - - url, err := url.ParseRequestURI(*p.URL) - if err != nil { - return fmt.Errorf("invalid source URI: %v", err) - } - if len(url.Scheme) == 0 { - return fmt.Errorf("invalid URL; no URL scheme defined") - } - - return nil -} - -type serviceLinks struct { - Proxy string `json:"proxy"` // URL location of proxy endpoint for this source - Self string `json:"self"` // Self link mapping to this resource - Source string `json:"source"` // URL location of the parent source -} - -type service struct { - ID int `json:"id,string"` // Unique identifier representing a service instance. - SrcID int `json:"sourceID,string"` // SrcID of the data source - Name string `json:"name"` // User facing name of service instance. - URL string `json:"url"` // URL for the service backend (e.g. 
http://localhost:9092) - Username string `json:"username,omitempty"` // Username for authentication to service - Password string `json:"password,omitempty"` - InsecureSkipVerify bool `json:"insecureSkipVerify"` // InsecureSkipVerify as true means any certificate presented by the service is accepted. - Type string `json:"type"` // Type is the kind of service (e.g. flux) - Metadata map[string]interface{} `json:"metadata"` // Metadata is any other data that the frontend wants to store about this service - Links serviceLinks `json:"links"` // Links are URI locations related to service -} - -func newService(srv chronograf.Server) service { - if srv.Metadata == nil { - srv.Metadata = make(map[string]interface{}) - } - httpAPISrcs := "/chronograf/v1/sources" - return service{ - ID: srv.ID, - SrcID: srv.SrcID, - Name: srv.Name, - Username: srv.Username, - URL: srv.URL, - InsecureSkipVerify: srv.InsecureSkipVerify, - Type: srv.Type, - Metadata: srv.Metadata, - Links: serviceLinks{ - Self: fmt.Sprintf("%s/%d/services/%d", httpAPISrcs, srv.SrcID, srv.ID), - Source: fmt.Sprintf("%s/%d", httpAPISrcs, srv.SrcID), - Proxy: fmt.Sprintf("%s/%d/services/%d/proxy", httpAPISrcs, srv.SrcID, srv.ID), - }, - } -} - -type services struct { - Services []service `json:"services"` -} - -// NewService adds a valid service to the store. -func (s *Service) NewService(w http.ResponseWriter, r *http.Request) { - srcID, err := paramID("id", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) - return - } - - ctx := r.Context() - _, err = s.Store.Sources(ctx).Get(ctx, srcID) - if err != nil { - notFound(w, srcID, s.Logger) - return - } - - var req postServiceRequest - if err = json.NewDecoder(r.Body).Decode(&req); err != nil { - invalidJSON(w, s.Logger) - return - } - - defaultOrg, err := s.Store.Organizations(ctx).DefaultOrganization(ctx) - if err != nil { - unknownErrorWithMessage(w, err, s.Logger) - return - } - - if err := req.Valid(defaultOrg.ID); err != nil { - invalidData(w, err, s.Logger) - return - } - - if req.Type != nil && req.URL != nil && *req.Type == "flux" { - err := pingFlux(ctx, *req.URL, req.InsecureSkipVerify) - if err != nil { - msg := fmt.Sprintf("Unable to reach flux %s: %v", *req.URL, err) - Error(w, http.StatusGatewayTimeout, msg, s.Logger) - return - } - } - - srv := chronograf.Server{ - SrcID: srcID, - Name: *req.Name, - Username: req.Username, - Password: req.Password, - InsecureSkipVerify: req.InsecureSkipVerify, - URL: *req.URL, - Organization: req.Organization, - Type: *req.Type, - Metadata: req.Metadata, - } - - if srv, err = s.Store.Servers(ctx).Add(ctx, srv); err != nil { - msg := fmt.Errorf("error storing service %v: %v", req, err) - unknownErrorWithMessage(w, msg, s.Logger) - return - } - - res := newService(srv) - location(w, res.Links.Self) - encodeJSON(w, http.StatusCreated, res, s.Logger) -} - -// Services retrieves all services from store. 
-func (s *Service) Services(w http.ResponseWriter, r *http.Request) { - srcID, err := paramID("id", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) - return - } - - ctx := r.Context() - mrSrvs, err := s.Store.Servers(ctx).All(ctx) - if err != nil { - Error(w, http.StatusInternalServerError, "Error loading services", s.Logger) - return - } - - srvs := []service{} - for _, srv := range mrSrvs { - if srv.SrcID == srcID && srv.Type != "" { - srvs = append(srvs, newService(srv)) - } - } - - res := services{ - Services: srvs, - } - - encodeJSON(w, http.StatusOK, res, s.Logger) -} - -// ServiceID retrieves a service with ID from store. -func (s *Service) ServiceID(w http.ResponseWriter, r *http.Request) { - id, err := paramID("kid", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) - return - } - - srcID, err := paramID("id", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) - return - } - - ctx := r.Context() - srv, err := s.Store.Servers(ctx).Get(ctx, id) - if err != nil || srv.SrcID != srcID || srv.Type == "" { - notFound(w, id, s.Logger) - return - } - - res := newService(srv) - encodeJSON(w, http.StatusOK, res, s.Logger) -} - -// RemoveService deletes service from store. -func (s *Service) RemoveService(w http.ResponseWriter, r *http.Request) { - id, err := paramID("kid", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) - return - } - - srcID, err := paramID("id", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) - return - } - - ctx := r.Context() - srv, err := s.Store.Servers(ctx).Get(ctx, id) - if err != nil || srv.SrcID != srcID || srv.Type == "" { - notFound(w, id, s.Logger) - return - } - - if err = s.Store.Servers(ctx).Delete(ctx, srv); err != nil { - unknownErrorWithMessage(w, err, s.Logger) - return - } - - w.WriteHeader(http.StatusNoContent) -} - -type patchServiceRequest struct { - Name *string `json:"name,omitempty"` // User facing name of service instance. - Type *string `json:"type,omitempty"` // Type is the kind of service (e.g. flux) - URL *string `json:"url,omitempty"` // URL for the service - Username *string `json:"username,omitempty"` // Username for service auth - Password *string `json:"password,omitempty"` - InsecureSkipVerify *bool `json:"insecureSkipVerify"` // InsecureSkipVerify as true means any certificate presented by the service is accepted. 
- Metadata *map[string]interface{} `json:"metadata"` // Metadata is any other data that the frontend wants to store about this service -} - -func (p *patchServiceRequest) Valid() error { - if p.URL != nil { - url, err := url.ParseRequestURI(*p.URL) - if err != nil { - return fmt.Errorf("invalid service URI: %v", err) - } - if len(url.Scheme) == 0 { - return fmt.Errorf("invalid URL; no URL scheme defined") - } - } - - if p.Type != nil && *p.Type == "" { - return fmt.Errorf("invalid type; type must not be an empty string") - } - - return nil -} - -// UpdateService incrementally updates a service definition in the store -func (s *Service) UpdateService(w http.ResponseWriter, r *http.Request) { - id, err := paramID("kid", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) - return - } - - srcID, err := paramID("id", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) - return - } - - ctx := r.Context() - srv, err := s.Store.Servers(ctx).Get(ctx, id) - if err != nil || srv.SrcID != srcID || srv.Type == "" { - notFound(w, id, s.Logger) - return - } - - var req patchServiceRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - invalidJSON(w, s.Logger) - return - } - - if err := req.Valid(); err != nil { - invalidData(w, err, s.Logger) - return - } - - if req.Name != nil { - srv.Name = *req.Name - } - if req.Type != nil { - srv.Type = *req.Type - } - if req.URL != nil { - srv.URL = *req.URL - } - if req.Password != nil { - srv.Password = *req.Password - } - if req.Username != nil { - srv.Username = *req.Username - } - if req.InsecureSkipVerify != nil { - srv.InsecureSkipVerify = *req.InsecureSkipVerify - } - if req.Metadata != nil { - srv.Metadata = *req.Metadata - } - - if srv.Type == "flux" { - err := pingFlux(ctx, srv.URL, srv.InsecureSkipVerify) - if err != nil { - msg := fmt.Sprintf("Unable to reach flux %s: %v", srv.URL, err) - Error(w, http.StatusGatewayTimeout, msg, s.Logger) - return - } - } - - if err := s.Store.Servers(ctx).Update(ctx, srv); err != nil { - msg := fmt.Sprintf("Error updating service ID %d", id) - Error(w, http.StatusInternalServerError, msg, s.Logger) - return - } - - res := newService(srv) - encodeJSON(w, http.StatusOK, res, s.Logger) -} - -func pingFlux(ctx context.Context, address string, insecureSkipVerify bool) error { - url, err := url.ParseRequestURI(address) - if err != nil { - return fmt.Errorf("invalid service URI: %v", err) - } - client := &flux.Client{ - URL: url, - InsecureSkipVerify: insecureSkipVerify, - } - return client.Ping(ctx) -} diff --git a/chronograf/server/stores.go b/chronograf/server/stores.go deleted file mode 100644 index b6918c34d06..00000000000 --- a/chronograf/server/stores.go +++ /dev/null @@ -1,289 +0,0 @@ -package server - -import ( - "context" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/noop" - "github.com/influxdata/influxdb/v2/chronograf/organizations" - "github.com/influxdata/influxdb/v2/chronograf/roles" -) - -// hasOrganizationContext retrieves organization specified on context -// under the organizations.ContextKey -func hasOrganizationContext(ctx context.Context) (string, bool) { - // prevents panic in case of nil context - if ctx == nil { - return "", false - } - orgID, ok := ctx.Value(organizations.ContextKey).(string) - // should never happen - if !ok { - return "", false - } - if orgID == "" { - return "", false - } - return orgID, true -} - -// 
hasRoleContext retrieves the role specified on context -// under the roles.ContextKey -func hasRoleContext(ctx context.Context) (string, bool) { - // prevents panic in case of nil context - if ctx == nil { - return "", false - } - role, ok := ctx.Value(roles.ContextKey).(string) - // should never happen - if !ok { - return "", false - } - switch role { - case roles.MemberRoleName, roles.ViewerRoleName, roles.EditorRoleName, roles.AdminRoleName: - return role, true - default: - return "", false - } -} - -type userContextKey string - -// UserContextKey is the context key for retrieving the user off of context -const UserContextKey = userContextKey("user") - -// hasUserContext specifies if the context contains -// the UserContextKey and that the value stored there is chronograf.User -func hasUserContext(ctx context.Context) (*chronograf.User, bool) { - // prevents panic in case of nil context - if ctx == nil { - return nil, false - } - u, ok := ctx.Value(UserContextKey).(*chronograf.User) - // should never happen - if !ok { - return nil, false - } - if u == nil { - return nil, false - } - return u, true -} - -// hasSuperAdminContext specifies if the user stored on context -// under the UserContextKey is a super admin -func hasSuperAdminContext(ctx context.Context) bool { - u, ok := hasUserContext(ctx) - if !ok { - return false - } - return u.SuperAdmin -} - -// DataStore is a collection of resources that are used by the Service -// Abstracting this into an interface was useful for isolated testing -type DataStore interface { - Sources(ctx context.Context) chronograf.SourcesStore - Servers(ctx context.Context) chronograf.ServersStore - Layouts(ctx context.Context) chronograf.LayoutsStore - Users(ctx context.Context) chronograf.UsersStore - Organizations(ctx context.Context) chronograf.OrganizationsStore - Mappings(ctx context.Context) chronograf.MappingsStore - Dashboards(ctx context.Context) chronograf.DashboardsStore - Config(ctx context.Context) chronograf.ConfigStore - OrganizationConfig(ctx context.Context) chronograf.OrganizationConfigStore -} - -// ensure that Store implements a DataStore -var _ DataStore = &Store{} - -// Store implements the DataStore interface -type Store struct { - SourcesStore chronograf.SourcesStore - ServersStore chronograf.ServersStore - LayoutsStore chronograf.LayoutsStore - UsersStore chronograf.UsersStore - DashboardsStore chronograf.DashboardsStore - MappingsStore chronograf.MappingsStore - OrganizationsStore chronograf.OrganizationsStore - ConfigStore chronograf.ConfigStore - OrganizationConfigStore chronograf.OrganizationConfigStore -} - -// Sources returns a noop.SourcesStore if the context has no organization specified -// and an organization.SourcesStore otherwise. -func (s *Store) Sources(ctx context.Context) chronograf.SourcesStore { - if isServer := hasServerContext(ctx); isServer { - return s.SourcesStore - } - if org, ok := hasOrganizationContext(ctx); ok { - return organizations.NewSourcesStore(s.SourcesStore, org) - } - - return &noop.SourcesStore{} -} - -// Servers returns a noop.ServersStore if the context has no organization specified -// and an organization.ServersStore otherwise. -func (s *Store) Servers(ctx context.Context) chronograf.ServersStore { - if isServer := hasServerContext(ctx); isServer { - return s.ServersStore - } - if org, ok := hasOrganizationContext(ctx); ok { - return organizations.NewServersStore(s.ServersStore, org) - } - - return &noop.ServersStore{} -} - -// Layouts returns all layouts in the underlying layouts store. 
-func (s *Store) Layouts(ctx context.Context) chronograf.LayoutsStore { - return s.LayoutsStore -} - -// Users returns a chronograf.UsersStore. -// If the context is a server context, then the underlying chronograf.UsersStore -// is returned. -// If there is an organization specified on context, then an organizations.UsersStore -// is returned. -// If neither are specified, a noop.UsersStore is returned. -func (s *Store) Users(ctx context.Context) chronograf.UsersStore { - if isServer := hasServerContext(ctx); isServer { - return s.UsersStore - } - if org, ok := hasOrganizationContext(ctx); ok { - return organizations.NewUsersStore(s.UsersStore, org) - } - - return &noop.UsersStore{} -} - -// Dashboards returns a noop.DashboardsStore if the context has no organization specified -// and an organization.DashboardsStore otherwise. -func (s *Store) Dashboards(ctx context.Context) chronograf.DashboardsStore { - if isServer := hasServerContext(ctx); isServer { - return s.DashboardsStore - } - if org, ok := hasOrganizationContext(ctx); ok { - return organizations.NewDashboardsStore(s.DashboardsStore, org) - } - - return &noop.DashboardsStore{} -} - -// OrganizationConfig returns a noop.OrganizationConfigStore if the context has no organization specified -// and an organization.OrganizationConfigStore otherwise. -func (s *Store) OrganizationConfig(ctx context.Context) chronograf.OrganizationConfigStore { - if orgID, ok := hasOrganizationContext(ctx); ok { - return organizations.NewOrganizationConfigStore(s.OrganizationConfigStore, orgID) - } - - return &noop.OrganizationConfigStore{} -} - -// Organizations returns the underlying OrganizationsStore. -func (s *Store) Organizations(ctx context.Context) chronograf.OrganizationsStore { - if isServer := hasServerContext(ctx); isServer { - return s.OrganizationsStore - } - if isSuperAdmin := hasSuperAdminContext(ctx); isSuperAdmin { - return s.OrganizationsStore - } - if org, ok := hasOrganizationContext(ctx); ok { - return organizations.NewOrganizationsStore(s.OrganizationsStore, org) - } - return &noop.OrganizationsStore{} -} - -// Config returns the underlying ConfigStore. -func (s *Store) Config(ctx context.Context) chronograf.ConfigStore { - if isServer := hasServerContext(ctx); isServer { - return s.ConfigStore - } - if isSuperAdmin := hasSuperAdminContext(ctx); isSuperAdmin { - return s.ConfigStore - } - - return &noop.ConfigStore{} -} - -// Mappings returns the underlying MappingsStore. -func (s *Store) Mappings(ctx context.Context) chronograf.MappingsStore { - if isServer := hasServerContext(ctx); isServer { - return s.MappingsStore - } - if isSuperAdmin := hasSuperAdminContext(ctx); isSuperAdmin { - return s.MappingsStore - } - return &noop.MappingsStore{} -} - -// ensure that DirectStore implements a DataStore -var _ DataStore = &DirectStore{} - -// DirectStore implements the DataStore interface -type DirectStore struct { - SourcesStore chronograf.SourcesStore - ServersStore chronograf.ServersStore - LayoutsStore chronograf.LayoutsStore - UsersStore chronograf.UsersStore - DashboardsStore chronograf.DashboardsStore - MappingsStore chronograf.MappingsStore - OrganizationsStore chronograf.OrganizationsStore - ConfigStore chronograf.ConfigStore - OrganizationConfigStore chronograf.OrganizationConfigStore -} - -// Sources returns the underlying SourcesStore. 
-func (s *DirectStore) Sources(ctx context.Context) chronograf.SourcesStore { - return s.SourcesStore -} - -// Servers returns the underlying ServersStore. -func (s *DirectStore) Servers(ctx context.Context) chronograf.ServersStore { - return s.ServersStore -} - -// Layouts returns all layouts in the underlying layouts store. -func (s *DirectStore) Layouts(ctx context.Context) chronograf.LayoutsStore { - return s.LayoutsStore -} - -// Users returns the underlying UsersStore. -func (s *DirectStore) Users(ctx context.Context) chronograf.UsersStore { - return s.UsersStore -} - -// Dashboards returns the underlying DashboardsStore. -func (s *DirectStore) Dashboards(ctx context.Context) chronograf.DashboardsStore { - return s.DashboardsStore -} - -// OrganizationConfig returns the underlying OrganizationConfigStore. -func (s *DirectStore) OrganizationConfig(ctx context.Context) chronograf.OrganizationConfigStore { - return s.OrganizationConfigStore -} - -// Organizations returns the underlying OrganizationsStore. -func (s *DirectStore) Organizations(ctx context.Context) chronograf.OrganizationsStore { - return s.OrganizationsStore -} - -// Config returns the underlying ConfigStore. -func (s *DirectStore) Config(ctx context.Context) chronograf.ConfigStore { - return s.ConfigStore -} - -// Mappings returns the underlying MappingsStore. 
-func (s *DirectStore) Mappings(ctx context.Context) chronograf.MappingsStore { - return s.MappingsStore -} diff --git a/chronograf/server/stores_test.go b/chronograf/server/stores_test.go deleted file mode 100644 index 5882c786d62..00000000000 --- a/chronograf/server/stores_test.go +++ /dev/null @@ -1,428 +0,0 @@ -package server - -import ( - "context" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/mocks" - "github.com/influxdata/influxdb/v2/chronograf/organizations" -) - -func TestStore_SourcesGet(t *testing.T) { - type fields struct { - SourcesStore chronograf.SourcesStore - } - type args struct { - organization string - id int - } - type wants struct { - source chronograf.Source - err bool - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "Get source", - fields: fields{ - SourcesStore: &mocks.SourcesStore{ - GetF: func(ctx context.Context, id int) (chronograf.Source, error) { - return chronograf.Source{ - ID: 1, - Name: "my sweet name", - Organization: "0", - }, nil - }, - }, - }, - args: args{ - organization: "0", - }, - wants: wants{ - source: chronograf.Source{ - ID: 1, - Name: "my sweet name", - Organization: "0", - }, - }, - }, - { - name: "Get source - no organization specified on context", - fields: fields{ - SourcesStore: &mocks.SourcesStore{ - GetF: func(ctx context.Context, id int) (chronograf.Source, error) { - return chronograf.Source{ - ID: 1, - Name: "my sweet name", - Organization: "0", - }, nil - }, - }, - }, - args: args{}, - wants: wants{ - err: true, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - store := &Store{ - SourcesStore: tt.fields.SourcesStore, - } - - ctx := context.Background() - - if tt.args.organization != "" { - ctx = context.WithValue(ctx, organizations.ContextKey, tt.args.organization) - } - - source, err := store.Sources(ctx).Get(ctx, tt.args.id) - if (err != nil) != tt.wants.err { - t.Errorf("%q. Store.Sources().Get() error = %v, wantErr %v", tt.name, err, tt.wants.err) - return - } - if diff := cmp.Diff(source, tt.wants.source); diff != "" { - t.Errorf("%q. 
Store.Sources().Get():\n-got/+want\ndiff %s", tt.name, diff) - } - }) - } -} - -func TestStore_SourcesAll(t *testing.T) { - type fields struct { - SourcesStore chronograf.SourcesStore - } - type args struct { - organization string - } - type wants struct { - sources []chronograf.Source - err bool - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "Get sources", - fields: fields{ - SourcesStore: &mocks.SourcesStore{ - AllF: func(ctx context.Context) ([]chronograf.Source, error) { - return []chronograf.Source{ - { - ID: 1, - Name: "my sweet name", - Organization: "0", - }, - }, nil - }, - }, - }, - args: args{ - organization: "0", - }, - wants: wants{ - sources: []chronograf.Source{ - { - ID: 1, - Name: "my sweet name", - Organization: "0", - }, - }, - }, - }, - { - name: "Get sources - multiple orgs", - fields: fields{ - SourcesStore: &mocks.SourcesStore{ - AllF: func(ctx context.Context) ([]chronograf.Source, error) { - return []chronograf.Source{ - { - ID: 1, - Name: "my sweet name", - Organization: "0", - }, - { - ID: 2, - Name: "A bad source", - Organization: "0", - }, - { - ID: 3, - Name: "A good source", - Organization: "0", - }, - { - ID: 4, - Name: "a source I can has", - Organization: "0", - }, - { - ID: 5, - Name: "i'm in the wrong org", - Organization: "1", - }, - }, nil - }, - }, - }, - args: args{ - organization: "0", - }, - wants: wants{ - sources: []chronograf.Source{ - { - ID: 1, - Name: "my sweet name", - Organization: "0", - }, - { - ID: 2, - Name: "A bad source", - Organization: "0", - }, - { - ID: 3, - Name: "A good source", - Organization: "0", - }, - { - ID: 4, - Name: "a source I can has", - Organization: "0", - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - store := &Store{ - SourcesStore: tt.fields.SourcesStore, - } - - ctx := context.Background() - - if tt.args.organization != "" { - ctx = context.WithValue(ctx, organizations.ContextKey, tt.args.organization) - } - - sources, err := store.Sources(ctx).All(ctx) - if (err != nil) != tt.wants.err { - t.Errorf("%q. Store.Sources().Get() error = %v, wantErr %v", tt.name, err, tt.wants.err) - return - } - if diff := cmp.Diff(sources, tt.wants.sources); diff != "" { - t.Errorf("%q. 
Store.Sources().Get():\n-got/+want\ndiff %s", tt.name, diff) - } - }) - } -} - -func TestStore_OrganizationsAdd(t *testing.T) { - type fields struct { - OrganizationsStore chronograf.OrganizationsStore - } - type args struct { - orgID string - serverContext bool - organization string - user *chronograf.User - } - type wants struct { - organization *chronograf.Organization - err bool - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "Get organization with server context", - fields: fields{ - OrganizationsStore: &mocks.OrganizationsStore{ - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "21", - Name: "my sweet name", - DefaultRole: "viewer", - }, nil - }, - }, - }, - args: args{ - serverContext: true, - orgID: "21", - }, - wants: wants{ - organization: &chronograf.Organization{ - ID: "21", - Name: "my sweet name", - DefaultRole: "viewer", - }, - }, - }, - { - name: "Get organization with super admin", - fields: fields{ - OrganizationsStore: &mocks.OrganizationsStore{ - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "21", - Name: "my sweet name", - DefaultRole: "viewer", - }, nil - }, - }, - }, - args: args{ - user: &chronograf.User{ - ID: 1337, - Name: "bobbetta", - Provider: "github", - Scheme: "oauth2", - SuperAdmin: true, - }, - orgID: "21", - }, - wants: wants{ - organization: &chronograf.Organization{ - ID: "21", - Name: "my sweet name", - DefaultRole: "viewer", - }, - }, - }, - { - name: "Get organization not as super admin no organization", - fields: fields{ - OrganizationsStore: &mocks.OrganizationsStore{ - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "21", - Name: "my sweet name", - DefaultRole: "viewer", - }, nil - }, - }, - }, - args: args{ - user: &chronograf.User{ - ID: 1337, - Name: "bobbetta", - Provider: "github", - Scheme: "oauth2", - }, - orgID: "21", - }, - wants: wants{ - err: true, - }, - }, - { - name: "Get organization not as super admin with organization", - fields: fields{ - OrganizationsStore: &mocks.OrganizationsStore{ - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "22", - Name: "my sweet name", - DefaultRole: "viewer", - }, nil - }, - }, - }, - args: args{ - user: &chronograf.User{ - ID: 1337, - Name: "bobbetta", - Provider: "github", - Scheme: "oauth2", - }, - organization: "22", - orgID: "22", - }, - wants: wants{ - organization: &chronograf.Organization{ - ID: "22", - Name: "my sweet name", - DefaultRole: "viewer", - }, - }, - }, - { - name: "Get different organization not as super admin with organization", - fields: fields{ - OrganizationsStore: &mocks.OrganizationsStore{ - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - return &chronograf.Organization{ - ID: "22", - Name: "my sweet name", - DefaultRole: "viewer", - }, nil - }, - }, - }, - args: args{ - user: &chronograf.User{ - ID: 1337, - Name: "bobbetta", - Provider: "github", - Scheme: "oauth2", - }, - organization: "21", - orgID: "21", - }, - wants: wants{ - err: true, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - store := &Store{ - OrganizationsStore: tt.fields.OrganizationsStore, - } - - 
ctx := context.Background() - - if tt.args.serverContext { - ctx = serverContext(ctx) - } - - if tt.args.organization != "" { - ctx = context.WithValue(ctx, organizations.ContextKey, tt.args.organization) - } - - if tt.args.user != nil { - ctx = context.WithValue(ctx, UserContextKey, tt.args.user) - } - - organization, err := store.Organizations(ctx).Get(ctx, chronograf.OrganizationQuery{ID: &tt.args.orgID}) - if (err != nil) != tt.wants.err { - t.Errorf("%q. Store.Organizations().Get() error = %v, wantErr %v", tt.name, err, tt.wants.err) - return - } - if diff := cmp.Diff(organization, tt.wants.organization); diff != "" { - t.Errorf("%q. Store.Organizations().Get():\n-got/+want\ndiff %s", tt.name, diff) - } - }) - } -} diff --git a/chronograf/server/swagger.go b/chronograf/server/swagger.go deleted file mode 100644 index 83f97ceee19..00000000000 --- a/chronograf/server/swagger.go +++ /dev/null @@ -1,20 +0,0 @@ -package server - -//go:generate env GO111MODULE=on go run github.com/kevinburke/go-bindata/go-bindata -o swagger_gen.go -tags assets -ignore go -nocompress -pkg server . - -import "net/http" - -// Spec serves the swagger.json file from bindata -func Spec() http.HandlerFunc { - swagger, err := Asset("swagger.json") - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - _, _ = w.Write(swagger) - }) -} diff --git a/chronograf/server/swagger.json b/chronograf/server/swagger.json deleted file mode 100644 index db58b912b34..00000000000 --- a/chronograf/server/swagger.json +++ /dev/null @@ -1,6080 +0,0 @@ -{ - "swagger": "2.0", - "info": { - "title": "Chronograf", - "description": "API endpoints for Chronograf", - "version": "1.5.0.0" - }, - "schemes": ["http"], - "basePath": "/chronograf/v1", - "consumes": ["application/json"], - "produces": ["application/json"], - "paths": { - "/": { - "get": { - "tags": ["routes"], - "summary": "Lists all the endpoints", - "description": "List of the endpoints.", - "responses": { - "200": { - "description": "Returns the links to the top level endpoints.", - "schema": { - "$ref": "#/definitions/Routes" - } - }, - "default": { - "description": "Unexpected internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, - "/sources": { - "get": { - "tags": ["sources"], - "summary": "Configured data sources", - "description": "These data sources store time series data.", - "responses": { - "200": { - "description": "An array of data sources", - "schema": { - "$ref": "#/definitions/Sources" - } - }, - "default": { - "description": "Unexpected internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "post": { - "tags": ["sources"], - "summary": "Create new data source", - "parameters": [ - { - "name": "source", - "in": "body", - "description": "Configuration options for data source", - "schema": { - "$ref": "#/definitions/Source" - } - } - ], - "responses": { - "201": { - "description": "Data source successfully created", - "headers": { - "Location": { - "type": "string", - "format": "url", - "description": - "Location of the newly created data source resource." 
- } - }, - "schema": { - "$ref": "#/definitions/Source" - } - }, - "default": { - "description": "A processing or an unexpected error.", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, - "/sources/{id}": { - "get": { - "tags": ["sources"], - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the data source", - "required": true - } - ], - "summary": "Configured data sources", - "description": "These data sources store time series data.", - "responses": { - "200": { - "description": - "Data source used to supply time series information.", - "schema": { - "$ref": "#/definitions/Source" - } - }, - "404": { - "description": "Unknown source id", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Unexpected internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "patch": { - "tags": ["sources"], - "summary": "Update data source configuration", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of a data source", - "required": true - }, - { - "name": "config", - "in": "body", - "description": "data source configuration", - "schema": { - "$ref": "#/definitions/Source" - }, - "required": true - } - ], - "responses": { - "200": { - "description": "Data source's configuration was changed", - "schema": { - "$ref": "#/definitions/Source" - } - }, - "404": { - "description": - "Happens when trying to access a non-existent data source.", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "A processing or an unexpected error.", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "delete": { - "tags": ["sources"], - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the source", - "required": true - } - ], - "summary": - "This specific data source will be removed from the data store. 
All associated kapacitor resources and kapacitor rules resources are also removed.", - "responses": { - "204": { - "description": "data source has been removed" - }, - "404": { - "description": "Unknown data source id", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Unexpected internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, - "/sources/{id}/queries": { - "post": { - "tags": ["sources", "queries"], - "description": "Used to analyze queries for structure", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the data source", - "required": true - }, - { - "name": "queries", - "in": "body", - "description": "Query Parameters", - "schema": { - "$ref": "#/definitions/Queries" - }, - "required": true - } - ], - "responses": { - "200": { - "description": "Result of the analysis of the query.", - "schema": { - "$ref": "#/definitions/QueriesResponse" - } - }, - "404": { - "description": "Data source id does not exist.", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Unexpected internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, - "/sources/{id}/proxy": { - "post": { - "tags": ["sources", "proxy"], - "description": - "Query the backend time series data source and return the response according to `format`", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the data source", - "required": true - }, - { - "name": "query", - "in": "body", - "description": "Query Parameters", - "schema": { - "$ref": "#/definitions/Proxy" - }, - "required": true - } - ], - "responses": { - "200": { - "description": - "Result of the query from the backend time series data source.", - "schema": { - "$ref": "#/definitions/ProxyResponse" - } - }, - "400": { - "description": - "Any query that results in a data source error (syntax error, etc) will cause this response. The error message will be passed back in the body", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "404": { - "description": "Data source id does not exist.", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "408": { - "description": "Timeout trying to query data source.", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Unexpected internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, - "/sources/{id}/write": { - "post": { - "tags": ["sources", "write"], - "description": "Write points to the backend time series data source", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the data source", - "required": true - }, - { - "name": "query", - "in": "body", - "description": "Write Parameters", - "schema": { - "type": "string", - "format": "byte" - }, - "required": true - }, - { - "name": "db", - "in": "query", - "description": "Sets the target database for the write.", - "type": "string", - "required": true - }, - { - "name": "rp", - "in": "query", - "description": - "Sets the target retention policy for the write. InfluxDB writes to the DEFAULT retention policy if you do not specify a retention policy.", - "type": "string" - }, - { - "name": "precision", - "in": "query", - "description": - "Sets the precision for the supplied Unix time values. 
InfluxDB assumes that timestamps are in nanoseconds if you do not specify precision.", - "type": "string", - "enum": ["ns", "u", "ms", "s", "m", "h"] - }, - { - "name": "consistency", - "in": "query", - "description": - "Sets the write consistency for the point. InfluxDB assumes that the write consistency is one if you do not specify consistency. See the InfluxEnterprise documentation for detailed descriptions of each consistency option.", - "type": "string", - "enum": ["any", "one", "quorum", "all"] - } - ], - "responses": { - "204": { - "description": "Points written successfully to database." - }, - "400": { - "description": - "Any query that results in a data source error (syntax error, etc) will cause this response. The error message will be passed back in the body", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "404": { - "description": "Data source id does not exist.", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "408": { - "description": "Timeout trying to query data source.", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Unexpected internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, - "/sources/{id}/health": { - "get": { - "tags": ["sources"], - "summary": "Health check for source", - "description": "Returns whether the tsdb source can be contacted", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the data source", - "required": true - } - ], - "responses": { - "204": { - "description": "Source was able to be contacted" - }, - "404": { - "description": "Source could not be contacted", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "A processing or an unexpected error.", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, - "/sources/{id}/permissions": { - "get": { - "tags": ["sources", "users"], - "summary": "Retrieve possible permissions for this data source", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the data source", - "required": true - } - ], - "responses": { - "200": { - "description": "Listing of all possible permissions", - "schema": { - "$ref": "#/definitions/AllPermissions" - } - }, - "404": { - "description": "Data source id does not exist.", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "A processing or an unexpected error.", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, - "/sources/{id}/users": { - "get": { - "tags": ["sources", "users"], - "summary": "Retrieve all data source users", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the data source", - "required": true - } - ], - "responses": { - "200": { - "description": "Listing of all users", - "schema": { - "$ref": "#/definitions/InfluxDB-Users" - } - }, - "404": { - "description": "Data source id does not exist.", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "A processing or an unexpected error.", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "post": { - "tags": ["sources", "users"], - "summary": "Create new user for this data source", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the data source", - "required": true - }, - { - "name": "user", - "in": "body", - "description": "Configuration options for new user", - "schema": { - "$ref": 
"#/definitions/InfluxDB-User" - } - } - ], - "responses": { - "201": { - "description": "User successfully created", - "headers": { - "Location": { - "type": "string", - "format": "url", - "description": "Location of the newly created user resource." - } - }, - "schema": { - "$ref": "#/definitions/InfluxDB-User" - } - }, - "404": { - "description": "Data source id does not exist.", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "A processing or an unexpected error.", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, - "/sources/{id}/users/{user_id}": { - "get": { - "tags": ["sources", "users"], - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the data source", - "required": true - }, - { - "name": "user_id", - "in": "path", - "type": "string", - "description": "ID of the specific user", - "required": true - } - ], - "summary": "Returns information about a specific user", - "description": "Specific User within a data source", - "responses": { - "200": { - "description": "Information relating to the user", - "schema": { - "$ref": "#/definitions/InfluxDB-User" - } - }, - "404": { - "description": "Unknown user or unknown source", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Unexpected internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "patch": { - "tags": ["sources", "users"], - "summary": "Update user configuration", - "description": - "Update one parameter at a time (one of password, permissions or roles)", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the data source", - "required": true - }, - { - "name": "user_id", - "in": "path", - "type": "string", - "description": "ID of the specific user", - "required": true - }, - { - "name": "config", - "in": "body", - "description": "user configuration", - "schema": { - "$ref": "#/definitions/InfluxDB-User" - }, - "required": true - } - ], - "responses": { - "200": { - "description": "Users's configuration was changed", - "schema": { - "$ref": "#/definitions/InfluxDB-User" - } - }, - "404": { - "description": - "Happens when trying to access a non-existent user or source.", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "A processing or an unexpected error.", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "delete": { - "tags": ["sources", "users"], - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the data source", - "required": true - }, - { - "name": "user_id", - "in": "path", - "type": "string", - "description": "ID of the specific user", - "required": true - } - ], - "summary": "This specific user will be removed from the data source", - "responses": { - "204": { - "description": "User has been removed" - }, - "404": { - "description": "Unknown user id or data source", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Unexpected internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, - "/sources/{id}/roles": { - "get": { - "tags": ["sources", "users", "roles"], - "summary": - "Retrieve all data sources roles. 
Available only in Influx Enterprise", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the data source", - "required": true - } - ], - "responses": { - "200": { - "description": "Listing of all roles", - "schema": { - "$ref": "#/definitions/InfluxDB-Roles" - } - }, - "404": { - "description": "Data source id does not exist.", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "A processing or an unexpected error.", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "post": { - "tags": ["sources", "users", "roles"], - "summary": "Create new role for this data source", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the data source", - "required": true - }, - { - "name": "roleuser", - "in": "body", - "description": "Configuration options for new role", - "schema": { - "$ref": "#/definitions/InfluxDB-Role" - } - } - ], - "responses": { - "201": { - "description": "Role successfully created", - "headers": { - "Location": { - "type": "string", - "format": "url", - "description": "Location of the newly created role resource." - } - }, - "schema": { - "$ref": "#/definitions/InfluxDB-Role" - } - }, - "404": { - "description": "Data source id does not exist.", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "A processing or an unexpected error.", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, - "/sources/{id}/roles/{role_id}": { - "get": { - "tags": ["sources", "users", "roles"], - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the data source", - "required": true - }, - { - "name": "role_id", - "in": "path", - "type": "string", - "description": "ID of the specific role", - "required": true - } - ], - "summary": "Returns information about a specific role", - "description": "Specific role within a data source", - "responses": { - "200": { - "description": "Information relating to the role", - "schema": { - "$ref": "#/definitions/InfluxDB-Role" - } - }, - "404": { - "description": "Unknown role or unknown source", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Unexpected internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "patch": { - "tags": ["sources", "users", "roles"], - "summary": "Update role configuration", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the data source", - "required": true - }, - { - "name": "role_id", - "in": "path", - "type": "string", - "description": "ID of the specific role", - "required": true - }, - { - "name": "config", - "in": "body", - "description": "role configuration", - "schema": { - "$ref": "#/definitions/InfluxDB-Role" - }, - "required": true - } - ], - "responses": { - "200": { - "description": "Role's configuration was changed", - "schema": { - "$ref": "#/definitions/InfluxDB-Role" - } - }, - "404": { - "description": - "Happens when trying to access a non-existent role or source.", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "A processing or an unexpected error.", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "delete": { - "tags": ["sources", "users", "roles"], - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the data source", - "required": true - }, - { - "name": 
"role_id", - "in": "path", - "type": "string", - "description": "ID of the specific role", - "required": true - } - ], - "summary": "This specific role will be removed from the data source", - "responses": { - "204": { - "description": "Role has been removed" - }, - "404": { - "description": "Unknown role id or data source", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Unexpected internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, - "/sources/{id}/dbs/": { - "get": { - "tags": ["databases"], - "summary": "Retrieve all databases for a source", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the data source", - "required": true - } - ], - "responses": { - "200": { - "description": "Listing of all databases for a source", - "schema": { - "$ref": "#/definitions/Databases" - } - }, - "404": { - "description": "Data source id does not exist.", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "A processing or an unexpected error.", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "post": { - "tags": ["databases"], - "summary": "Create new database for a source", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the data source", - "required": true - }, - { - "name": "database", - "in": "body", - "description": "Configuration options for a database", - "schema": { - "$ref": "#/definitions/Database" - }, - "required": true - } - ], - "responses": { - "201": { - "description": "Database successfully created.", - "schema": { - "$ref": "#/definitions/Database" - } - }, - "404": { - "description": "Data source id does not exist.", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "A processing or an unexpected error.", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, - "/sources/{id}/dbs/{db}": { - "delete": { - "tags": ["databases"], - "summary": "Delete database for a source", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the data source", - "required": true - }, - { - "name": "db", - "in": "path", - "type": "string", - "description": "Name of the database", - "required": true - } - ], - "responses": { - "204": { - "description": "Database has been deleted" - }, - "404": { - "description": "Data source id does not exist.", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "A processing or an unexpected error.", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, - "/sources/{id}/dbs/{db}/rps": { - "get": { - "tags": ["retention policies"], - "summary": "Retrieve all retention policies for a database", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the data source", - "required": true - }, - { - "name": "db", - "in": "path", - "type": "string", - "description": "Name of the database", - "required": true - } - ], - "responses": { - "200": { - "description": "Listing of all retention policies for a database", - "schema": { - "$ref": "#/definitions/RetentionPolicies" - } - }, - "404": { - "description": "Specified retention policy does not exist.", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "A processing or an unexpected error.", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "post": { - "tags": ["retention 
policies"], - "summary": "Create new retention policy for a database", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the data source", - "required": true - }, - { - "name": "db", - "in": "path", - "type": "string", - "description": "Name of the database", - "required": true - }, - { - "name": "rp", - "in": "body", - "description": "Configuration options for the retention policy", - "schema": { - "$ref": "#/definitions/RetentionPolicy" - }, - "required": true - } - ], - "responses": { - "201": { - "description": "Retention Policy successfully created.", - "schema": { - "$ref": "#/definitions/RetentionPolicy" - } - }, - "404": { - "description": "Data source id does not exist.", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "A processing or an unexpected error.", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, - "/sources/{id}/dbs/{db}/rps/{rp}": { - "patch": { - "tags": ["retention policies"], - "summary": "Alter retention policy for a database", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the data source", - "required": true - }, - { - "name": "db", - "in": "path", - "type": "string", - "description": "Name of the database", - "required": true - }, - { - "name": "rp", - "in": "path", - "type": "string", - "description": "Name of the retention policy", - "required": true - }, - { - "name": "rp", - "in": "body", - "description": "Configuration options for the retention policy", - "schema": { - "$ref": "#/definitions/RetentionPolicy" - }, - "required": true - } - ], - "responses": { - "200": { - "description": "Retention Policy was altered", - "schema": { - "$ref": "#/definitions/RetentionPolicy" - } - }, - "404": { - "description": "Database or source does not exist.", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "A processing or an unexpected error.", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "delete": { - "tags": ["retention policies"], - "summary": "Delete retention policy for a database", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the data source", - "required": true - }, - { - "name": "db", - "in": "path", - "type": "string", - "description": "Name of the database", - "required": true - }, - { - "name": "rp", - "in": "path", - "type": "string", - "description": "Name of the retention policy", - "required": true - } - ], - "responses": { - "204": { - "description": "Retention Policy has been deleted" - }, - "404": { - "description": "Data source id does not exist.", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Unexpected internal service error.", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, - "/sources/{id}/dbs/{db}/measurements": { - "get": { - "tags": ["measurements"], - "summary": "Retrieve measurements in a database", - "parameters": [ - { - "in": "path", - "name": "id", - "type": "string", - "description": "ID of the data source", - "required": true - }, - { - "in": "path", - "name": "db", - "type": "string", - "description": "Name of the database", - "required": true - }, - { - "in": "query", - "name": "limit", - "type": "integer", - "minimum": 1, - "default": 100, - "description": - "The upper limit of the number of available database measurements to return.", - "required": false - }, - { - "in": "query", - "name": "offset", - "type": 
"integer", - "minimum": 0, - "default": 0, - "description": - "The number of measurements to skip before starting to collect the result set.", - "required": false - } - ], - "responses": { - "200": { - "description": "Listing of measurements for a database", - "schema": { - "$ref": "#/definitions/MeasurementsResponse" - } - }, - "400": { - "description": - "Unable to connect to source; or unable to get measurements from database.", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "404": { - "description": "Source not found.", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "422": { - "description": - "Invalid source id param value in path; or invalid limit or offset param value in query.", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Unexpected internal service error.", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, - "/sources/{id}/kapacitors": { - "get": { - "tags": ["sources", "kapacitors"], - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the source", - "required": true - } - ], - "summary": "Retrieve list of configured kapacitors", - "responses": { - "200": { - "description": "An array of kapacitors", - "schema": { - "$ref": "#/definitions/Kapacitors" - } - }, - "default": { - "description": "Unexpected internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "post": { - "tags": ["sources", "kapacitors"], - "summary": "Create new kapacitor backend", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the source", - "required": true - }, - { - "name": "kapacitor", - "in": "body", - "description": "Configuration options for kapacitor", - "schema": { - "$ref": "#/definitions/Kapacitor" - } - } - ], - "responses": { - "201": { - "description": "Kapacitor source successfully created", - "headers": { - "Location": { - "type": "string", - "format": "url", - "description": - "Location of the newly created kapacitor resource." 
- } - }, - "schema": { - "$ref": "#/definitions/Kapacitor" - } - }, - "default": { - "description": "A processing or an unexpected error.", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, - "/sources/{id}/kapacitors/{kapa_id}": { - "get": { - "tags": ["sources", "kapacitors"], - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the source", - "required": true - }, - { - "name": "kapa_id", - "in": "path", - "type": "string", - "description": "ID of the kapacitor", - "required": true - } - ], - "summary": "Configured kapacitors", - "description": "Retrieve information on a single kapacitor instance", - "responses": { - "200": { - "description": "Kapacitor connection information", - "schema": { - "$ref": "#/definitions/Kapacitor" - } - }, - "404": { - "description": "Unknown data source or kapacitor id", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Unexpected internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "patch": { - "tags": ["sources", "kapacitors"], - "summary": "Update kapacitor configuration", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the source", - "required": true - }, - { - "name": "kapa_id", - "in": "path", - "type": "string", - "description": "ID of a kapacitor backend", - "required": true - }, - { - "name": "config", - "in": "body", - "description": "kapacitor configuration", - "schema": { - "$ref": "#/definitions/Kapacitor" - }, - "required": true - } - ], - "responses": { - "200": { - "description": "Kapacitor's configuration was changed", - "schema": { - "$ref": "#/definitions/Kapacitor" - } - }, - "404": { - "description": - "Happens when trying to access a non-existent data source or kapacitor.", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "A processing or an unexpected error.", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "delete": { - "tags": ["sources", "kapacitors"], - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the source", - "required": true - }, - { - "name": "kapa_id", - "in": "path", - "type": "string", - "description": "ID of the kapacitor", - "required": true - } - ], - "summary": "Remove Kapacitor backend", - "description": - "This specific kapacitor will be removed. All associated rule resources will also be removed from the store.", - "responses": { - "204": { - "description": "kapacitor has been removed." 
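For illustration only (this example is not part of the original swagger.json): a minimal Go sketch of listing the kapacitor backends configured for a source via GET /sources/{id}/kapacitors. The base URL, API prefix /chronograf/v1, and source ID 1 are assumptions about a typical Chronograf deployment; the response is decoded into a generic map because the Kapacitors schema is only referenced here.

// List kapacitors for source 1 (illustrative sketch, not the spec itself).
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Assumed base URL and source ID; a real deployment may differ.
	resp, err := http.Get("http://localhost:8888/chronograf/v1/sources/1/kapacitors")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// Decode generically; the exact shape is given by #/definitions/Kapacitors.
	var kapacitors map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&kapacitors); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", kapacitors)
}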
- }, - "404": { - "description": "Unknown Data source or Kapacitor id", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Unexpected internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, - "/sources/{id}/kapacitors/{kapa_id}/rules": { - "get": { - "tags": ["sources", "kapacitors", "rules"], - "description": "Get all defined alert rules.", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the source", - "required": true - }, - { - "name": "kapa_id", - "in": "path", - "type": "string", - "description": "ID of the kapacitor backend.", - "required": true - } - ], - "responses": { - "200": { - "description": - "All alert rules for this specific kapacitor are returned", - "schema": { - "$ref": "#/definitions/Rules" - } - }, - "404": { - "description": "Data source or Kapacitor ID does not exist.", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "post": { - "tags": ["sources", "kapacitors", "rules"], - "description": "Create kapacitor alert rule", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the source", - "required": true - }, - { - "name": "kapa_id", - "in": "path", - "type": "string", - "description": "ID of the kapacitor backend.", - "required": true - }, - { - "name": "rule", - "in": "body", - "description": "Rule to generate alert rule", - "schema": { - "$ref": "#/definitions/Rule" - }, - "required": true - } - ], - "responses": { - "201": { - "description": "Kapacitor alert rule successfully created", - "headers": { - "Location": { - "type": "string", - "format": "url", - "description": - "Location of the newly created kapacitor rule resource." 
- } - }, - "schema": { - "$ref": "#/definitions/Rule" - } - }, - "404": { - "description": "Source ID or Kapacitor ID does not exist.", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "422": { - "description": - "Source ID, Kapacitor ID, or alert are unprocessable", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": - "Internal server error; generally a problem creating alert in kapacitor", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, - "/sources/{id}/kapacitors/{kapa_id}/rules/{rule_id}": { - "get": { - "tags": ["sources", "kapacitors", "rules"], - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the source", - "required": true - }, - { - "name": "kapa_id", - "in": "path", - "type": "string", - "description": "ID of the kapacitor", - "required": true - }, - { - "name": "rule_id", - "in": "path", - "type": "string", - "description": "ID of the rule", - "required": true - } - ], - "summary": "Specific kapacitor alert rule", - "description": "Alerting rule for kapacitor", - "responses": { - "200": { - "description": "Alert exists and has a specific TICKscript", - "schema": { - "$ref": "#/definitions/Rule" - } - }, - "404": { - "description": "Unknown data source, kapacitor id, or rule id", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Unexpected internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "put": { - "tags": ["sources", "kapacitors", "rules"], - "summary": "Update alert rule configuration", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the source", - "required": true - }, - { - "name": "kapa_id", - "in": "path", - "type": "string", - "description": "ID of a kapacitor backend", - "required": true - }, - { - "name": "rule_id", - "in": "path", - "type": "string", - "description": "ID of a rule", - "required": true - }, - { - "name": "rule", - "in": "body", - "description": "Rule update", - "schema": { - "$ref": "#/definitions/Rule" - }, - "required": true - } - ], - "responses": { - "200": { - "description": "Alert configuration was changed", - "schema": { - "$ref": "#/definitions/Rule" - } - }, - "404": { - "description": - "Happens when trying to access a non-existent data source, kapacitor, or rule.", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "A processing or an unexpected error.", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "delete": { - "tags": ["sources", "kapacitors", "rules"], - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the source", - "required": true - }, - { - "name": "kapa_id", - "in": "path", - "type": "string", - "description": "ID of the kapacitor", - "required": true - }, - { - "name": "rule_id", - "in": "path", - "type": "string", - "description": "ID of the rule", - "required": true - } - ], - "summary": "This specific alert rule will be removed.", - "responses": { - "204": { - "description": "Alert rule has been removed." 
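To make the rule endpoints above concrete, here is a hedged Go sketch (again, not part of the spec) of removing an alert rule via DELETE /sources/{id}/kapacitors/{kapa_id}/rules/{rule_id}. All IDs and the base URL are placeholders; per the responses above, a 204 confirms removal and a 404 means the source, kapacitor, or rule is unknown.

// Delete one kapacitor alert rule (illustrative sketch with placeholder IDs).
package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Placeholder source, kapacitor, and rule IDs.
	url := "http://localhost:8888/chronograf/v1/sources/1/kapacitors/1/rules/example-rule"
	req, err := http.NewRequest(http.MethodDelete, url, nil)
	if err != nil {
		log.Fatal(err)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // expect "204 No Content" on success
}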
- }, - "404": { - "description": "Unknown Data source, Kapacitor id, or alert rule", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Unexpected internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, - "/sources/{id}/kapacitors/{kapa_id}/proxy": { - "get": { - "tags": ["sources", "kapacitors", "proxy"], - "description": - "GET to `path` of kapacitor. The response and status code from kapacitor is directly returned.", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the source", - "required": true - }, - { - "name": "kapa_id", - "in": "path", - "type": "string", - "description": "ID of the kapacitor backend.", - "required": true - }, - { - "name": "path", - "in": "query", - "type": "string", - "description": - "The kapacitor API path to use in the proxy redirect", - "required": true - } - ], - "responses": { - "204": { - "description": "Kapacitor returned no content" - }, - "404": { - "description": "Data source or Kapacitor ID does not exist.", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Response directly from kapacitor", - "schema": { - "$ref": "#/definitions/KapacitorProxyResponse" - } - } - } - }, - "delete": { - "tags": ["sources", "kapacitors", "proxy"], - "description": - "DELETE to `path` of kapacitor. The response and status code from kapacitor is directly returned.", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the source", - "required": true - }, - { - "name": "kapa_id", - "in": "path", - "type": "string", - "description": "ID of the kapacitor backend.", - "required": true - }, - { - "name": "path", - "in": "query", - "type": "string", - "description": - "The kapacitor API path to use in the proxy redirect", - "required": true - } - ], - "responses": { - "204": { - "description": "Kapacitor returned no content" - }, - "404": { - "description": "Data source or Kapacitor ID does not exist.", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Response directly from kapacitor", - "schema": { - "$ref": "#/definitions/KapacitorProxyResponse" - } - } - } - }, - "patch": { - "tags": ["sources", "kapacitors", "proxy"], - "description": - "PATCH body directly to configured kapacitor. The response and status code from kapacitor is directly returned.", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the source", - "required": true - }, - { - "name": "kapa_id", - "in": "path", - "type": "string", - "description": "ID of the kapacitor backend.", - "required": true - }, - { - "name": "path", - "in": "query", - "type": "string", - "description": - "The kapacitor API path to use in the proxy redirect", - "required": true - }, - { - "name": "query", - "in": "body", - "description": "Kapacitor body", - "schema": { - "$ref": "#/definitions/KapacitorProxy" - }, - "required": true - } - ], - "responses": { - "204": { - "description": "Kapacitor returned no content" - }, - "404": { - "description": "Data source or Kapacitor ID does not exist.", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Response directly from kapacitor", - "schema": { - "$ref": "#/definitions/KapacitorProxyResponse" - } - } - } - }, - "post": { - "tags": ["sources", "kapacitors", "proxy"], - "description": - "POST body directly to configured kapacitor. 
The response and status code from kapacitor is directly returned.", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the source", - "required": true - }, - { - "name": "kapa_id", - "in": "path", - "type": "string", - "description": "ID of the kapacitor backend.", - "required": true - }, - { - "name": "path", - "in": "query", - "type": "string", - "description": - "The kapacitor API path to use in the proxy redirect", - "required": true - }, - { - "name": "query", - "in": "body", - "description": "Kapacitor body", - "schema": { - "$ref": "#/definitions/KapacitorProxy" - }, - "required": true - } - ], - "responses": { - "204": { - "description": "Kapacitor returned no content" - }, - "404": { - "description": "Kapacitor ID does not exist.", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Response directly from kapacitor", - "schema": { - "$ref": "#/definitions/KapacitorProxyResponse" - } - } - } - } - }, - "/sources/{id}/services": { - "get": { - "tags": ["sources", "services"], - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the source", - "required": true - } - ], - "summary": "Retrieve list of services for a source", - "responses": { - "200": { - "description": "An array of services", - "schema": { - "$ref": "#/definitions/Services" - } - }, - "default": { - "description": "Unexpected internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "post": { - "tags": ["sources", "services"], - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the source", - "required": true - }, - { - "name": "service", - "in": "body", - "description": "Configuration options for the service", - "schema": { - "$ref": "#/definitions/Service" - } - } - ], - "summary": "Create a new service", - "responses": { - "200": { - "description": "Returns the newly created service", - "schema": { - "$ref": "#/definitions/Service" - } - }, - "504": { - "description": "Gateway timeout happens when the server cannot connect to the service", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Unexpected internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, - "/sources/{id}/services/{srv_id}": { - "get": { - "tags": ["sources", "services"], - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the source", - "required": true - }, - { - "name": "srv_id", - "in": "path", - "type": "string", - "description": "ID of the service", - "required": true - } - ], - "summary": "Retrieve a service", - "description": "Retrieve a single service by id", - "responses": { - "200": { - "description": "Service connection information", - "schema": { - "$ref": "#/definitions/Service" - } - }, - "404": { - "description": "Unknown data source or service id", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Unexpected internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "patch": { - "tags": ["sources", "services"], - "summary": "Update service configuration", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the source", - "required": true - }, - { - "name": "srv_id", - "in": "path", - "type": "string", - "description": "ID of a service backend", - "required": true - }, - { - "name": "service", - "in": 
"body", - "description": "service configuration", - "schema": { - "$ref": "#/definitions/Service" - }, - "required": true - } - ], - "responses": { - "200": { - "description": "Service configuration was changed", - "schema": { - "$ref": "#/definitions/Service" - } - }, - "504": { - "description": "Gateway timeout happens when the server cannot connect to the service", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "422": { - "description": "Unprocessable entity happens when the service ID provided does not exist", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "A processing or an unexpected error.", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "delete": { - "tags": ["sources", "services"], - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the source", - "required": true - }, - { - "name": "srv_id", - "in": "path", - "type": "string", - "description": "ID of the service", - "required": true - } - ], - "summary": "Remove Service backend", - "description": - "This specific service will be removed.", - "responses": { - "204": { - "description": "service has been removed." - }, - "404": { - "description": "Unknown Data source or Service id", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Unexpected internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, - "/sources/{id}/services/{srv_id}/proxy": { - "get": { - "tags": ["sources", "services", "proxy"], - "description": - "GET to `path` of Service. The response and status code from Service is directly returned.", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the source", - "required": true - }, - { - "name": "srv_id", - "in": "path", - "type": "string", - "description": "ID of the service backend.", - "required": true - }, - { - "name": "path", - "in": "query", - "type": "string", - "description": - "The Service API path to use in the proxy redirect", - "required": true - } - ], - "responses": { - "204": { - "description": "Service returned no content" - }, - "404": { - "description": "Data source or Service ID does not exist.", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Response directly from the service", - "schema": { - "$ref": "#/definitions/ServiceProxyResponse" - } - } - } - }, - "delete": { - "tags": ["sources", "services", "proxy"], - "description": - "DELETE to `path` of Service. The response and status code from the service is directly returned.", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the source", - "required": true - }, - { - "name": "srv_id", - "in": "path", - "type": "string", - "description": "ID of the Service backend.", - "required": true - }, - { - "name": "path", - "in": "query", - "type": "string", - "description": - "The Service API path to use in the proxy redirect", - "required": true - } - ], - "responses": { - "204": { - "description": "Service returned no content" - }, - "404": { - "description": "Data source or Service ID does not exist.", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Response directly from the service", - "schema": { - "$ref": "#/definitions/ServiceProxyResponse" - } - } - } - }, - "patch": { - "tags": ["sources", "services", "proxy"], - "description": - "PATCH body directly to configured service. 
The response and status code from Service is directly returned.", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the source", - "required": true - }, - { - "name": "srv_id", - "in": "path", - "type": "string", - "description": "ID of the Service backend.", - "required": true - }, - { - "name": "path", - "in": "query", - "type": "string", - "description": - "The Service API path to use in the proxy redirect", - "required": true - }, - { - "name": "query", - "in": "body", - "description": "Service body", - "schema": { - "$ref": "#/definitions/ServiceProxy" - }, - "required": true - } - ], - "responses": { - "204": { - "description": "Service returned no content" - }, - "404": { - "description": "Data source or Service ID does not exist.", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Response directly from Service", - "schema": { - "$ref": "#/definitions/ServiceProxyResponse" - } - } - } - }, - "post": { - "tags": ["sources", "services", "proxy"], - "description": - "POST body directly to configured Service. The response and status code from Service is directly returned.", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the source", - "required": true - }, - { - "name": "srv_id", - "in": "path", - "type": "string", - "description": "ID of the Service backend.", - "required": true - }, - { - "name": "path", - "in": "query", - "type": "string", - "description": - "The Service API path to use in the proxy redirect", - "required": true - }, - { - "name": "query", - "in": "body", - "description": "Service body", - "schema": { - "$ref": "#/definitions/ServiceProxy" - }, - "required": true - } - ], - "responses": { - "204": { - "description": "Service returned no content" - }, - "404": { - "description": "Service ID does not exist.", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Response directly from Service", - "schema": { - "$ref": "#/definitions/ServiceProxyResponse" - } - } - } - } - }, - "/mappings": { - "get": { - "tags": ["layouts", "mappings"], - "summary": "Mappings between app names and measurements", - "description": - "Mappings provide a means to alias measurement names found within a telegraf database and application layouts found within Chronograf\n", - "responses": { - "200": { - "description": "An array of mappings", - "schema": { - "$ref": "#/definitions/Mappings" - } - }, - "default": { - "description": "Unexpected internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, - "/layouts": { - "get": { - "tags": ["layouts"], - "summary": "Pre-configured layouts", - "parameters": [ - { - "name": "measurement", - "in": "query", - "description": "Returns layouts with this measurement", - "required": false, - "type": "array", - "items": { - "type": "string" - }, - "collectionFormat": "multi" - }, - { - "name": "app", - "in": "query", - "description": "Returns layouts with this app", - "required": false, - "type": "array", - "items": { - "type": "string" - }, - "collectionFormat": "multi" - } - ], - "description": - "Layouts are a collection of `Cells` that visualize time-series data.\n", - "responses": { - "200": { - "description": "An array of layouts", - "schema": { - "$ref": "#/definitions/Layouts" - } - }, - "default": { - "description": "Unexpected internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "post": { - "tags": ["layouts"], 
- "summary": "Create new layout", - "parameters": [ - { - "name": "layout", - "in": "body", - "description": - "Defines the layout and queries of the cells within the layout.", - "schema": { - "$ref": "#/definitions/Layout" - } - } - ], - "responses": { - "201": { - "description": "Layout successfully created", - "headers": { - "Location": { - "type": "string", - "format": "url", - "description": "Location of the newly created layout" - } - }, - "schema": { - "$ref": "#/definitions/Layout" - } - }, - "default": { - "description": "A processing or an unexpected error.", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, - "/layouts/{id}": { - "get": { - "tags": ["layouts"], - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the layout", - "required": true - } - ], - "summary": - "Specific pre-configured layout containing cells and queries.", - "description": - "layouts will hold information about how to layout the page of graphs.\n", - "responses": { - "200": { - "description": "Returns the specified layout containing `cells`.", - "schema": { - "$ref": "#/definitions/Layout" - } - }, - "404": { - "description": "Unknown layout id", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Unexpected internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "delete": { - "tags": ["layouts"], - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the layout", - "required": true - } - ], - "summary": "This specific layout will be removed from the data store", - "responses": { - "204": { - "description": "Layout has been removed." - }, - "404": { - "description": "Unknown layout id", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Unexpected internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "put": { - "tags": ["layouts"], - "summary": "Replace layout configuration.", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of a layout", - "required": true - }, - { - "name": "config", - "in": "body", - "description": "layout configuration update parameters", - "schema": { - "$ref": "#/definitions/Layout" - }, - "required": true - } - ], - "responses": { - "200": { - "description": - "Layout has been replaced and the new layout is returned.", - "schema": { - "$ref": "#/definitions/Layout" - } - }, - "404": { - "description": - "Happens when trying to access a non-existent layout.", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "A processing or an unexpected error.", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, - "/dashboards": { - "get": { - "tags": ["dashboards"], - "summary": "List of all dashboards", - "responses": { - "200": { - "description": "An array of dashboards", - "schema": { - "$ref": "#/definitions/Dashboards" - } - }, - "default": { - "description": "Unexpected internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "post": { - "tags": ["dashboards"], - "summary": "Create new dashboard", - "parameters": [ - { - "name": "dashboard", - "in": "body", - "description": "Configuration options for new dashboard", - "schema": { - "$ref": "#/definitions/Dashboard" - } - } - ], - "responses": { - "201": { - "description": "Dashboard successfully created", - "headers": { - "Location": { - "type": "string", - "format": 
"url", - "description": - "Location of the newly created dashboard resource." - } - }, - "schema": { - "$ref": "#/definitions/Dashboard" - } - }, - "default": { - "description": "A processing or an unexpected error.", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, - "/dashboards/{id}": { - "get": { - "tags": ["dashboards"], - "parameters": [ - { - "name": "id", - "in": "path", - "type": "integer", - "description": "ID of the dashboard", - "required": true - } - ], - "summary": "Specific dashboard", - "description": - "Dashboards contain visual display information as well as links to queries", - "responses": { - "200": { - "description": - "Returns the specified dashboard with links to queries.", - "schema": { - "$ref": "#/definitions/Dashboard" - } - }, - "404": { - "description": "Unknown dashboard id", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Unexpected internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "delete": { - "tags": ["dashboards"], - "parameters": [ - { - "name": "id", - "in": "path", - "type": "integer", - "description": "ID of the layout", - "required": true - } - ], - "summary": "Deletes the specified dashboard", - "responses": { - "204": { - "description": "Dashboard has been removed." - }, - "404": { - "description": "Unknown dashboard id", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Unexpected internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "put": { - "tags": ["dashboards"], - "summary": "Replace dashboard information.", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "integer", - "description": "ID of a dashboard", - "required": true - }, - { - "name": "config", - "in": "body", - "description": "dashboard configuration update parameters", - "schema": { - "$ref": "#/definitions/Dashboard" - }, - "required": true - } - ], - "responses": { - "200": { - "description": - "Dashboard has been replaced and the new dashboard is returned.", - "schema": { - "$ref": "#/definitions/Dashboard" - } - }, - "404": { - "description": - "Happens when trying to access a non-existent dashboard.", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "A processing or an unexpected error.", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "patch": { - "tags": ["layouts"], - "summary": "Update dashboard information.", - "description": - "Update either the dashboard name or the dashboard cells", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "integer", - "description": "ID of a dashboard", - "required": true - }, - { - "name": "config", - "in": "body", - "description": - "dashboard configuration update parameters. 
Must be either name or cells", - "schema": { - "$ref": "#/definitions/Dashboard" - }, - "required": true - } - ], - "responses": { - "200": { - "description": - "Dashboard has been updated and the new dashboard is returned.", - "schema": { - "$ref": "#/definitions/Dashboard" - } - }, - "404": { - "description": - "Happens when trying to access a non-existent dashboard.", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "A processing or an unexpected error.", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, - "/organizations": { - "get": { - "tags": ["organizations", "users"], - "summary": "Retrieve all organizations", - "description": "Returns all organizations from the store", - "responses": { - "200": { - "description": - "Successfully retrieved all organizations from the store", - "schema": { - "$ref": "#/definitions/Organizations" - } - }, - "400": { - "description": "Failed to retrieve organizations from store", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "403": { - "description": "Forbidden to access this route", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "post": { - "tags": ["organizations", "users"], - "summary": "Create new organization", - "description": "Creates a Chronograf organization in the store", - "parameters": [ - { - "name": "organization", - "in": "body", - "description": "Organization to create", - "schema": { - "$ref": "#/definitions/Organization" - } - } - ], - "responses": { - "201": { - "description": "Organization successfully created", - "headers": { - "Location": { - "type": "string", - "format": "url", - "description": - "Location of the newly created organization resource" - } - }, - "schema": { - "$ref": "#/definitions/Organization" - } - }, - "400": { - "description": - "Invalid JSON – unable to encode or decode; or failed to perform operation in data store", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "403": { - "description": "Forbidden to access this route", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "422": { - "description": - "Invalid data schema provided to server for organization", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, - "/organizations/{id}": { - "get": { - "tags": ["organizations", "users"], - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the organization", - "required": true - } - ], - "summary": "Retrieve a specific organization", - "description": "Returns a specific organization from the store", - "responses": { - "200": { - "description": "An Organization object", - "schema": { - "$ref": "#/definitions/Organization" - } - }, - "400": { - "description": "Failed to load organization from store", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "403": { - "description": "Forbidden to access this route", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "patch": { - "tags": ["organizations", "users"], - "summary": "Update existing organization", - "description": "Updates a Chronograf organization in the store", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": 
"ID of the organization", - "required": true - }, - { - "name": "organization", - "in": "body", - "description": "Updated organization", - "schema": { - "$ref": "#/definitions/Organization" - }, - "required": true - } - ], - "responses": { - "201": { - "description": "Organization successfully updated", - "headers": { - "Location": { - "type": "string", - "format": "url", - "description": "Location of the updated organization resource" - } - }, - "schema": { - "$ref": "#/definitions/Organization" - } - }, - "400": { - "description": - "Invalid JSON – unable to encode or decode; or failed to perform operation in data store", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "403": { - "description": "Forbidden to access this route", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "422": { - "description": - "Invalid data schema provided to server for organization", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "delete": { - "tags": ["organizations", "users"], - "summary": "Delete organization", - "description": "Deletes a Chronograf organization in the store", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the organization", - "required": true - } - ], - "responses": { - "204": { - "description": "Organization successfully deleted" - }, - "400": { - "description": "Failed to perform operation in data store", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "403": { - "description": "Forbidden to access this route", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "404": { - "description": "Organization not found", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, - "/users": { - "get": { - "tags": ["organizations", "users"], - "summary": - "Retrieve all Chronograf users within the current organization", - "description": - "Returns all Chronograf users within the current organization from the store", - "responses": { - "200": { - "description": "Successfully retrieved all users from the store", - "schema": { - "$ref": "#/definitions/Users" - } - }, - "400": { - "description": "Failed to load users from store", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "403": { - "description": "Forbidden to access this route", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "post": { - "tags": ["organizations", "users"], - "summary": "Create new user", - "description": "Creates a Chronograf user in the store", - "parameters": [ - { - "name": "user", - "in": "body", - "description": "User to create", - "schema": { - "$ref": "#/definitions/User" - } - } - ], - "responses": { - "201": { - "description": "User successfully created", - "headers": { - "Location": { - "type": "string", - "format": "url", - "description": "Location of the newly created user resource" - } - }, - "schema": { - "$ref": "#/definitions/User" - } - }, - "400": { - "description": - "Invalid JSON – unable to encode or decode; or failed to perform operation in data store", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "401": { - "description": "Unauthorized to perform this operation", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "403": { - 
"description": "Forbidden to access this route", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "422": { - "description": "Invalid data schema provided to server for user", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, - "/users/{id}": { - "get": { - "tags": ["organizations", "users"], - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the user", - "required": true - } - ], - "summary": "Retrieve a specific user", - "description": "Returns a specific user from the store", - "responses": { - "200": { - "description": "An User object", - "schema": { - "$ref": "#/definitions/User" - } - }, - "400": { - "description": - "Failed to load user from store; or failed to parse user ID as valid", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "403": { - "description": "Forbidden to access this route", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "patch": { - "tags": ["organizations", "users"], - "summary": "Update existing user", - "description": "Updates a Chronograf user in the store", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the user", - "required": true - }, - { - "name": "user", - "in": "body", - "description": "Updated user", - "schema": { - "$ref": "#/definitions/User" - }, - "required": true - } - ], - "responses": { - "201": { - "description": "User successfully updated", - "headers": { - "Location": { - "type": "string", - "format": "url", - "description": "Location of the updated user resource" - } - }, - "schema": { - "$ref": "#/definitions/User" - } - }, - "400": { - "description": - "Invalid JSON – unable to encode or decode; failed to parse user id as valid; or failed to perform operation in data store", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "401": { - "description": "Unauthorized to perform operation", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "403": { - "description": "Forbidden to access this route", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "404": { - "description": "User not found", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "422": { - "description": "Invalid data schema provided to server for user", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "delete": { - "tags": ["organizations", "users"], - "summary": "Delete user", - "description": "Deletes a Chronograf user in the store", - "parameters": [ - { - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the user", - "required": true - } - ], - "responses": { - "204": { - "description": "User successfully deleted" - }, - "400": { - "description": - "Failed to parse user id as valid; failed to retrieve user from context; or failed to perform operation in data store", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "403": { - "description": "Forbidden to access this route", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "404": { - "description": "User not found", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Internal server error", - "schema": { - "$ref": "#/definitions/Error" - 
- } - } - } - } - }, - "/chronograf/v1/config": { - "get": { - "tags": ["config"], - "summary": "Returns the global application configuration", - "description": "All global application configurations", - "responses": { - "200": { - "description": "Returns an object with the global configurations", - "schema": { - "$ref": "#/definitions/Config" - } - }, - "404": { - "description": "Could not find global application config", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Unexpected internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, - "/chronograf/v1/config/auth": { - "get": { - "tags": ["config"], - "summary": "Returns the global application configuration for auth", - "description": "All global application configuration for auth", - "responses": { - "200": { - "description": "Returns an object with the global application configuration for auth", - "schema": { - "$ref": "#/definitions/AuthConfig" - } - }, - "404": { - "description": "Could not find auth configuration", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Unexpected internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "put": { - "tags": ["config"], - "summary": "Updates the global application configuration for auth", - "description": "Replaces the global application configuration for auth", - "parameters": [ - { - "name": "auth", - "in": "body", - "description": - "Auth configuration update object", - "schema": { - "$ref": "#/definitions/AuthConfig" - }, - "required": true - } - ], - "responses": { - "200": { - "description": "Returns an object with the updated auth configuration", - "schema": { - "$ref": "#/definitions/AuthConfig" - } - }, - "404": { - "description": "Could not find auth configuration", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Unexpected internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, - "/chronograf/v1/org_config": { - "get": { - "tags": ["organization config"], - "summary": "Retrieve the organization configuration", - "description": "Organization-specific configurations such as log viewer configs", - "responses": { - "200": { - "description": "Returns an object with the organization-specific configurations", - "schema": { - "$ref": "#/definitions/OrganizationConfig" - } - }, - "404": { - "description": "Could not find organization config", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Unexpected internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, - "/chronograf/v1/org_config/logviewer": { - "get": { - "tags": ["organization config"], - "summary": "Retrieve the organization-specific log viewer configurations", - "description": "Retrieve the log viewer configurations for the user's current organization", - "responses": { - "200": { - "description": "Returns a log viewer configuration object", - "schema": { - "$ref": "#/definitions/LogViewerConfig" - } - }, - "404": { - "description": "Could not find the log viewer configuration for this organization", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Unexpected internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "put": { - "tags": ["organization config"], - "summary": "Update the log viewer configuration", - "description": "Update the log viewer configuration for a
specific organization", - "parameters": [ - { - "name": "logViewer", - "in": "body", - "description": - "Log Viewer configuration update object", - "schema": { - "$ref": "#/definitions/LogViewerConfig" - }, - "required": true - } - ], - "responses": { - "200": { - "description": "Returns an object with the updated log viewer configurations", - "schema": { - "$ref": "#/definitions/LogViewerConfig" - } - }, - "404": { - "description": "Could not find log viewer configurations for the specified organization", - "schema": { - "$ref": "#/definitions/Error" - } - }, - "default": { - "description": "Unexpected internal server error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - } - }, - "definitions": { - "Organization": { - "type": "object", - "description": - "A group of Chronograf users with various role-based access-control.", - "properties": { - "defaultRole": { - "description": - "The default role that new users in this organization will have.", - "type": "string", - "enum": ["member", "viewer", "editor", "admin"] - }, - "id": { - "type": "string", - "description": - "Unique identifier representing an organization resource. The Default organization will have the id 'default', and any further will start at '1' and increment.", - "readOnly": true - }, - "links": { - "type": "object", - "properties": { - "self": { - "type": "string", - "description": "Self link mapping to this resource", - "format": "url" - } - }, - "readOnly": true - }, - "name": { - "type": "string", - "description": "User-facing name of the organization resource." - } - }, - "required": ["name"], - "example": { - "defaultRole": "viewer", - "id": "1", - "links": { - "self": "/chronograf/v1/organizations/1" - }, - "name": "Chronogiraffes" - } - }, - "Organizations": { - "type": "object", - "required": ["organizations"], - "properties": { - "links": { - "type": "object", - "properties": { - "self": { - "type": "string", - "description": "Self link mapping to this resource", - "format": "url" - } - }, - "readOnly": true - }, - "organizations": { - "type": "array", - "items": { - "$ref": "#/definitions/Organization" - } - } - } - }, - "User": { - "type": "object", - "description": - "A Chronograf user with role-based access-control to an organization's resources.", - "properties": { - "id": { - "type": "string", - "description": "Unique identifier representing a user resource", - "readOnly": true - }, - "links": { - "type": "object", - "properties": { - "self": { - "type": "string", - "description": "Self link mapping to this resource", - "format": "url" - } - }, - "readOnly": true - }, - "name": { - "type": "string", - "description": - "Username (as taken from principal given by auth provider)", - "readOnly": true - }, - "provider": { - "type": "string", - "description": "OAuth provider used to authenticate", - "readOnly": true - }, - "roles": { - "type": "array", - "items": { - "$ref": "#/definitions/Role" - } - }, - "scheme": { - "type": "string", - "description": - "Scheme used to authenticate (only OAuth2 currently supported)", - "readOnly": true - }, - "superAdmin": { - "type": "boolean", - "description": - "If user has the ability to perform CRUD operations on Organizations, across Organizations, and on other SuperAdmin users" - } - }, - "required": ["id", "name", "provider", "roles", "scheme"], - "example": { - "id": "1", - "links": { - "self": "/chronograf/v1/users/1" - }, - "name": "pineapple@cubeoctohedron.flux", - "provider": "github", - "roles": { - "name": "editor", - "organization": "SpaceTeam" 
- }, - "scheme": "oauth2", - "superAdmin": false - } - }, - "Users": { - "type": "object", - "required": ["users"], - "properties": { - "links": { - "type": "object", - "properties": { - "self": { - "type": "string", - "description": "Self link mapping to this resource", - "format": "url" - } - } - }, - "users": { - "type": "array", - "items": { - "$ref": "#/definitions/User" - } - } - } - }, - "Role": { - "type": "object", - "properties": { - "name": { - "description": "A Chronograf user's role within an organization.", - "type": "string", - "enum": ["member", "viewer", "editor", "admin"] - }, - "organization": { - "type": "string", - "description": "Name of organization user belongs to" - } - } - }, - "Databases": { - "type": "object", - "required": ["databases"], - "properties": { - "databases": { - "type": "array", - "items": { - "$ref": "#/definitions/Database" - } - } - } - }, - "Database": { - "type": "object", - "required": ["name"], - "example": { - "name": "NOAA_water_database", - "duration": "3d", - "replication": 3, - "shardDuration": "3h", - "retentionPolicies": [ - { - "name": "weekly", - "duration": "7d", - "replication": 1, - "shardDuration": "7d", - "default": true, - "links": { - "self": - "/chronograf/v1/sources/1/dbs/NOAA_water_database/rps/liquid" - } - } - ], - "links": { - "self": "/chronograf/v1/sources/1/dbs/NOAA_water_database", - "rps": "/chronograf/v1/sources/1/dbs/NOAA_water_database/rps", - "measurements": - "/chronograf/v1/sources/1/dbs/NOAA_water_database/measurements?limit=100&offset=0" - } - }, - "properties": { - "name": { - "type": "string", - "description": "The identifying name of the database" - }, - "duration": { - "type": "string", - "description": "the duration of the default retention policy" - }, - "replication": { - "type": "integer", - "format": "int32", - "description": "how many copies of the data are stored in the cluster" - }, - "shardDuration": { - "type": "string", - "description": "the interval spanned by each shard group" - }, - "retentionPolicies": { - "type": "array", - "items": { - "$ref": "#/definitions/RetentionPolicy" - } - }, - "links": { - "type": "object", - "properties": { - "self": { - "type": "string", - "description": "Self link mapping to this resource", - "format": "url" - }, - "rps": { - "type": "string", - "description": "Link to retention policies for this database", - "format": "url" - }, - "measurements": { - "type": "string", - "description": "Link to measurements for this database", - "format": "url" - } - } - } - } - }, - "Kapacitors": { - "type": "object", - "required": ["kapacitors"], - "properties": { - "kapacitors": { - "type": "array", - "items": { - "$ref": "#/definitions/Kapacitor" - } - } - } - }, - "Kapacitor": { - "type": "object", - "required": ["name", "url"], - "example": { - "id": "4", - "name": "kapa", - "url": "http://localhost:9092", - "active": false, - "insecureSkipVerify": false, - "links": { - "proxy": "/chronograf/v1/sources/4/kapacitors/4/proxy", - "self": "/chronograf/v1/sources/4/kapacitors/4", - "rules": "/chronograf/v1/sources/4/kapacitors/4/rules" - } - }, - "properties": { - "id": { - "type": "string", - "description": "Unique identifier representing a kapacitor instance.", - "readOnly": true - }, - "name": { - "type": "string", - "description": "User facing name of kapacitor instance." - }, - "username": { - "type": "string", - "description": "Username for authentication to kapacitor." - }, - "password": { - "type": "string", - "description": "Password is in cleartext." 
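The Database definition and its example show how a client walks from a source to its databases and their retention policies. A sketch that does exactly that, assuming the /sources/{id}/dbs collection route implied by the example's self link and a local, already-authenticated server:

```python
import requests

BASE = "http://localhost:8888/chronograf/v1"  # assumed server address

def default_retention_policies(source_id: str) -> dict:
    """Map each database of a source to its default retention policy name."""
    dbs = requests.get(f"{BASE}/sources/{source_id}/dbs").json()["databases"]
    defaults = {}
    for db in dbs:
        # `default` is optional per the RetentionPolicy definition, so guard it
        rps = db.get("retentionPolicies", [])
        defaults[db["name"]] = next(
            (rp["name"] for rp in rps if rp.get("default")), None)
    return defaults
```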
- }, - "url": { - "type": "string", - "format": "url", - "description": - "URL for the kapacitor backend (e.g. http://localhost:9092)" - }, - "insecureSkipVerify": { - "type": "boolean", - "description": - "True means any certificate presented by the kapacitor is accepted. Typically used for self-signed certs. Probably should only be used for testing." - }, - "active": { - "type": "boolean", - "description": - "Indicates whether the kapacitor is the current kapacitor being used for a source" - }, - "links": { - "type": "object", - "properties": { - "self": { - "type": "string", - "description": "Self link mapping to this resource", - "format": "url" - }, - "proxy": { - "type": "string", - "description": - "URL location of proxy endpoint for this kapacitor", - "format": "url" - }, - "rules": { - "type": "string", - "description": - "URL location of rules endpoint for this kapacitor", - "format": "url" - } - } - } - } - }, - "KapacitorProxy": { - "description": - "Entirely used as the body for the request to the kapacitor backend.", - "type": "object" - }, - "KapacitorProxyResponse": { - "description": "Entire response from the kapacitor backend.", - "type": "object" - }, - "Services": { - "type": "object", - "required": ["services"], - "properties": { - "services": { - "type": "array", - "items": { - "$ref": "#/definitions/Service" - } - } - } - }, - "Service": { - "type": "object", - "required": ["name", "url"], - "example": { - "id": "1", - "sourceID": "1", - "url": "http://localhost:8093", - "insecureSkipVerify": false, - "type": "flux", - "metadata": { - "active": true - }, - "links": { - "proxy": "/chronograf/v1/sources/1/services/1/proxy", - "self": "/chronograf/v1/sources/1/services/1", - "source": "/chronograf/v1/sources/1" - } - }, - "properties": { - "id": { - "type": "string", - "description": "Unique identifier representing a service.", - "readOnly": true - }, - "sourceID": { - "type": "string", - "description": "Unique identifier of the source associated with this service" - }, - "name": { - "type": "string", - "description": "User facing name of the service." - }, - "username": { - "type": "string", - "description": "Credentials for using this service" - }, - "url": { - "type": "string", - "format": "url", - "description": - "URL for the service backend (e.g. http://localhost:8093)" - }, - "insecureSkipVerify": { - "type": "boolean", - "description": - "True means any certificate presented by the service is accepted. Typically used for self-signed certs. Probably should only be used for testing." - }, - "type": { - "type": "string", - "description": "Indicates what kind of service this is (e.g. 
flux service)" - }, - "metadata": { - "type": "object", - "properties": { - "active": { - "type": "boolean", - "description": "Indicates whether the service is the current service being used for a source" - } - } - }, - "links": { - "type": "object", - "properties": { - "self": { - "type": "string", - "description": "Self link mapping to this resource", - "format": "url" - }, - "proxy": { - "type": "string", - "description": - "URL location of proxy endpoint for this service", - "format": "url" - }, - "source": { - "type": "string", - "description": - "URL location of the source this service is associated with", - "format": "url" - } - } - } - } - }, - "ServiceProxy": { - "description": - "Entirely used as the body for the request to the service backend.", - "type": "object" - }, - "ServiceProxyResponse": { - "description": "Entire response from the service backend.", - "type": "object" - }, - "Rules": { - "type": "object", - "required": ["rules"], - "properties": { - "rules": { - "type": "array", - "items": { - "$ref": "#/definitions/Rule" - } - } - } - }, - "Query": { - "type": "object", - "properties": { - "query": { - "type": "string", - "description": "InfluxQL statement to be analyzed", - "example": { - "query": - "select max(usage_system) from telegraf.autogen.cpu group by time(10m)" - } - } - } - }, - "QueryResponse": { - "type": "object", - "properties": { - "query": { - "type": "string", - "example": { - "query": - "select max(usage_system) from telegraf.autogen.cpu group by time(10m)" - }, - "description": "InfluxQL statement to be analyzed" - }, - "queryConfig": { - "$ref": "#/definitions/QueryConfig" - } - } - }, - "Queries": { - "type": "object", - "properties": { - "queries": { - "type": "array", - "items": { - "$ref": "#/definitions/Query" - } - } - } - }, - "QueriesResponse": { - "type": "object", - "properties": { - "queries": { - "type": "array", - "items": { - "$ref": "#/definitions/QueryResponse" - } - } - } - }, - "QueryConfig": { - "type": "object", - "example": { - "id": "ce72917d-1ecb-45ea-a6cb-4c122deb93c7", - "database": "telegraf", - "measurement": "cpu", - "retentionPolicy": "autogen", - "fields": [ - { - "value": "max", - "type": "func", - "args": [ - { - "value": "usage_system", - "type": "field" - } - ] - } - ], - "tags": {}, - "groupBy": { - "time": "10m", - "tags": [] - }, - "areTagsAccepted": true, - "range": { - "lower": "15m", - "upper": "now" - } - }, - "properties": { - "id": { - "type": "string" - }, - "database": { - "type": "string" - }, - "measurement": { - "type": "string" - }, - "retentionPolicy": { - "type": "string" - }, - "areTagsAccepted": { - "type": "boolean" - }, - "rawText": { - "type": "string" - }, - "tags": { - "type": "object" - }, - "groupBy": { - "type": "object", - "properties": { - "time": { - "type": "string" - }, - "tags": { - "type": "array", - "items": { - "type": "string" - } - } - }, - "required": ["time", "tags"] - }, - "fields": { - "type": "array", - "items": { - "$ref": "#/definitions/Field" - } - }, - "range": { - "type": "object", - "properties": { - "lower": { - "type": "string" - }, - "upper": { - "type": "string" - } - }, - "required": ["lower", "upper"] - } - }, - "required": [ - "database", - "measurement", - "retentionPolicy", - "areTagsAccepted", - "tags", - "groupBy", - "fields" - ] - }, - "KapacitorNode": { - "type": "object", - "description": "Represents a node in the kapacitor TICKscript graph", - "required": ["name"], - "properties": { - "name": { - "type": "string", - "description": "Name of the kapacitor 
node e.g. slack" - }, - "args": { - "type": "array", - "description": "All arguments to the named node", - "items": { - "type": "string" - } - }, - "properties": { - "type": "array", - "description": "All properties attached to the kapacitor node", - "items": { - "$ref": "#/definitions/KapacitorProperty" - } - } - } - }, - "Field": { - "type": "object", - "required": ["type", "value"], - "description": "Represents a field to be returned from an InfluxQL query", - "properties": { - "value": { - "description": - "value is the value of the field. Meaning of the value is implied by the `type` key", - "type": "string" - }, - "type": { - "description": - "type describes the field type. func is a function; field is a field reference", - "type": "string", - "enum": ["func", "field", "integer", "number", "regex", "wildcard"] - }, - "alias": { - "description": - "Alias overrides the field name in the returned response. Applies only if type is `func`", - "type": "string" - }, - "args": { - "description": "Args are the arguments to the function", - "type": "array", - "items": { - "$ref": "#/definitions/Field" - } - } - } - }, - "KapacitorProperty": { - "type": "object", - "description": - "Represents a property attached to a node in the kapacitor TICKscript graph", - "required": ["name"], - "properties": { - "name": { - "type": "string", - "description": - "Name of the kapacitor property e.g. channel for a slack node" - }, - "args": { - "type": "array", - "description": "All arguments to the named property", - "items": { - "type": "string" - } - } - } - }, - "RetentionPolicies": { - "type": "object", - "required": ["retentionPolicies"], - "properties": { - "retentionPolicies": { - "type": "array", - "items": { - "$ref": "#/definitions/RetentionPolicy" - } - } - } - }, - "RetentionPolicy": { - "type": "object", - "required": ["name", "duration", "replication"], - "example": { - "name": "weekly", - "duration": "7d", - "replication": 1, - "shardDuration": "7d", - "default": true, - "links": { - "self": "/chronograf/v1/sources/1/dbs/NOAA_water_database/rps/liquid" - } - }, - "properties": { - "name": { - "type": "string", - "description": "The identifying name of the retention policy" - }, - "duration": { - "type": "string", - "description": "the duration of the retention policy" - }, - "replication": { - "type": "integer", - "format": "int32", - "description": "how many copies of the data are stored in the cluster" - }, - "shardDuration": { - "type": "string", - "description": "the interval spanned by each shard group" - }, - "default": { - "type": "boolean", - "description": - "Indicates whether this retention policy should be the default" - }, - "links": { - "type": "object", - "properties": { - "self": { - "type": "string", - "description": "Self link mapping to this resource", - "format": "url" - } - } - } - } - }, - "MeasurementsResponse": { - "type": "object", - "properties": { - "measurements": { - "type": "array", - "items": { - "$ref": "#/definitions/Measurement" - } - }, - "links": { - "type": "object", - "description": - "Links to paginated measurements, relative to the current page.", - "properties": { - "self": { - "type": "string", - "format": "url", - "description": "Current page measurements", - "required": true - }, - "first": { - "type": "string", - "format": "url", - "description": "First page of measurements", - "required": true - }, - "next": { - "type": "string", - "format": "url", - "description": "Next page of measurements", - "required": true - }, - "prev": { - "type": "string", 
- "format": "url", - "description": - "Previous page of measurements, if not at the first page.", - "required": false - } - } - } - }, - "example": { - "measurements": [ - { - "name": "alerts" - }, - { - "name": "annotations" - } - ], - "links": { - "self": - "/chronograf/v1/sources/1/dbs/chronograf/measurements?limit=100&offset=0", - "first": - "/chronograf/v1/sources/1/dbs/chronograf/measurements?limit=100&offset=0", - "next": - "/chronograf/v1/sources/1/dbs/chronograf/measurements?limit=100&offset=100" - } - } - }, - "Measurement": { - "type": "object", - "properties": { - "name": { - "type": "string", - "description": "Measurement name." - } - } - }, - "Rule": { - "type": "object", - "example": { - "id": "chronograf-v1-b2b065ea-79bd-4e4f-8c0d-d0ef68477d38", - "query": { - "id": "ce72917d-1ecb-45ea-a6cb-4c122deb93c7", - "database": "telegraf", - "measurement": "cpu", - "retentionPolicy": "autogen", - "fields": [ - { - "value": "max", - "type": "func", - "args": [ - { - "value": "usage_system", - "type": "field" - } - ] - } - ], - "tags": {}, - "groupBy": { - "time": "10m", - "tags": [] - }, - "areTagsAccepted": true - }, - "every": "30s", - "alerts": ["alerta"], - "alertNodes": [ - { - "name": "alerta", - "args": [], - "properties": [] - } - ], - "message": "too much spam", - "details": "muh body", - "trigger": "threshold", - "values": { - "operator": "greater than", - "value": "10" - }, - "name": "Untitled Rule", - "tickscript": - "var db = 'telegraf'\n\nvar rp = 'autogen'\n\nvar measurement = 'cpu'\n\nvar groupBy = []\n\nvar whereFilter = lambda: TRUE\n\nvar period = 10m\n\nvar every = 30s\n\nvar name = 'Untitled Rule'\n\nvar idVar = name + ':{{.Group}}'\n\nvar message = 'too much spam'\n\nvar idTag = 'alertID'\n\nvar levelTag = 'level'\n\nvar messageField = 'message'\n\nvar durationField = 'duration'\n\nvar outputDB = 'chronograf'\n\nvar outputRP = 'autogen'\n\nvar outputMeasurement = 'alerts'\n\nvar triggerType = 'threshold'\n\nvar details = 'muh body'\n\nvar crit = 10\n\nvar data = stream\n |from()\n .database(db)\n .retentionPolicy(rp)\n .measurement(measurement)\n .groupBy(groupBy)\n .where(whereFilter)\n |window()\n .period(period)\n .every(every)\n .align()\n |max('usage_system')\n .as('value')\n\nvar trigger = data\n |alert()\n .crit(lambda: \"value\" > crit)\n .stateChangesOnly()\n .message(message)\n .id(idVar)\n .idTag(idTag)\n .levelTag(levelTag)\n .messageField(messageField)\n .durationField(durationField)\n .details(details)\n .alerta()\n\ntrigger\n |influxDBOut()\n .create()\n .database(outputDB)\n .retentionPolicy(outputRP)\n .measurement(outputMeasurement)\n .tag('alertName', name)\n .tag('triggerType', triggerType)\n\ntrigger\n |httpOut('output')\n", - "type": "stream", - "dbrps": [ - { - "db": "telegraf", - "rp": "autogen" - } - ], - "status": "disabled", - "executing": false, - "error": "", - "created": "2017-05-05T16:16:03.471138388-05:00", - "modified": "2017-05-23T15:57:42.625909746-05:00", - "last-enabled": "2017-05-05T16:16:25.890210217-05:00", - "links": { - "self": - "/chronograf/v1/sources/5/kapacitors/5/rules/chronograf-v1-b2b065ea-79bd-4e4f-8c0d-d0ef68477d38", - "kapacitor": - "/chronograf/v1/sources/5/kapacitors/5/proxy?path=%2Fkapacitor%2Fv1%2Ftasks%2Fchronograf-v1-b2b065ea-79bd-4e4f-8c0d-d0ef68477d38", - "output": - "/chronograf/v1/sources/5/kapacitors/5/proxy?path=%2Fkapacitor%2Fv1%2Ftasks%2Fchronograf-v1-b2b065ea-79bd-4e4f-8c0d-d0ef68477d38%2Foutput" - } - }, - "required": ["query", "every", "trigger"], - "properties": { - "id": { - "type": "string", 
- "description": "ID for this rule; the ID is shared with kapacitor" - }, - "query": { - "$ref": "#/definitions/QueryConfig" - }, - "name": { - "type": "string", - "description": "User facing name of the alerting rule" - }, - "every": { - "type": "string", - "description": - "Golang duration string specifying how often the alert condition is checked" - }, - "alerts": { - "type": "array", - "description": - "Array of alerting services to warn if the alert is triggered", - "items": { - "type": "string", - "enum": [ - "alerta", - "post", - "http", - "hipchat", - "kafka", - "opsgenie", - "opsgenie2", - "pagerduty", - "pagerduty2", - "victorops", - "email", - "exec", - "log", - "pushover", - "sensu", - "slack", - "smtp", - "talk", - "telegram", - "tcp" - ] - } - }, - "alertNodes": { - "type": "array", - "description": "Arguments and properties to add to alert", - "items": { - "$ref": "#/definitions/KapacitorNode" - } - }, - "message": { - "type": "string", - "description": "Message to send when alert occurs." - }, - "details": { - "type": "string", - "description": - "Template for constructing a detailed HTML message for the alert. (Currently, only used for email/smtp" - }, - "trigger": { - "type": "string", - "description": - "Trigger defines the alerting structure; deadman alert if no data are received for the specified time range; relative alert if the data change relative to the data in a different time range; threshold alert if the data cross a boundary", - "enum": ["deadman", "relative", "threshold"] - }, - "values": { - "type": "object", - "description": "Alerting logic for trigger type", - "properties": { - "change": { - "description": "Specifies if the change is percent or absolute", - "type": "string", - "enum": ["% change", "change"] - }, - "period": { - "description": - "Length of time before deadman is alerted (golang duration)", - "type": "string" - }, - "shift": { - "description": - "Amount of time to look into the past to compare to the present (golang duration)", - "type": "string" - }, - "operator": { - "description": "Operator for alert comparison", - "type": "string", - "enum": [ - "greater than", - "less than", - "equal to or less than", - "equal to or greater", - "equal to", - "not equal to", - "inside range", - "outside range" - ] - }, - "value": { - "description": - "Value is the boundary value when alert goes critical", - "type": "string" - }, - "rangeValue": { - "description": "Optional value for range comparisions", - "type": "string" - } - } - }, - "dbrps": { - "type": "array", - "description": - "List of database retention policy pairs the task is allowed to access.", - "items": { - "$ref": "#/definitions/DBRP" - } - }, - "tickscript": { - "type": "string", - "description": "TICKscript representing this rule" - }, - "status": { - "type": "string", - "description": - "Represents if this rule is enabled or disabled in kapacitor", - "enum": ["enabled", "disabled"] - }, - "executing": { - "type": "boolean", - "description": "Whether the task is currently executing.", - "readOnly": true - }, - "type": { - "type": "string", - "description": - "Represents the task type where stream is data streamed to kapacitor and batch is queried by kapacitor.", - "enum": ["stream", "batch"] - }, - "error": { - "type": "string", - "description": - "Any error encountered when kapacitor executes the task.", - "readOnly": true - }, - "created": { - "type": "string", - "description": "Date the task was first created", - "readOnly": true - }, - "modified": { - "type": "string", - 
"description": "Date the task was last modified", - "readOnly": true - }, - "last-enabled": { - "type": "string", - "description": "Date the task was last set to status enabled", - "readOnly": true - }, - "links": { - "type": "object", - "required": ["self", "kapacitor"], - "properties": { - "self": { - "description": "Self link pointing to this rule resource", - "type": "string", - "format": "uri" - }, - "kapacitor": { - "description": - "Link pointing to the kapacitor proxy for this rule including the path query parameter.", - "type": "string", - "format": "uri" - }, - "output": { - "description": - "Link pointing to the kapacitor httpOut node of the tickscript; includes the path query argument", - "type": "string", - "format": "uri" - } - } - } - } - }, - "DBRP": { - "type": "object", - "description": "Database retention policy pair", - "properties": { - "db": { - "description": "Database name", - "type": "string" - }, - "rp": { - "description": "Retention policy", - "type": "string" - } - }, - "required": ["db", "rp"] - }, - "Sources": { - "type": "array", - "items": { - "$ref": "#/definitions/Source" - } - }, - "Source": { - "type": "object", - "example": { - "id": "4", - "name": "Influx 1", - "type": "influx", - "url": "http://localhost:8086", - "default": false, - "telegraf": "telegraf", - "defaultRP": "customRP", - "organization": "default", - "authentication": "basic", - "role": "viewer", - "links": { - "self": "/chronograf/v1/sources/4", - "kapacitors": "/chronograf/v1/sources/4/kapacitors", - "proxy": "/chronograf/v1/sources/4/proxy", - "write": "/chronograf/v1/sources/4/write", - "queries": "/chronograf/v1/sources/4/queries", - "permissions": "/chronograf/v1/sources/4/permissions", - "users": "/chronograf/v1/sources/4/users", - "roles": "/chronograf/v1/sources/4/roles", - "health": "/chronograf/v1/sources/4/health" - } - }, - "required": ["url"], - "properties": { - "id": { - "type": "string", - "description": - "Unique identifier representing a specific data source.", - "readOnly": true - }, - "name": { - "type": "string", - "description": "User facing name of data source" - }, - "type": { - "type": "string", - "description": "Format of the data source", - "readOnly": true, - "enum": ["influx", "influx-enterprise", "influx-relay"] - }, - "username": { - "type": "string", - "description": "Username for authentication to data source" - }, - "password": { - "type": "string", - "description": "Password is in cleartext." - }, - "sharedSecret": { - "type": "string", - "description": - "JWT signing secret for optional Authorization: Bearer to InfluxDB" - }, - "url": { - "type": "string", - "format": "url", - "description": - "URL for the time series data source backend (e.g. http://localhost:8086)" - }, - "metaUrl": { - "type": "string", - "format": "url", - "description": "URL for the influxdb meta node" - }, - "insecureSkipVerify": { - "type": "boolean", - "description": - "True means any certificate presented by the source is accepted. Typically used for self-signed certs. Probably should only be used for testing." 
- }, - "default": { - "type": "boolean", - "description": "Indicates whether this source is the default source" - }, - "telegraf": { - "type": "string", - "description": - "Database where telegraf information is stored for this source", - "default": "telegraf" - }, - "defaultRP": { - "type": "string", - "description": - "Default retention policy used in Host-related queries proxied to InfluxDB from the Host List and Host pages.", - "default": "" - }, - "organization": { - "type": "string", - "description": - "Organization that this source belongs to, when Chronograf auth is in use", - "default": "default" - }, - "role": { - "type": "string", - "description": - "Not used currently. Can be used to designate a minimum role required to access this source.", - "default": "viewer" - }, - "links": { - "type": "object", - "properties": { - "self": { - "type": "string", - "description": "Self link mapping to this resource", - "format": "url" - }, - "proxy": { - "type": "string", - "description": "URL location of proxy endpoint for this source", - "format": "url" - }, - "write": { - "type": "string", - "description": "URL location of write endpoint for this source", - "format": "url" - }, - "queries": { - "type": "string", - "description": - "URL location of the queries endpoint for this source", - "format": "url" - }, - "kapacitors": { - "type": "string", - "description": - "URL location of the kapacitors endpoint for this source", - "format": "url" - }, - "users": { - "type": "string", - "description": - "URL location of the users endpoint for this source", - "format": "url" - }, - "permissions": { - "type": "string", - "description": - "URL location of the permissions endpoint for this source", - "format": "url" - }, - "roles": { - "type": "string", - "description": - "Optional path to the roles endpoint IFF it is supported on this source", - "format": "url" - }, - "health": { - "type": "string", - "description": "Path to determine if source is healthy", - "format": "url" - } - } - } - } - }, - "Proxy": { - "type": "object", - "example": { - "query": "select $myfield from cpu where time > now() - 10m", - "db": "telegraf", - "rp": "autogen", - "tempVars": [ - { - "tempVar": ":myfield:", - "values": [ - { - "type": "fieldKey", - "value": "usage_user" - } - ] - } - ] - }, - "required": ["query"], - "properties": { - "query": { - "type": "string" - }, - "db": { - "type": "string" - }, - "rp": { - "type": "string" - }, - "epoch": { - "description": "timestamp return format", - "type": "string", - "enum": ["h", "m", "s", "ms", "u", "ns"] - }, - "tempVars": { - "type": "array", - "description": - "Template variables to replace within an InfluxQL query", - "items": { - "$ref": "#/definitions/TemplateVariable" - } - } - } - }, - "TemplateVariable": { - "type": "object", - "description": - "Named variable within an InfluxQL query to be replaced with values", - "properties": { - "tempVar": { - "type": "string", - "description": "String to replace within an InfluxQL statement" - }, - "values": { - "type": "array", - "description": "Values used to replace tempVar.", - "items": { - "$ref": "#/definitions/TemplateValue" - } - } - } - }, - "TemplateValue": { - "type": "object", - "description": - "Value use to replace a template in an InfluxQL query. 
The type governs the output format", - "properties": { - "value": { - "type": "string", - "description": "Specific value that will be encoded based on type" - }, - "type": { - "type": "string", - "enum": ["csv", "tagKey", "tagValue", "fieldKey", "timeStamp", "map"], - "description": - "The type will change the format of the output value. tagKey/fieldKey are double quoted; tagValue are single quoted; csv and timeStamp are not quoted." - }, - "key": { - "type": "string", - "description":"This will be the key for a specific value of a template variable. Used if the templateVar type is 'map'" - } - } - }, - "ProxyResponse": { - "type": "object", - "example": { - "results": [ - { - "statement_id": 0, - "series": [ - { - "name": "cpu", - "columns": [ - "time", - "cpu", - "host", - "usage_guest", - "usage_guest_nice", - "usage_idle", - "usage_iowait", - "usage_irq", - "usage_nice", - "usage_softirq", - "usage_steal", - "usage_system", - "usage_user" - ], - "values": [ - [ - 1487785510000, - "cpu-total", - "ChristohersMBP2.lan", - 0, - 0, - 76.6916354556804, - 0, - 0, - 0, - 0, - 0, - 4.781523096129837, - 18.526841448189764 - ] - ] - } - ] - } - ] - }, - "properties": { - "results": { - "description": "results from influx", - "type": "object" - } - } - }, - "InfluxDB-Roles": { - "type": "array", - "items": { - "$ref": "#/definitions/InfluxDB-Role" - }, - "example": { - "roles": [ - { - "users": [ - { - "name": "admin", - "links": { - "self": "/chronograf/v1/sources/3/users/admin" - } - } - ], - "name": "timetravelers", - "permissions": [ - { - "scope": "database", - "name": "telegraf", - "allowed": ["ReadData", "WriteData"] - } - ], - "links": { - "self": "/chronograf/v1/sources/3/roles/timetravelers" - } - } - ] - } - }, - "InfluxDB-Role": { - "type": "object", - "required": ["name"], - "properties": { - "name": { - "type": "string", - "description": "Unique name of the role", - "maxLength": 254, - "minLength": 1 - }, - "users": { - "$ref": "#/definitions/InfluxDB-Users" - }, - "permissions": { - "$ref": "#/definitions/InfluxDB-Permissions" - }, - "links": { - "type": "object", - "description": "URL relations of this role", - "properties": { - "self": { - "type": "string", - "format": "url", - "description": "URI of resource." 
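The TemplateValue description above pins down concrete quoting rules: tagKey and fieldKey values are rendered double-quoted, tagValue single-quoted, and csv and timeStamp left bare. A small sketch of that substitution as described (the map type, which renders by key, is left out):

```python
def render_temp_var(temp_var: str, query: str, values: list) -> str:
    """Substitute one template variable per the TemplateValue quoting rules."""
    rendered = []
    for v in values:
        if v["type"] in ("tagKey", "fieldKey"):
            rendered.append('"%s"' % v["value"])   # double-quoted identifiers
        elif v["type"] == "tagValue":
            rendered.append("'%s'" % v["value"])   # single-quoted values
        else:                                      # csv, timeStamp: as-is
            rendered.append(v["value"])
    return query.replace(temp_var, ", ".join(rendered))

# Mirrors the Proxy tempVars example above (values are illustrative):
q = render_temp_var(":myfield:",
                    "select :myfield: from cpu where time > now() - 10m",
                    [{"type": "fieldKey", "value": "usage_user"}])
# -> select "usage_user" from cpu where time > now() - 10m
```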
- } - } - } - }, - "example": { - "users": [ - { - "name": "admin", - "links": { - "self": "/chronograf/v1/sources/3/users/admin" - } - } - ], - "name": "timetravelers", - "permissions": [ - { - "scope": "database", - "name": "telegraf", - "allowed": ["ReadData", "WriteData"] - } - ], - "links": { - "self": "/chronograf/v1/sources/3/roles/timetravelers" - } - } - }, - "InfluxDB-Users": { - "type": "object", - "properties": { - "users": { - "type": "array", - "items": { - "$ref": "#/definitions/InfluxDB-User" - } - } - }, - "example": { - "users": [ - { - "name": "docbrown", - "permissions": [ - { - "scope": "all", - "allowed": [ - "ViewAdmin", - "ViewChronograf", - "CreateDatabase", - "CreateUserAndRole", - "DropDatabase", - "DropData", - "ReadData", - "WriteData", - "ManageShard", - "ManageContinuousQuery", - "ManageQuery", - "ManageSubscription", - "Monitor", - "KapacitorAPI" - ] - } - ], - "roles": [ - { - "name": "timetravelers", - "permissions": [ - { - "scope": "database", - "name": "telegraf", - "allowed": ["ReadData", "WriteData"] - } - ], - "links": { - "self": "/chronograf/v1/sources/3/roles/timetravelers" - } - } - ], - "links": { - "self": "/chronograf/v1/sources/3/users/docbrown" - } - } - ] - } - }, - "InfluxDB-User": { - "type": "object", - "properties": { - "name": { - "type": "string", - "description": "Unique name of the user", - "maxLength": 254, - "minLength": 1 - }, - "password": { - "type": "string" - }, - "permissions": { - "$ref": "#/definitions/InfluxDB-Permissions" - }, - "roles": { - "$ref": "#/definitions/InfluxDB-Roles" - }, - "links": { - "type": "object", - "description": "URL relations of this user", - "properties": { - "self": { - "type": "string", - "format": "url", - "description": "URI of resource." - } - } - } - }, - "example": { - "name": "docbrown", - "permissions": [ - { - "scope": "all", - "allowed": [ - "ViewAdmin", - "ViewChronograf", - "CreateDatabase", - "CreateUserAndRole", - "DropDatabase", - "DropData", - "ReadData", - "WriteData", - "ManageShard", - "ManageContinuousQuery", - "ManageQuery", - "ManageSubscription", - "Monitor", - "KapacitorAPI" - ] - } - ], - "roles": [ - { - "name": "timetravelers", - "permissions": [ - { - "scope": "database", - "name": "telegraf", - "allowed": ["ReadData", "WriteData"] - } - ], - "links": { - "self": "/chronograf/v1/sources/3/roles/timetravelers" - } - } - ], - "links": { - "self": "/chronograf/v1/sources/3/users/docbrown" - } - } - }, - "InfluxDB-Permissions": { - "description": - "Permissions represent the entire set of permissions a InfluxDB User or InfluxDB Role may have", - "type": "array", - "items": { - "$ref": "#/definitions/InfluxDB-Permission" - } - }, - "InfluxDB-Permission": { - "description": - "Permission is a specific allowance for InfluxDB User or InfluxDB Role bound to a scope of the data source", - "type": "object", - "required": ["scope", "allowed"], - "properties": { - "scope": { - "type": "string", - "description": - "Describes if the permission is for all databases or restricted to one database", - "enum": ["all", "database"] - }, - "name": { - "type": "string", - "description": - "If the scope is database this identifies the name of the database" - }, - "allowed": { - "$ref": "#/definitions/InfluxDB-Allowances" - } - }, - "example": { - "scope": "database", - "name": "telegraf", - "allowed": ["READ", "WRITE"] - } - }, - "AllPermissions": { - "description": - "All possible permissions for this particular datasource. 
Used as a static list", - "type": "object", - "properties": { - "permissions": { - "$ref": "#/definitions/InfluxDB-Permissions" - }, - "links": { - "type": "object", - "properties": { - "self": { - "description": "Relative link back to the permissions endpoint", - "type": "string", - "format": "uri" - }, - "source": { - "description": "Relative link to host with these permissions", - "type": "string", - "format": "uri" - } - } - } - } - }, - "InfluxDB-Allowances": { - "description": - "Allowances defines what actions a user can have on a scoped permission", - "type": "array", - "items": { - "type": "string", - "description": - "OSS InfluxDB is READ and WRITE. Enterprise is all others", - "enum": [ - "READ", - "WRITE", - "NoPermissions", - "ViewAdmin", - "ViewChronograf", - "CreateDatabase", - "CreateUserAndRole", - "AddRemoveNode", - "DropDatabase", - "DropData", - "ReadData", - "WriteData", - "Rebalance", - "ManageShard", - "ManageContinuousQuery", - "ManageQuery", - "ManageSubscription", - "Monitor", - "CopyShard", - "KapacitorAPI", - "KapacitorConfigAPI" - ] - } - }, - "Layouts": { - "required": ["layouts"], - "type": "object", - "properties": { - "layouts": { - "type": "array", - "items": { - "$ref": "#/definitions/Layout" - } - } - } - }, - "Layout": { - "type": "object", - "required": ["cells", "app", "measurement"], - "properties": { - "id": { - "type": "string", - "description": - "ID is an opaque string that uniquely identifies this layout." - }, - "app": { - "type": "string", - "description": "App is the user facing name of this Layout" - }, - "measurement": { - "type": "string", - "description": - "Measurement is the descriptive name of the time series data." - }, - "cells": { - "type": "array", - "description": "Cells are the individual visualization elements.", - "items": { - "$ref": "#/definitions/Cell" - } - }, - "link": { - "$ref": "#/definitions/Link" - } - }, - "example": { - "id": "0e980b97-c162-487b-a815-3f955df62430", - "app": "docker", - "measurement": "docker_container_net", - "autoflow": true, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "4c79cefb-5152-410c-9b88-74f9bff7ef23", - "name": "Docker - Container Network", - "queries": [ - { - "query": - "SELECT derivative(mean(\"tx_bytes\"), 10s) AS \"net_tx_bytes\" FROM \"docker_container_net\"", - "groupbys": ["\"container_name\""] - }, - { - "query": - "SELECT derivative(mean(\"rx_bytes\"), 10s) AS \"net_rx_bytes\" FROM \"docker_container_net\"", - "groupbys": ["\"container_name\""] - } - ], - "type": "" - } - ], - "link": { - "href": "/chronograf/v1/layouts/0e980b97-c162-487b-a815-3f955df62430", - "rel": "self" - } - } - }, - "Mappings": { - "type": "object", - "required": ["mappings"], - "properties": { - "mappings": { - "type": "array", - "items": { - "$ref": "#/definitions/Mapping" - } - } - } - }, - "Mapping": { - "type": "object", - "required": ["measurement", "name"], - "properties": { - "measurement": { - "description": "The measurement where data for this mapping is found", - "type": "string" - }, - "name": { - "description": - "The application name which will be assigned to the corresponding measurement", - "type": "string" - } - }, - "example": { - "measurement": "riak", - "name": "riak" - } - }, - "Cell": { - "type": "object", - "required": ["i", "x", "y", "w", "h"], - "properties": { - "i": { - "description": "Unique ID of Cell", - "type": "string", - "format": "uuid4" - }, - "x": { - "description": "X-coordinate of Cell in the Dashboard", - "type": "integer", - "format": "int32" - }, -
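An InfluxDB-Permission applies either to every database (scope "all") or to a single named one, so an authorization check must consult both cases. A sketch of that check over the permissions attached directly to an InfluxDB-User document (permissions carried by the user's roles would need the same walk):

```python
def allows(user: dict, action: str, database: str) -> bool:
    """True if any direct permission grants `action` on `database`."""
    for perm in user.get("permissions", []):
        in_scope = (perm["scope"] == "all"
                    or (perm["scope"] == "database"
                        and perm.get("name") == database))
        if in_scope and action in perm["allowed"]:
            return True
    return False

# Against the docbrown example above:
#   allows(docbrown, "WriteData", "telegraf")  -> True (via scope "all")
```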
"y": { - "description": "Y-coordinate of Cell in the Dashboard", - "type": "integer", - "format": "int32" - }, - "w": { - "description": "Width of Cell in the Dashboard", - "type": "integer", - "format": "int32", - "minimum": 1, - "default": 4 - }, - "h": { - "description": "Height of Cell in the Dashboard", - "type": "integer", - "format": "int32", - "minimum": 1, - "default": 4 - }, - "name": { - "description": "Title of Cell in the Dashboard", - "type": "string" - }, - "queries": { - "description": "Time-series data queries for Cell", - "type": "array", - "items": { - "$ref": "#/definitions/DashboardQuery" - } - }, - "axes": { - "description": "The viewport for a Cell's visualizations", - "type": "object", - "properties": { - "x": { - "$ref": "#/definitions/Axis" - }, - "y": { - "$ref": "#/definitions/Axis" - }, - "y2": { - "$ref": "#/definitions/Axis" - } - } - }, - "type": { - "description": "Cell visualization type", - "type": "string", - "enum": [ - "single-stat", - "line", - "line-plus-single-stat", - "line-stacked", - "line-stepplot", - "bar", - "gauge", - "table" - ], - "default": "line" - }, - "colors": { - "description": "Colors define encoding data into a visualization", - "type": "array", - "items": { - "$ref": "#/definitions/DashboardColor" - } - }, - "legend": { - "description": - "Legend define encoding of the data into a cell's legend", - "type": "object", - "properties": { - "type": { - "description": "type is the style of the legend", - "type": "string", - "enum": ["static"] - }, - "orientation": { - "description": - "orientation is the location of the legend with respect to the cell graph", - "type": "string", - "enum": ["top", "bottom", "left", "right"] - } - } - }, - "tableOptions": { - "verticalTimeAxis": { - "description": - "verticalTimeAxis describes the orientation of the table by indicating whether the time axis will be displayed vertically", - "type": "boolean" - }, - "sortBy": { - "description": - "sortBy contains the name of the series that is used for sorting the table", - "type": "object", - "$ref": "#/definitions/RenamableField" - }, - "wrapping": { - "description": - "wrapping describes the text wrapping style to be used in table cells", - "type": "string", - "enum": ["truncate", "wrap", "single-line"] - }, - "fixFirstColumn": { - "description": - "fixFirstColumn indicates whether the first column of the table should be locked", - "type": "boolean" - } - }, - "fieldOptions": { - "description": - "fieldOptions represent the fields retrieved by the query with customization options", - "type": "array", - "items": { - "$ref": "#/definitions/RenamableField" - } - }, - "timeFormat": { - "description": - "timeFormat describes the display format for time values according to moment.js date formatting", - "type": "string" - }, - "decimalPoints": { - "description": - "decimal points indicates whether and how many digits to show after decimal point", - "type": "object", - "properties": { - "isEnforced": { - "description": - "Indicates whether decimal point setting should be enforced", - "type": "bool" - }, - "digits": { - "description": "The number of digits after decimal to display", - "type": "integer" - } - } - }, - "links": { - "type": "object", - "properties": { - "self": { - "type": "string", - "description": "Self link mapping to this resource", - "format": "url" - } - } - } - }, - "example": { - "x": 5, - "y": 5, - "w": 4, - "h": 4, - "name": "usage_user", - "queries": [ - { - "query": - "SELECT mean(\"usage_user\") AS \"usage_user\" FROM \"cpu\"", - 
"label": "%" - } - ], - "type": "line" - } - }, - "LayoutQuery": { - "type": "object", - "required": ["query"], - "properties": { - "label": { - "description": "Optional Y-axis user-facing label for this query", - "type": "string" - }, - "range": { - "description": "Optional default range of the Y-axis", - "type": "object", - "required": ["upper", "lower"], - "properties": { - "upper": { - "description": "Upper bound of the display range of the Y-axis", - "type": "integer", - "format": "int64" - }, - "lower": { - "description": "Lower bound of the display range of the Y-axis", - "type": "integer", - "format": "int64" - } - } - }, - "query": { - "type": "string" - }, - "wheres": { - "description": "Defines the condition clauses for influxdb", - "type": "array", - "items": { - "type": "string" - } - }, - "groupbys": { - "description": "Defines the group by clauses for influxdb", - "type": "array", - "items": { - "type": "string" - } - } - }, - "example": { - "label": "# warnings", - "query": - "SELECT count(\"check_id\") as \"Number Warning\" FROM consul_health_checks", - "wheres": ["\"status\" = 'warning'"], - "groupbys": ["\"service_name\""] - } - }, - "DashboardQuery": { - "type": "object", - "required": ["query"], - "properties": { - "label": { - "description": "Optional Y-axis user-facing label for this query", - "type": "string" - }, - "range": { - "description": "Optional default range of the Y-axis", - "type": "object", - "required": ["upper", "lower"], - "properties": { - "upper": { - "description": "Upper bound of the display range of the Y-axis", - "type": "integer", - "format": "int64" - }, - "lower": { - "description": "Lower bound of the display range of the Y-axis", - "type": "integer", - "format": "int64" - } - } - }, - "query": { - "type": "string" - }, - "source": { - "type": "string", - "format": "url", - "description": "Optional URI for data source for this query" - }, - "queryConfig": { - "$ref": "#/definitions/QueryConfig" - } - }, - "example": { - "id": 4, - "cells": [ - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "name": "", - "queries": [ - { - "query": - "SELECT mean(\"usage_user\") AS \"mean_usage_user\" FROM \"cpu\"", - "label": "%", - "queryConfig": { - "database": "", - "measurement": "cpu", - "retentionPolicy": "", - "fields": [ - { - "value": "mean", - "type": "func", - "alias": "mean_usage_user", - "args": [ - { - "value": "usage_user", - "type": "field" - } - ] - } - ], - "tags": {}, - "groupBy": { - "time": "", - "tags": [] - }, - "areTagsAccepted": false - } - } - ], - "type": "line" - } - ], - "name": "dashboard name", - "links": { - "self": "/chronograf/v1/dashboards/4" - } - } - }, - "Dashboards": { - "description": "a list of dashboards", - "type": "object", - "properties": { - "dashboards": { - "type": "array", - "items": { - "$ref": "#/definitions/Dashboard" - } - } - } - }, - "Dashboard": { - "type": "object", - "properties": { - "id": { - "description": "the unique dashboard id", - "type": "integer", - "format": "int64" - }, - "cells": { - "type": "array", - "items": { - "$ref": "#/definitions/Cell" - } - }, - "name": { - "description": "the user-facing name of the dashboard", - "type": "string" - }, - "links": { - "type": "object", - "properties": { - "self": { - "type": "string", - "description": "Self link mapping to this resource", - "format": "url" - } - } - } - }, - "example": { - "id": 4, - "cells": [ - { - "x": 5, - "y": 5, - "w": 4, - "h": 4, - "name": "usage_user", - "queries": [ - { - "query": - "SELECT mean(\"usage_user\") AS 
\"usage_user\" FROM \"cpu\"", - "db": "telegraf", - "label": "%" - } - ], - "type": "line" - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "name": "usage_system", - "queries": [ - { - "query": - "SELECT mean(\"usage_system\") AS \"usage_system\" FROM \"cpu\"", - "db": "telegraf", - "label": "%" - } - ], - "type": "line" - } - ], - "name": "lalalalala", - "links": { - "self": "/chronograf/v1/dashboards/4" - } - } - }, - "DashboardColor": { - "type": "object", - "description": - "Color defines an encoding of a data value into color space", - "properties": { - "id": { - "description": "ID is the unique id of the cell color", - "type": "string" - }, - "type": { - "description": "Type is how the color is used.", - "type": "string", - "enum": ["min", "max", "threshold"] - }, - "hex": { - "description": "Hex is the hex number of the color", - "type": "string", - "maxLength": 7, - "minLength": 7 - }, - "name": { - "description": "Name is the user-facing name of the hex color", - "type": "string" - }, - "value": { - "description": "Value is the data value mapped to this color", - "type": "string" - } - } - }, - "Axis": { - "type": "object", - "description": "A description of a particular axis for a visualization", - "properties": { - "bounds": { - "type": "array", - "minItems": 0, - "maxItems": 2, - "description": - "The extents of an axis in the form [lower, upper]. Clients determine whether bounds are to be inclusive or exclusive of their limits", - "items": { - "type": "integer", - "format": "int64" - } - }, - "label": { - "description": "label is a description of this Axis", - "type": "string" - }, - "prefix": { - "description": - "Prefix represents a label prefix for formatting axis values.", - "type": "string" - }, - "suffix": { - "description": - "Suffix represents a label suffix for formatting axis values.", - "type": "string" - }, - "base": { - "description": - "Base represents the radix for formatting axis values.", - "type": "string" - }, - "scale": { - "description": - "Scale is the axis formatting scale. 
Supported: \"log\", \"linear\"", - "type": "string" - } - } - }, - "RenamableField": { - "description": - "Describes a field that can be renamed and made visible or invisible", - "type": "object", - "properties": { - "internalName": { - "description": "This is the calculated name of a field", - "readOnly": true, - "type": "string" - }, - "displayName": { - "description": - "This is the name that a field is renamed to by the user", - "type": "string" - }, - "visible": { - "description": - "Indicates whether this field should be visible on the table", - "type": "boolean" - } - } - }, - "Config": { - "description": "Global application configuration", - "type": "object", - "properties": { - "auth": { - "$ref": "#/definitions/AuthConfig" - } - }, - "example": { - "auth": { - "superAdminNewUsers": true - } - } - }, - "AuthConfig": { - "description": "Global application configuration for auth", - "type": "object", - "required": ["superAdminNewUsers"], - "properties": { - "superAdminNewUsers": { - "type": "boolean", - "default": true - } - }, - "example": { - "superAdminNewUsers": true - } - }, - "OrganizationConfig": { - "description": "Configurations for a specific organization", - "type": "object", - "required": ["logViewer"], - "properties": { - "organization": { - "type": "string", - "readOnly": true - }, - "logViewer": { - "$ref": "#/definitions/LogViewerConfig" - } - }, - "example": { - "organization": "default", - "logViewer": { - "columns": [ - { - "name": "severity", - "position": 0, - "encodings": [ - { - "type": "label", - "value": "icon" - }, - { - "type": "label", - "value": "text" - }, - { - "type": "visibility", - "value": "visible" - }, - { - "type": "color", - "name": "ruby", - "value": "emergency" - }, - { - "type": "color", - "name": "rainforest", - "value": "info" - }, - { - "type": "displayName", - "value": "Log Severity!" - } - ] - }, - { - "name": "messages", - "position": 1, - "encodings": [ - { - "type": "visibility", - "value": "hidden" - } - ] - } - ] - } - } - }, - "LogViewerConfig": { - "description": "Contains the organization-specific configuration for the log viewer", - "type": "object", - "required": ["columns"], - "properties": { - "columns": { - "description": "Defines the order, names, and visibility of columns in the log viewer table", - "type": "array", - "items": { - "$ref": "#/definitions/LogViewerColumn" - } - } - }, - "example": { - "columns": [ - { - "name": "severity", - "position": 0, - "encodings": [ - { - "type": "label", - "value": "icon" - }, - { - "type": "label", - "value": "text" - }, - { - "type": "visibility", - "value": "visible" - }, - { - "type": "color", - "name": "ruby", - "value": "emergency" - }, - { - "type": "color", - "name": "rainforest", - "value": "info" - }, - { - "type": "displayName", - "value": "Log Severity!" 
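The LogViewerConfig document is replaced wholesale through the PUT /chronograf/v1/org_config/logviewer route defined earlier, so edits are read-modify-write. A sketch that hides one column, assuming a local server and an authenticated session:

```python
import requests

BASE = "http://localhost:8888/chronograf/v1"  # assumed server address

def hide_column(column_name: str) -> dict:
    """Round-trip the log viewer config, flipping one column to hidden.

    LogViewerConfig requires the full `columns` array, so the whole
    document is sent back rather than a partial patch.
    """
    config = requests.get(f"{BASE}/org_config/logviewer").json()
    for col in config["columns"]:
        if col["name"] == column_name:
            for enc in col["encodings"]:
                if enc["type"] == "visibility":
                    enc["value"] = "hidden"
    resp = requests.put(f"{BASE}/org_config/logviewer", json=config)
    resp.raise_for_status()
    return resp.json()
```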
- } - ] - }, - { - "name": "messages", - "position": 1, - "encodings": [ - { - "type": "visibility", - "value": "hidden" - } - ] - } - ] - } - }, - "LogViewerColumn": { - "description": "Contains the organization-specific configuration for the log viewer", - "type": "object", - "required": [ - "name", - "encodings", - "position" - ], - "properties": { - "name": { - "description": "Unique identifier name of the column", - "type": "string" - }, - "position": { - "type": "integer", - "format": "int32" - }, - "encodings": { - "description": "Composable encoding options for the column", - "type": "array", - "items": { - "description":"Type and value and optional name of an encoding", - "type": "object", - "required": ["type", "value"], - "properties": { - "type": { - "type": "string" - }, - "value": { - "type": "string" - }, - "name": { - "type": "string" - } - } - } - } - }, - "example": { - "name": "severity", - "position": 0, - "encodings": [ - { - "type": "label", - "value": "icon" - }, - { - "type": "label", - "value": "text" - }, - { - "type": "visibility", - "value": "visible" - }, - { - "type": "color", - "name": "ruby", - "value": "emergency" - }, - { - "type": "color", - "name": "rainforest", - "value": "info" - }, - { - "type": "displayName", - "value": "Log Severity!" - } - ] - } - }, - "Routes": { - "type": "object", - "properties": { - "me": { - "description": "Location of the me endpoint.", - "type": "string", - "format": "url" - }, - "layouts": { - "description": "Location of the layouts endpoint", - "type": "string", - "format": "url" - }, - "sources": { - "description": "Location of the sources endpoint", - "type": "string", - "format": "url" - }, - "mappings": { - "description": "Location of the application mappings endpoint", - "type": "string", - "format": "url" - }, - "dashboards": { - "description": "location of the dashboards endpoint", - "type": "string", - "format": "url" - }, - "external": { - "description": - "external links provided to client, ex. 
status feed URL", - "type": "object", - "properties": { - "statusFeed": { - "description": - "link to a JSON Feed for the News Feed on client's Status Page", - "type": "string", - "format": "url" - }, - "custom": { - "description": - "a collection of custom links set by the user to be rendered in the client User menu", - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string", - "format": "url" - } - } - } - } - } - } - }, - "example": { - "layouts": "/chronograf/v1/layouts", - "mappings": "/chronograf/v1/mappings", - "sources": "/chronograf/v1/sources", - "me": "/chronograf/v1/me", - "dashboards": "/chronograf/v1/dashboards", - "external": { - "statusFeed": "http://news.influxdata.com/feed.json", - "custom": [ - { - "name": "InfluxData", - "url": "https://www.influxdata.com" - } - ] - } - } - }, - "Link": { - "type": "object", - "required": ["rel", "href"], - "readOnly": true, - "description": "URI of resource.", - "properties": { - "rel": { - "type": "string" - }, - "href": { - "type": "string", - "format": "url" - } - } - }, - "Error": { - "type": "object", - "properties": { - "code": { - "type": "integer", - "format": "int32" - }, - "message": { - "type": "string" - } - } - } - } -} diff --git a/chronograf/server/swagger_v2.yml b/chronograf/server/swagger_v2.yml deleted file mode 100644 index d413a817e4f..00000000000 --- a/chronograf/server/swagger_v2.yml +++ /dev/null @@ -1,659 +0,0 @@ -openapi: "3.0.0" -info: - title: Chronograf - version: 1.5.0.0 -servers: - - url: /chronograf/v2 -paths: - /cells: - post: - tags: - - Cells - summary: Create a cell - requestBody: - description: cell to create - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/Cell" - responses: - '201': - description: Added cell - content: - application/json: - schema: - $ref: "#/components/schemas/Cell" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - get: - tags: - - Cells - summary: Get all cells - responses: - '200': - description: all cells - content: - application/json: - schema: - $ref: "#/components/schemas/Cells" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/cells/{cellID}': - get: - tags: - - Cells - summary: Get a single Cell - parameters: - - in: path - name: cellID - schema: - type: string - required: true - description: ID of cell to update - responses: - '200': - description: get a single cell - content: - application/json: - schema: - $ref: "#/components/schemas/Cell" - '404': - description: cell not found - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - patch: - tags: - - Cells - summary: Update a single cell - requestBody: - description: patching of a cell - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/Cell" - parameters: - - in: path - name: cellID - schema: - type: string - required: true - description: ID of cell to update - responses: - '200': - description: Updated cell - content: - application/json: - schema: - $ref: "#/components/schemas/Cell" - '404': - description: cell not found - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - default: - description: unexpected error - content: - application/json: - schema: 
- $ref: "#/components/schemas/Error" - delete: - tags: - - Cells - summary: Delete a cell - parameters: - - in: path - name: cellID - schema: - type: string - required: true - description: ID of cell to update - responses: - '204': - description: delete has been accepted - '404': - description: cell not found - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - /dashboards: - post: - tags: - - Dashboards - summary: Create a dashboard - requestBody: - description: dashboard to create - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/Dashboard" - responses: - '201': - description: Added dashboard - content: - application/json: - schema: - $ref: "#/components/schemas/Dashboard" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - get: - tags: - - Dashboards - summary: Get all dashboards - responses: - '200': - description: all dashboards - content: - application/json: - schema: - $ref: "#/components/schemas/Dashboards" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/dashboards/{dashboardID}': - get: - tags: - - Dashboards - summary: Get a single Dashboard - parameters: - - in: path - name: dashboardID - schema: - type: string - required: true - description: ID of dashboard to update - responses: - '200': - description: get a single dashboard - content: - application/json: - schema: - $ref: "#/components/schemas/Dashboard" - '404': - description: dashboard not found - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - patch: - tags: - - Dashboards - summary: Update a single dashboard - requestBody: - description: patching of a dashboard - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/Dashboard" - parameters: - - in: path - name: dashboardID - schema: - type: string - required: true - description: ID of dashboard to update - responses: - '200': - description: Updated dashboard - content: - application/json: - schema: - $ref: "#/components/schemas/Dashboard" - '404': - description: dashboard not found - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - delete: - tags: - - Dashboards - summary: Delete a dashboard - parameters: - - in: path - name: dashboardID - schema: - type: string - required: true - description: ID of dashboard to update - responses: - '204': - description: delete has been accepted - '404': - description: dashboard not found - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" -components: - schemas: - Link: - type: object - readOnly: true - description: URI of resource. - properties: - href: - type: string - format: url - required: [href] - Links: - type: object - readOnly: true - properties: - self: - $ref: "#/components/schemas/Link" - required: [self] - Field: - type: object - properties: - value: - description: >- - value is the value of the field. 
Meaning of the value is implied by - the `type` key - type: string - type: - description: >- - type describes the field type. func is a function; field is a field - reference - type: string - enum: - - func - - field - - integer - - number - - regex - - wildcard - alias: - description: >- - Alias overrides the field name in the returned response. Applies only - if type is `func` - type: string - args: - description: Args are the arguments to the function - type: array - items: - $ref: '#/components/schemas/Field' - QueryConfig: - type: object - required: - - database - - measurement - - retentionPolicy - - areTagsAccepted - - tags - - groupBy - - fields - properties: - id: - type: string - database: - type: string - measurement: - type: string - retentionPolicy: - type: string - areTagsAccepted: - type: boolean - rawText: - type: string - tags: - type: object - groupBy: - type: object - properties: - time: - type: string - tags: - type: array - items: - type: string - required: - - time - - tags - fields: - type: array - items: - $ref: '#/components/schemas/Field' - range: - type: object - properties: - lower: - type: string - upper: - type: string - required: - - lower - - upper - DashboardQuery: - type: object - required: - - query - properties: - label: - type: string - description: Optional Y-axis user-facing label - range: - description: Optional default range of the Y-axis - type: object - required: - - upper - - lower - properties: - upper: - description: Upper bound of the display range of the Y-axis - type: integer - format: int64 - lower: - description: Lower bound of the display range of the Y-axis - type: integer - format: int64 - query: - type: string - source: - type: string - format: url - description: Optional URI for data source for this query - queryConfig: - $ref: '#/components/schemas/QueryConfig' - name: - type: string - description: An optional word or phrase that refers to the query - Axis: - type: object - description: A description of a particular axis for a visualization - properties: - bounds: - type: array - minItems: 0 - maxItems: 2 - description: >- - The extents of an axis in the form [lower, upper]. Clients determine - whether bounds are to be inclusive or exclusive of their limits - items: - type: integer - format: int64 - label: - description: label is a description of this Axis - type: string - prefix: - description: Prefix represents a label prefix for formatting axis values. - type: string - suffix: - description: Suffix represents a label suffix for formatting axis values. - type: string - base: - description: Base represents the radix for formatting axis values. - type: string - scale: - description: 'Scale is the axis formatting scale. Supported: "log", "linear"' - type: string - DashboardColor: - type: object - description: Color defines an encoding of data value into color space - properties: - id: - description: ID is the unique id of the cell color - type: string - type: - description: Type is how the color is used. 
- type: string - enum: - - min - - max - - threshold - hex: - description: Hex is the hex number of the color - type: string - maxLength: 7 - minLength: 7 - name: - description: Name is the user-facing name of the hex color - type: string - value: - description: Value is the data value mapped to this color - type: number - format: float - RenamableField: - description: Describes a field that can be renamed and made visible or invisible - type: object - properties: - internalName: - description: This is the calculated name of a field - readOnly: true - type: string - displayName: - description: This is the name that a field is renamed to by the user - type: string - visible: - description: Indicates whether this field should be visible on the table - type: boolean - V1Visualization: - properties: - type: - type: string - enum: ["chronograf-v1"] - queries: - type: array - items: - $ref: "#/components/schemas/DashboardQuery" - axes: - description: The viewport for a Cell's visualizations - type: object - properties: - x: - $ref: '#/components/schemas/Axis' - y: - $ref: '#/components/schemas/Axis' - y2: - $ref: '#/components/schemas/Axis' - graphType: - description: The viewport for a cell's graph/visualization - type: string - enum: - - single-stat - - line - - line-plus-single-stat - - line-stacked - - line-stepplot - - bar - - gauge - - table - default: line - colors: - description: Colors define color encoding of data into a visualization - type: array - items: - $ref: "#/components/schemas/DashboardColor" - legend: - description: Legend define encoding of data into a cell's legend - type: object - properties: - type: - description: type is the style of the legend - type: string - enum: - - static - orientation: - description: >- - orientation is the location of the legend with respect to the cell - graph - type: string - enum: - - top - - bottom - - left - - right - tableOptions: - properties: - verticalTimeAxis: - description: >- - verticalTimeAxis describes the orientation of the table by - indicating whether the time axis will be displayed vertically - type: boolean - sortBy: - $ref: "#/components/schemas/RenamableField" - wrapping: - description: wrapping describes the text wrapping style to be used in table cells - type: string - enum: - - truncate - - wrap - - single-line - fixFirstColumn: - description: >- - fixFirstColumn indicates whether the first column of the table - should be locked - type: boolean - fieldOptions: - description: >- - fieldOptions represent the fields retrieved by the query with - customization options - type: array - items: - $ref: '#/components/schemas/RenamableField' - timeFormat: - description: >- - timeFormat describes the display format for time values according to - moment.js date formatting - type: string - decimalPoints: - description: >- - decimal points indicates whether and how many digits to show after - decimal point - type: object - properties: - isEnforced: - description: Indicates whether decimal point setting should be enforced - type: boolean - digits: - description: The number of digists after decimal to display - type: integer - EmptyVisualization: - properties: - type: - type: string - enum: ["empty"] - Cell: - properties: - links: - $ref: "#/components/schemas/Links" - id: - readOnly: true - type: string - name: - type: string - visualization: - oneOf: - - $ref: "#/components/schemas/V1Visualization" - - $ref: "#/components/schemas/EmptyVisualization" - Cells: - type: object - properties: - links: - $ref: "#/components/schemas/Links" - 
cells: - type: array - items: - $ref: "#/components/schemas/Cell" - DashboardCell: - type: object - properties: - x: - type: integer - format: int32 - y: - type: integer - format: int32 - w: - type: integer - format: int32 - h: - type: integer - format: int32 - ref: - type: string - description: The reference to a cell from the cells API - Dashboard: - properties: - links: - $ref: "#/components/schemas/Links" - id: - readOnly: true - type: string - name: - type: string - cells: - type: array - items: - $ref: "#/components/schemas/DashboardCell" - Dashboards: - type: object - properties: - links: - $ref: "#/components/schemas/Links" - dashboards: - type: array - items: - $ref: "#/components/schemas/Dashboards" - Error: - properties: - code: - readOnly: true - type: integer - format: int32 - message: - readOnly: true - type: string - required: [code, message] diff --git a/chronograf/server/templates.go b/chronograf/server/templates.go deleted file mode 100644 index 8646c5d89a8..00000000000 --- a/chronograf/server/templates.go +++ /dev/null @@ -1,252 +0,0 @@ -package server - -import ( - "encoding/json" - "fmt" - "net/http" - - "github.com/bouk/httprouter" - "github.com/influxdata/influxdb/v2/chronograf" - idgen "github.com/influxdata/influxdb/v2/chronograf/id" -) - -// ValidTemplateRequest checks if the request sent to the server is the correct format. -func ValidTemplateRequest(template *chronograf.Template) error { - switch template.Type { - default: - return fmt.Errorf("unknown template type %s", template.Type) - case "constant", "csv", "fieldKeys", "tagKeys", "tagValues", "measurements", "databases", "map", "influxql", "text": - } - - for _, v := range template.Values { - switch v.Type { - default: - return fmt.Errorf("unknown template variable type %s", v.Type) - case "csv", "map", "fieldKey", "tagKey", "tagValue", "measurement", "database", "constant", "influxql": - } - - if template.Type == "map" && v.Key == "" { - return fmt.Errorf("templates of type 'map' require a 'key'") - } - } - - if template.Type == "influxql" && template.Query == nil { - return fmt.Errorf("no query set for template of type 'influxql'") - } - - return nil -} - -type templateLinks struct { - Self string `json:"self"` // Self link mapping to this resource -} - -type templateResponse struct { - chronograf.Template - Links templateLinks `json:"links"` -} - -func newTemplateResponses(dID chronograf.DashboardID, tmps []chronograf.Template) []templateResponse { - res := make([]templateResponse, len(tmps)) - for i, t := range tmps { - res[i] = newTemplateResponse(dID, t) - } - return res -} - -type templatesResponses struct { - Templates []templateResponse `json:"templates"` -} - -func newTemplateResponse(dID chronograf.DashboardID, tmp chronograf.Template) templateResponse { - base := "/chronograf/v1/dashboards" - return templateResponse{ - Template: tmp, - Links: templateLinks{ - Self: fmt.Sprintf("%s/%d/templates/%s", base, dID, tmp.ID), - }, - } -} - -// Templates returns all templates from a dashboard within the store -func (s *Service) Templates(w http.ResponseWriter, r *http.Request) { - id, err := paramID("id", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) - return - } - - ctx := r.Context() - d, err := s.Store.Dashboards(ctx).Get(ctx, chronograf.DashboardID(id)) - if err != nil { - notFound(w, id, s.Logger) - return - } - - res := templatesResponses{ - Templates: newTemplateResponses(chronograf.DashboardID(id), d.Templates), - } - encodeJSON(w, 
http.StatusOK, res, s.Logger) -} - -// NewTemplate adds a template to an existing dashboard -func (s *Service) NewTemplate(w http.ResponseWriter, r *http.Request) { - id, err := paramID("id", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) - return - } - - ctx := r.Context() - dash, err := s.Store.Dashboards(ctx).Get(ctx, chronograf.DashboardID(id)) - if err != nil { - notFound(w, id, s.Logger) - return - } - - var template chronograf.Template - if err := json.NewDecoder(r.Body).Decode(&template); err != nil { - invalidJSON(w, s.Logger) - return - } - - if err := ValidTemplateRequest(&template); err != nil { - invalidData(w, err, s.Logger) - return - } - - ids := idgen.UUID{} - tid, err := ids.Generate() - if err != nil { - msg := fmt.Sprintf("Error creating template ID for dashboard %d: %v", id, err) - Error(w, http.StatusInternalServerError, msg, s.Logger) - return - } - template.ID = chronograf.TemplateID(tid) - - dash.Templates = append(dash.Templates, template) - if err := s.Store.Dashboards(ctx).Update(ctx, dash); err != nil { - msg := fmt.Sprintf("Error adding template %s to dashboard %d: %v", tid, id, err) - Error(w, http.StatusInternalServerError, msg, s.Logger) - return - } - - res := newTemplateResponse(dash.ID, template) - encodeJSON(w, http.StatusOK, res, s.Logger) -} - -// TemplateID retrieves a specific template from a dashboard -func (s *Service) TemplateID(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - id, err := paramID("id", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) - return - } - - dash, err := s.Store.Dashboards(ctx).Get(ctx, chronograf.DashboardID(id)) - if err != nil { - notFound(w, id, s.Logger) - return - } - - tid := httprouter.GetParamFromContext(ctx, "tid") - for _, t := range dash.Templates { - if t.ID == chronograf.TemplateID(tid) { - res := newTemplateResponse(chronograf.DashboardID(id), t) - encodeJSON(w, http.StatusOK, res, s.Logger) - return - } - } - - notFound(w, id, s.Logger) -} - -// RemoveTemplate removes a specific template from an existing dashboard -func (s *Service) RemoveTemplate(w http.ResponseWriter, r *http.Request) { - id, err := paramID("id", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) - return - } - - ctx := r.Context() - dash, err := s.Store.Dashboards(ctx).Get(ctx, chronograf.DashboardID(id)) - if err != nil { - notFound(w, id, s.Logger) - return - } - - tid := httprouter.GetParamFromContext(ctx, "tid") - pos := -1 - for i, t := range dash.Templates { - if t.ID == chronograf.TemplateID(tid) { - pos = i - break - } - } - if pos == -1 { - notFound(w, id, s.Logger) - return - } - - dash.Templates = append(dash.Templates[:pos], dash.Templates[pos+1:]...) 
- if err := s.Store.Dashboards(ctx).Update(ctx, dash); err != nil { - msg := fmt.Sprintf("Error removing template %s from dashboard %d: %v", tid, id, err) - Error(w, http.StatusInternalServerError, msg, s.Logger) - return - } - - w.WriteHeader(http.StatusNoContent) -} - -// ReplaceTemplate replaces a template entirely within an existing dashboard -func (s *Service) ReplaceTemplate(w http.ResponseWriter, r *http.Request) { - id, err := paramID("id", r) - if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger) - return - } - - ctx := r.Context() - dash, err := s.Store.Dashboards(ctx).Get(ctx, chronograf.DashboardID(id)) - if err != nil { - notFound(w, id, s.Logger) - return - } - - tid := httprouter.GetParamFromContext(ctx, "tid") - pos := -1 - for i, t := range dash.Templates { - if t.ID == chronograf.TemplateID(tid) { - pos = i - break - } - } - if pos == -1 { - notFound(w, id, s.Logger) - return - } - - var template chronograf.Template - if err := json.NewDecoder(r.Body).Decode(&template); err != nil { - invalidJSON(w, s.Logger) - return - } - - if err := ValidTemplateRequest(&template); err != nil { - invalidData(w, err, s.Logger) - return - } - template.ID = chronograf.TemplateID(tid) - - dash.Templates[pos] = template - if err := s.Store.Dashboards(ctx).Update(ctx, dash); err != nil { - msg := fmt.Sprintf("Error updating template %s in dashboard %d: %v", tid, id, err) - Error(w, http.StatusInternalServerError, msg, s.Logger) - return - } - - res := newTemplateResponse(chronograf.DashboardID(id), template) - encodeJSON(w, http.StatusOK, res, s.Logger) -} diff --git a/chronograf/server/templates_test.go b/chronograf/server/templates_test.go deleted file mode 100644 index 8b0c8711178..00000000000 --- a/chronograf/server/templates_test.go +++ /dev/null @@ -1,101 +0,0 @@ -package server - -import ( - "testing" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -func TestValidTemplateRequest(t *testing.T) { - tests := []struct { - name string - template *chronograf.Template - wantErr bool - }{ - { - name: "Valid Template", - template: &chronograf.Template{ - Type: "fieldKeys", - TemplateVar: chronograf.TemplateVar{ - Values: []chronograf.TemplateValue{ - { - Type: "fieldKey", - }, - }, - }, - }, - }, - { - name: "Invalid Template Type", - wantErr: true, - template: &chronograf.Template{ - Type: "Unknown Type", - TemplateVar: chronograf.TemplateVar{ - Values: []chronograf.TemplateValue{ - { - Type: "fieldKey", - }, - }, - }, - }, - }, - { - name: "Invalid Template Variable Type", - wantErr: true, - template: &chronograf.Template{ - Type: "csv", - TemplateVar: chronograf.TemplateVar{ - Values: []chronograf.TemplateValue{ - { - Type: "unknown value", - }, - }, - }, - }, - }, - { - name: "No query set", - wantErr: true, - template: &chronograf.Template{ - Type: "influxql", - }, - }, - { - name: "Valid Map type", - template: &chronograf.Template{ - Type: "map", - TemplateVar: chronograf.TemplateVar{ - Values: []chronograf.TemplateValue{ - { - Key: "key", - Value: "value", - Type: "map", - }, - }, - }, - }, - }, - { - name: "Map without Key", - wantErr: true, - template: &chronograf.Template{ - Type: "map", - TemplateVar: chronograf.TemplateVar{ - Values: []chronograf.TemplateValue{ - { - Value: "value", - Type: "map", - }, - }, - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if err := ValidTemplateRequest(tt.template); (err != nil) != tt.wantErr { - t.Errorf("ValidTemplateRequest() error = %v, wantErr %v", err, 
tt.wantErr) - } - }) - } -} diff --git a/chronograf/server/test_helpers.go b/chronograf/server/test_helpers.go deleted file mode 100644 index e53c953aad3..00000000000 --- a/chronograf/server/test_helpers.go +++ /dev/null @@ -1,20 +0,0 @@ -package server - -import ( - "encoding/json" - - "github.com/google/go-cmp/cmp" -) - -func jsonEqual(s1, s2 string) (eq bool, err error) { - var o1, o2 interface{} - - if err = json.Unmarshal([]byte(s1), &o1); err != nil { - return - } - if err = json.Unmarshal([]byte(s2), &o2); err != nil { - return - } - - return cmp.Equal(o1, o2), nil -} diff --git a/chronograf/server/url_prefixer.go b/chronograf/server/url_prefixer.go deleted file mode 100644 index 1fd298c4698..00000000000 --- a/chronograf/server/url_prefixer.go +++ /dev/null @@ -1,192 +0,0 @@ -package server - -import ( - "bufio" - "bytes" - "io" - "net/http" - "regexp" - - "github.com/influxdata/influxdb/v2/chronograf" -) - -const ( - ErrNotFlusher = "Expected http.ResponseWriter to be an http.Flusher, but wasn't" -) - -// URLPrefixer is a wrapper for an http.Handler that will prefix all occurrences of a relative URL with the configured Prefix -type URLPrefixer struct { - Prefix string // the prefix to be appended after any detected Attrs - Next http.Handler // the http.Handler which will generate the content to be modified by this handler - Attrs [][]byte // a list of attrs that should have their URLs prefixed. For example `src="` or `href="` would be valid - Logger chronograf.Logger // The logger where prefixing errors will be dispatched to -} - -type wrapResponseWriter struct { - http.ResponseWriter - Substitute *io.PipeWriter - - headerWritten bool - dupHeader *http.Header -} - -func (wrw *wrapResponseWriter) Write(p []byte) (int, error) { - return wrw.Substitute.Write(p) -} - -func (wrw *wrapResponseWriter) WriteHeader(code int) { - if !wrw.headerWritten { - wrw.ResponseWriter.Header().Set("Content-Type", wrw.dupHeader.Get("Content-Type")) - header := wrw.ResponseWriter.Header() - // Filter out content length header to prevent stopping writing - if wrw.dupHeader != nil { - for k, v := range *wrw.dupHeader { - if k == "Content-Length" { - continue - } - header[k] = v - } - } - - wrw.headerWritten = true - } - wrw.ResponseWriter.WriteHeader(code) -} - -// Header() copies the Header map from the underlying ResponseWriter to prevent -// modifications to it by callers -func (wrw *wrapResponseWriter) Header() http.Header { - if wrw.dupHeader == nil { - h := http.Header{} - origHeader := wrw.ResponseWriter.Header() - for k, v := range origHeader { - h[k] = v - } - wrw.dupHeader = &h - } - return *wrw.dupHeader -} - -// ChunkSize is the number of bytes per chunked transfer-encoding -const ChunkSize int = 512 - -// ServeHTTP implements an http.Handler that prefixes relative URLs from the -// Next handler with the configured prefix. It does this by examining the -// stream through the ResponseWriter, and appending the Prefix after any of the -// Attrs detected in the stream. 
-func (up *URLPrefixer) ServeHTTP(rw http.ResponseWriter, r *http.Request) { - // extract the flusher for flushing chunks - flusher, ok := rw.(http.Flusher) - - if !ok { - up.Logger.Info(ErrNotFlusher) - up.Next.ServeHTTP(rw, r) - return - } - - isSVG, _ := regexp.Match(".svg$", []byte(r.URL.String())) - if isSVG { - up.Next.ServeHTTP(rw, r) - return - } - - // chunked transfer because we're modifying the response on the fly, so we - // won't know the final content-length - rw.Header().Set("Connection", "Keep-Alive") - rw.Header().Set("Transfer-Encoding", "chunked") - - writtenCount := 0 // number of bytes written to rw - nextRead, nextWrite := io.Pipe() - go func() { - defer nextWrite.Close() - up.Next.ServeHTTP(&wrapResponseWriter{ResponseWriter: rw, Substitute: nextWrite}, r) - }() - - // setup a buffer which is the max length of our target attrs - b := make([]byte, up.maxlen(up.Attrs...)) - io.ReadFull(nextRead, b) // prime the buffer with the start of the input - buf := bytes.NewBuffer(b) - - // Read next handler's response byte by byte - src := bufio.NewScanner(nextRead) - src.Split(bufio.ScanBytes) - for { - window := buf.Bytes() - - // advance a byte if window is not a src attr - if matchlen, match := up.match(window, up.Attrs...); matchlen == 0 { - if src.Scan() { - // shift the next byte into buf - rw.Write(buf.Next(1)) - writtenCount++ - buf.Write(src.Bytes()) - - if writtenCount >= ChunkSize { - flusher.Flush() - writtenCount = 0 - } - } else { - if err := src.Err(); err != nil { - up.Logger. - WithField("component", "prefixer"). - Error("Error encountered while scanning: err:", err) - } - rw.Write(window) - flusher.Flush() - break - } - continue - } else { - buf.Next(matchlen) // advance to the relative URL - for i := 0; i < matchlen; i++ { - src.Scan() - buf.Write(src.Bytes()) - } - rw.Write(match) // add the src attr to the output - io.WriteString(rw, up.Prefix) // write the prefix - } - } -} - -// match compares the subject against a list of targets. If there is a match -// between any of them a non-zero value is returned. The returned value is the -// length of the match. It is assumed that subject's length > length of all -// targets. The matching []byte is also returned as the second return parameter -func (up *URLPrefixer) match(subject []byte, targets ...[]byte) (int, []byte) { - for _, target := range targets { - if bytes.Equal(subject[:len(target)], target) { - return len(target), target - } - } - return 0, []byte{} -} - -// maxlen returns the length of the largest []byte provided to it as an argument -func (up *URLPrefixer) maxlen(targets ...[]byte) int { - max := 0 - for _, tgt := range targets { - if tlen := len(tgt); tlen > max { - max = tlen - } - } - return max -} - -// NewDefaultURLPrefixer returns a URLPrefixer that will prefix any src and -// href attributes found in HTML as well as any url() directives found in CSS -// with the provided prefix. Additionally, it will prefix any `data-basepath` -// attributes as well for informing front end logic about any prefixes. 
`next` -// is the next http.Handler that will have its output prefixed -func NewDefaultURLPrefixer(prefix string, next http.Handler, lg chronograf.Logger) *URLPrefixer { - return &URLPrefixer{ - Prefix: prefix, - Next: next, - Logger: lg, - Attrs: [][]byte{ - []byte(`src="`), - []byte(`href="`), - []byte(`url(`), - []byte(`data-basepath="`), // for forwarding basepath to frontend - }, - } -} diff --git a/chronograf/server/url_prefixer_test.go b/chronograf/server/url_prefixer_test.go deleted file mode 100644 index 48fb68b92eb..00000000000 --- a/chronograf/server/url_prefixer_test.go +++ /dev/null @@ -1,178 +0,0 @@ -package server_test - -import ( - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "testing" - - "github.com/influxdata/influxdb/v2/chronograf/mocks" - "github.com/influxdata/influxdb/v2/chronograf/server" -) - -var prefixerTests = []struct { - name string - subject string - expected string - shouldErr bool - attrs [][]byte -}{ - { - `One script tag`, - ` - `, - ` - - - - Chronograf - - -
- - `, - false, - [][]byte{ - []byte(`src="`), - []byte(`href="`), - }, - }, -} - -func Test_Server_Prefixer_RewritesURLs(t *testing.T) { - t.Parallel() - - for _, test := range prefixerTests { - subject := test.subject - expected := test.expected - - backend := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, subject) - }) - - pfx := &server.URLPrefixer{Prefix: "/arbitraryprefix", Next: backend, Attrs: test.attrs} - - ts := httptest.NewServer(pfx) - defer ts.Close() - - res, err := http.Get(ts.URL) - if err != nil { - t.Error("Unexpected error fetching from prefixer: err:", err) - } - - actual, err := ioutil.ReadAll(res.Body) - if err != nil { - t.Error("Unable to read prefixed body: err:", err) - } - - if string(actual) != expected+"\n" { - t.Error(test.name, ":\n Unsuccessful prefixing.\n\tWant:", fmt.Sprintf("%+q", expected), "\n\tGot: ", fmt.Sprintf("%+q", string(actual))) - } - } -} - -// clogger is an http.ResponseWriter that is not an http.Flusher. It is used -// for testing the behavior of handlers that may rely on specific behavior of -// http.Flusher -type clogger struct { - next http.ResponseWriter -} - -func (c *clogger) Header() http.Header { - return c.next.Header() -} - -func (c *clogger) Write(bytes []byte) (int, error) { - return c.next.Write(bytes) -} - -func (c *clogger) WriteHeader(code int) { - c.next.WriteHeader(code) -} - -func Test_Server_Prefixer_NoPrefixingWithoutFlusther(t *testing.T) { - backend := http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - fmt.Fprintf(rw, "Hill Valley Preservation Society") - }) - - wrapFunc := func(next http.Handler) http.Handler { - return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - clog := &clogger{rw} - next.ServeHTTP(clog, r) - }) - } - - tl := &mocks.TestLogger{} - pfx := &server.URLPrefixer{ - Prefix: "/hill", - Next: backend, - Logger: tl, - Attrs: [][]byte{ - []byte("href=\""), - }, - } - - ts := httptest.NewServer(wrapFunc(pfx)) - defer ts.Close() - - res, err := http.Get(ts.URL) - if err != nil { - t.Fatal("Unexpected error fetching from prefixer: err:", err) - } - - actual, err := ioutil.ReadAll(res.Body) - if err != nil { - t.Fatal("Unable to read prefixed body: err:", err) - } - - unexpected := "Hill Valley Preservation Society" - expected := "Hill Valley Preservation Society" - if string(actual) == unexpected { - t.Error("No Flusher", ":\n Prefixing occurred without an http.Flusher") - } - - if string(actual) != expected { - t.Error("No Flusher", ":\n\tPrefixing failed to output without an http.Flusher\n\t\tWant:\n", expected, "\n\t\tGot:\n", string(actual)) - } - - if !tl.HasMessage("info", server.ErrNotFlusher) { - t.Error("No Flusher", ":\n Expected Error Message: \"", server.ErrNotFlusher, "\" but saw none. 
Msgs:", tl.Messages) - } -} diff --git a/chronograf/server/users.go b/chronograf/server/users.go deleted file mode 100644 index e9cb286604f..00000000000 --- a/chronograf/server/users.go +++ /dev/null @@ -1,379 +0,0 @@ -package server - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "sort" - "strconv" - - "github.com/bouk/httprouter" - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/roles" -) - -type userRequest struct { - ID uint64 `json:"id,string"` - Name string `json:"name"` - Provider string `json:"provider"` - Scheme string `json:"scheme"` - SuperAdmin bool `json:"superAdmin"` - Roles []chronograf.Role `json:"roles"` -} - -func (r *userRequest) ValidCreate() error { - if r.Name == "" { - return fmt.Errorf("name required on Chronograf User request body") - } - if r.Provider == "" { - return fmt.Errorf("provider required on Chronograf User request body") - } - if r.Scheme == "" { - return fmt.Errorf("scheme required on Chronograf User request body") - } - - // TODO: This Scheme value is hard-coded temporarily since we only currently - // support OAuth2. This hard-coding should be removed whenever we add - // support for other authentication schemes. - r.Scheme = "oauth2" - return r.ValidRoles() -} - -func (r *userRequest) ValidUpdate() error { - if r.Roles == nil { - return fmt.Errorf("no Roles to update") - } - return r.ValidRoles() -} - -func (r *userRequest) ValidRoles() error { - if len(r.Roles) > 0 { - orgs := map[string]bool{} - for _, r := range r.Roles { - if r.Organization == "" { - return fmt.Errorf("no organization was provided") - } - if _, ok := orgs[r.Organization]; ok { - return fmt.Errorf("duplicate organization %q in roles", r.Organization) - } - orgs[r.Organization] = true - switch r.Name { - case roles.MemberRoleName, roles.ViewerRoleName, roles.EditorRoleName, roles.AdminRoleName, roles.WildcardRoleName: - continue - default: - return fmt.Errorf("unknown role %s. Valid roles are 'member', 'viewer', 'editor', 'admin', and '*'", r.Name) - } - } - } - return nil -} - -type userResponse struct { - Links selfLinks `json:"links"` - ID uint64 `json:"id,string"` - Name string `json:"name"` - Provider string `json:"provider"` - Scheme string `json:"scheme"` - SuperAdmin bool `json:"superAdmin"` - Roles []chronograf.Role `json:"roles"` -} - -func newUserResponse(u *chronograf.User, org string) *userResponse { - // This ensures that any user response with no roles returns an empty array instead of - // null when marshaled into JSON. That way, JavaScript doesn't need any guard on the - // key existing and it can simply be iterated over. 
- if u.Roles == nil { - u.Roles = []chronograf.Role{} - } - var selfLink string - if org != "" { - selfLink = fmt.Sprintf("/chronograf/v1/organizations/%s/users/%d", org, u.ID) - } else { - selfLink = fmt.Sprintf("/chronograf/v1/users/%d", u.ID) - } - return &userResponse{ - ID: u.ID, - Name: u.Name, - Provider: u.Provider, - Scheme: u.Scheme, - Roles: u.Roles, - SuperAdmin: u.SuperAdmin, - Links: selfLinks{ - Self: selfLink, - }, - } -} - -type usersResponse struct { - Links selfLinks `json:"links"` - Users []*userResponse `json:"users"` -} - -func newUsersResponse(users []chronograf.User, org string) *usersResponse { - usersResp := make([]*userResponse, len(users)) - for i, user := range users { - usersResp[i] = newUserResponse(&user, org) - } - sort.Slice(usersResp, func(i, j int) bool { - return usersResp[i].ID < usersResp[j].ID - }) - - var selfLink string - if org != "" { - selfLink = fmt.Sprintf("/chronograf/v1/organizations/%s/users", org) - } else { - selfLink = "/chronograf/v1/users" - } - return &usersResponse{ - Users: usersResp, - Links: selfLinks{ - Self: selfLink, - }, - } -} - -// UserID retrieves a Chronograf user with ID from store -func (s *Service) UserID(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - idStr := httprouter.GetParamFromContext(ctx, "id") - id, err := strconv.ParseUint(idStr, 10, 64) - if err != nil { - Error(w, http.StatusBadRequest, fmt.Sprintf("invalid user id: %s", err.Error()), s.Logger) - return - } - user, err := s.Store.Users(ctx).Get(ctx, chronograf.UserQuery{ID: &id}) - if err != nil { - Error(w, http.StatusBadRequest, err.Error(), s.Logger) - return - } - - orgID := httprouter.GetParamFromContext(ctx, "oid") - res := newUserResponse(user, orgID) - location(w, res.Links.Self) - encodeJSON(w, http.StatusOK, res, s.Logger) -} - -// NewUser adds a new Chronograf user to store -func (s *Service) NewUser(w http.ResponseWriter, r *http.Request) { - var req userRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - invalidJSON(w, s.Logger) - return - } - - if err := req.ValidCreate(); err != nil { - invalidData(w, err, s.Logger) - return - } - - ctx := r.Context() - - serverCtx := serverContext(ctx) - cfg, err := s.Store.Config(serverCtx).Get(serverCtx) - if err != nil { - Error(w, http.StatusInternalServerError, err.Error(), s.Logger) - return - } - - if err := s.validRoles(serverCtx, req.Roles); err != nil { - invalidData(w, err, s.Logger) - return - } - - user := &chronograf.User{ - Name: req.Name, - Provider: req.Provider, - Scheme: req.Scheme, - Roles: req.Roles, - } - - if cfg.Auth.SuperAdminNewUsers { - req.SuperAdmin = true - } - - if err := setSuperAdmin(ctx, req, user); err != nil { - Error(w, http.StatusUnauthorized, err.Error(), s.Logger) - return - } - - res, err := s.Store.Users(ctx).Add(ctx, user) - if err != nil { - Error(w, http.StatusBadRequest, err.Error(), s.Logger) - return - } - - orgID := httprouter.GetParamFromContext(ctx, "oid") - cu := newUserResponse(res, orgID) - location(w, cu.Links.Self) - encodeJSON(w, http.StatusCreated, cu, s.Logger) -} - -// RemoveUser deletes a Chronograf user from store -func (s *Service) RemoveUser(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - idStr := httprouter.GetParamFromContext(ctx, "id") - id, err := strconv.ParseUint(idStr, 10, 64) - if err != nil { - Error(w, http.StatusBadRequest, fmt.Sprintf("invalid user id: %s", err.Error()), s.Logger) - return - } - - u, err := s.Store.Users(ctx).Get(ctx, chronograf.UserQuery{ID: &id}) - if err != 
nil { - Error(w, http.StatusNotFound, err.Error(), s.Logger) - return - } - if err := s.Store.Users(ctx).Delete(ctx, u); err != nil { - Error(w, http.StatusBadRequest, err.Error(), s.Logger) - return - } - - w.WriteHeader(http.StatusNoContent) -} - -// UpdateUser updates a Chronograf user in store -func (s *Service) UpdateUser(w http.ResponseWriter, r *http.Request) { - var req userRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - invalidJSON(w, s.Logger) - return - } - - ctx := r.Context() - idStr := httprouter.GetParamFromContext(ctx, "id") - id, err := strconv.ParseUint(idStr, 10, 64) - if err != nil { - Error(w, http.StatusBadRequest, fmt.Sprintf("invalid user id: %s", err.Error()), s.Logger) - return - } - - if err := req.ValidUpdate(); err != nil { - invalidData(w, err, s.Logger) - return - } - - u, err := s.Store.Users(ctx).Get(ctx, chronograf.UserQuery{ID: &id}) - if err != nil { - Error(w, http.StatusNotFound, err.Error(), s.Logger) - return - } - - serverCtx := serverContext(ctx) - if err := s.validRoles(serverCtx, req.Roles); err != nil { - invalidData(w, err, s.Logger) - return - } - - // ValidUpdate should ensure that req.Roles is not nil - u.Roles = req.Roles - - // If the request contains a name, it must be the same as the - // one on the user. This is particularly useful to the front-end - // because they would like to provide the whole user object, - // including the name, provider, and scheme in update requests. - // But currently, it is not possible to change name, provider, or - // scheme via the API. - if req.Name != "" && req.Name != u.Name { - err := fmt.Errorf("cannot update Name") - invalidData(w, err, s.Logger) - return - } - if req.Provider != "" && req.Provider != u.Provider { - err := fmt.Errorf("cannot update Provider") - invalidData(w, err, s.Logger) - return - } - if req.Scheme != "" && req.Scheme != u.Scheme { - err := fmt.Errorf("cannot update Scheme") - invalidData(w, err, s.Logger) - return - } - - // Don't allow SuperAdmins to modify their own SuperAdmin status. - // Allowing them to do so could result in an application where there - // are no super admins. 
- ctxUser, ok := hasUserContext(ctx) - if !ok { - Error(w, http.StatusInternalServerError, "failed to retrieve user from context", s.Logger) - return - } - // If the user being updated is the user making the request and they are - // changing their SuperAdmin status, return an unauthorized error - if ctxUser.ID == u.ID && u.SuperAdmin && !req.SuperAdmin { - Error(w, http.StatusUnauthorized, "user cannot modify their own SuperAdmin status", s.Logger) - return - } - - if err := setSuperAdmin(ctx, req, u); err != nil { - Error(w, http.StatusUnauthorized, err.Error(), s.Logger) - return - } - - err = s.Store.Users(ctx).Update(ctx, u) - if err != nil { - Error(w, http.StatusBadRequest, err.Error(), s.Logger) - return - } - - orgID := httprouter.GetParamFromContext(ctx, "oid") - cu := newUserResponse(u, orgID) - location(w, cu.Links.Self) - encodeJSON(w, http.StatusOK, cu, s.Logger) -} - -// Users retrieves all Chronograf users from store -func (s *Service) Users(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - users, err := s.Store.Users(ctx).All(ctx) - if err != nil { - Error(w, http.StatusBadRequest, err.Error(), s.Logger) - return - } - - orgID := httprouter.GetParamFromContext(ctx, "oid") - res := newUsersResponse(users, orgID) - encodeJSON(w, http.StatusOK, res, s.Logger) -} - -func setSuperAdmin(ctx context.Context, req userRequest, user *chronograf.User) error { - // At a high level, this function checks the following - // 1. Is the user making the request a SuperAdmin. - // If they are, allow them to make whatever changes they please. - // - // 2. Is the user making the request trying to change the SuperAdmin - // status. If so, return an error. - // - // 3. If none of the above are the case, let the user make whichever - // changes were requested. - - // Only allow users to set SuperAdmin if they have the superadmin context - // TODO(desa): Refactor this https://github.com/influxdata/influxdb/chronograf/issues/2207 - if isSuperAdmin := hasSuperAdminContext(ctx); isSuperAdmin { - user.SuperAdmin = req.SuperAdmin - } else if !isSuperAdmin && (user.SuperAdmin != req.SuperAdmin) { - // If req.SuperAdmin has been set, and the request was not made with the SuperAdmin - // context, return error - return fmt.Errorf("user does not have authorization required to set SuperAdmin status. 
See https://github.com/influxdata/influxdb/chronograf/issues/2601 for more information") - } - - return nil -} - -func (s *Service) validRoles(ctx context.Context, rs []chronograf.Role) error { - for i, role := range rs { - // verify that the organization exists - org, err := s.Store.Organizations(ctx).Get(ctx, chronograf.OrganizationQuery{ID: &role.Organization}) - if err != nil { - return err - } - if role.Name == roles.WildcardRoleName { - role.Name = org.DefaultRole - rs[i] = role - } - } - - return nil -} diff --git a/chronograf/server/users_test.go b/chronograf/server/users_test.go deleted file mode 100644 index 9bd745a1cd8..00000000000 --- a/chronograf/server/users_test.go +++ /dev/null @@ -1,1771 +0,0 @@ -package server - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "testing" - - "github.com/bouk/httprouter" - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/mocks" - "github.com/influxdata/influxdb/v2/chronograf/roles" -) - -func TestService_UserID(t *testing.T) { - type fields struct { - UsersStore chronograf.UsersStore - Logger chronograf.Logger - } - type args struct { - w *httptest.ResponseRecorder - r *http.Request - } - tests := []struct { - name string - fields fields - args args - id string - wantStatus int - wantContentType string - wantBody string - }{ - { - name: "Get Single Chronograf User", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest( - "GET", - "http://any.url", // can be any valid URL as we are bypassing mux - nil, - ), - }, - fields: fields{ - Logger: &chronograf.NoopLogger{}, - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - switch *q.ID { - case 1337: - return &chronograf.User{ - ID: 1337, - Name: "billysteve", - Provider: "google", - Scheme: "oauth2", - Roles: []chronograf.Role{ - roles.ViewerRole, - }, - }, nil - default: - return nil, fmt.Errorf("user with ID %d not found", *q.ID) - } - }, - }, - }, - id: "1337", - wantStatus: http.StatusOK, - wantContentType: "application/json", - wantBody: `{"id":"1337","superAdmin":false,"name":"billysteve","provider":"google","scheme":"oauth2","links":{"self":"/chronograf/v1/users/1337"},"roles":[{"name":"viewer"}]}`, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := &Service{ - Store: &mocks.Store{ - UsersStore: tt.fields.UsersStore, - }, - Logger: tt.fields.Logger, - } - - tt.args.r = tt.args.r.WithContext(httprouter.WithParams( - context.Background(), - httprouter.Params{ - { - Key: "id", - Value: tt.id, - }, - })) - - s.UserID(tt.args.w, tt.args.r) - - resp := tt.args.w.Result() - content := resp.Header.Get("Content-Type") - body, _ := ioutil.ReadAll(resp.Body) - - if resp.StatusCode != tt.wantStatus { - t.Errorf("%q. UserID() = %v, want %v", tt.name, resp.StatusCode, tt.wantStatus) - } - if tt.wantContentType != "" && content != tt.wantContentType { - t.Errorf("%q. UserID() = %v, want %v", tt.name, content, tt.wantContentType) - } - if eq, _ := jsonEqual(string(body), tt.wantBody); tt.wantBody != "" && !eq { - t.Errorf("%q. 
UserID() = \n***%v***\n,\nwant\n***%v***", tt.name, string(body), tt.wantBody) - } - }) - } -} - -func TestService_NewUser(t *testing.T) { - type fields struct { - UsersStore chronograf.UsersStore - OrganizationsStore chronograf.OrganizationsStore - ConfigStore chronograf.ConfigStore - Logger chronograf.Logger - } - type args struct { - w *httptest.ResponseRecorder - r *http.Request - user *userRequest - userKeyUser *chronograf.User - } - tests := []struct { - name string - fields fields - args args - wantStatus int - wantContentType string - wantBody string - }{ - { - name: "Create a new Chronograf User", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest( - "POST", - "http://any.url", - nil, - ), - user: &userRequest{ - Name: "bob", - Provider: "github", - Scheme: "oauth2", - }, - }, - fields: fields{ - Logger: &chronograf.NoopLogger{}, - ConfigStore: &mocks.ConfigStore{ - Config: &chronograf.Config{ - Auth: chronograf.AuthConfig{ - SuperAdminNewUsers: false, - }, - }, - }, - UsersStore: &mocks.UsersStore{ - AddF: func(ctx context.Context, user *chronograf.User) (*chronograf.User, error) { - return &chronograf.User{ - ID: 1338, - Name: "bob", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{}, - }, nil - }, - }, - }, - wantStatus: http.StatusCreated, - wantContentType: "application/json", - wantBody: `{"id":"1338","superAdmin":false,"name":"bob","provider":"github","scheme":"oauth2","roles":[],"links":{"self":"/chronograf/v1/users/1338"}}`, - }, - { - name: "Create a new Chronograf User with multiple roles", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest( - "POST", - "http://any.url", - nil, - ), - user: &userRequest{ - Name: "bob", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: roles.AdminRoleName, - Organization: "1", - }, - { - Name: roles.ViewerRoleName, - Organization: "2", - }, - }, - }, - }, - fields: fields{ - Logger: &chronograf.NoopLogger{}, - ConfigStore: &mocks.ConfigStore{ - Config: &chronograf.Config{ - Auth: chronograf.AuthConfig{ - SuperAdminNewUsers: false, - }, - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - switch *q.ID { - case "1": - return &chronograf.Organization{ - ID: "1", - Name: "org", - DefaultRole: roles.ViewerRoleName, - }, nil - case "2": - return &chronograf.Organization{ - ID: "2", - Name: "another", - DefaultRole: roles.MemberRoleName, - }, nil - } - return nil, fmt.Errorf("org not found") - }, - }, - UsersStore: &mocks.UsersStore{ - AddF: func(ctx context.Context, user *chronograf.User) (*chronograf.User, error) { - return &chronograf.User{ - ID: 1338, - Name: "bob", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: roles.AdminRoleName, - Organization: "1", - }, - { - Name: roles.ViewerRoleName, - Organization: "2", - }, - }, - }, nil - }, - }, - }, - wantStatus: http.StatusCreated, - wantContentType: "application/json", - wantBody: `{"id":"1338","superAdmin":false,"name":"bob","provider":"github","scheme":"oauth2","roles":[{"name":"admin","organization":"1"},{"name":"viewer","organization":"2"}],"links":{"self":"/chronograf/v1/users/1338"}}`, - }, - { - name: "Create a new Chronograf User with multiple roles same org", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest( - "POST", - "http://any.url", - nil, - ), - user: &userRequest{ - Name: "bob", - Provider: "github", - Scheme: "oauth2", - Roles: 
[]chronograf.Role{ - { - Name: roles.AdminRoleName, - Organization: "1", - }, - { - Name: roles.ViewerRoleName, - Organization: "1", - }, - }, - }, - }, - fields: fields{ - Logger: &chronograf.NoopLogger{}, - ConfigStore: &mocks.ConfigStore{ - Config: &chronograf.Config{ - Auth: chronograf.AuthConfig{ - SuperAdminNewUsers: false, - }, - }, - }, - UsersStore: &mocks.UsersStore{ - AddF: func(ctx context.Context, user *chronograf.User) (*chronograf.User, error) { - return &chronograf.User{ - ID: 1338, - Name: "bob", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: roles.AdminRoleName, - Organization: "1", - }, - { - Name: roles.ViewerRoleName, - Organization: "1", - }, - }, - }, nil - }, - }, - }, - wantStatus: http.StatusUnprocessableEntity, - wantContentType: "application/json", - wantBody: `{"code":422,"message":"duplicate organization \"1\" in roles"}`, - }, - { - name: "Create a new SuperAdmin User - Not as superadmin", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest( - "POST", - "http://any.url", - nil, - ), - user: &userRequest{ - Name: "bob", - Provider: "github", - Scheme: "oauth2", - SuperAdmin: true, - }, - userKeyUser: &chronograf.User{ - ID: 0, - Name: "coolUser", - Provider: "github", - Scheme: "oauth2", - SuperAdmin: false, - }, - }, - fields: fields{ - Logger: &chronograf.NoopLogger{}, - ConfigStore: &mocks.ConfigStore{ - Config: &chronograf.Config{ - Auth: chronograf.AuthConfig{ - SuperAdminNewUsers: false, - }, - }, - }, - UsersStore: &mocks.UsersStore{ - AddF: func(ctx context.Context, user *chronograf.User) (*chronograf.User, error) { - return &chronograf.User{ - ID: 1338, - Name: "bob", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{}, - }, nil - }, - }, - }, - wantStatus: http.StatusUnauthorized, - wantContentType: "application/json", - wantBody: `{"code":401,"message":"user does not have authorization required to set SuperAdmin status. 
See https://github.com/influxdata/influxdb/chronograf/issues/2601 for more information"}`, - }, - { - name: "Create a new SuperAdmin User - as superadmin", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest( - "POST", - "http://any.url", - nil, - ), - user: &userRequest{ - Name: "bob", - Provider: "github", - Scheme: "oauth2", - SuperAdmin: true, - }, - userKeyUser: &chronograf.User{ - ID: 0, - Name: "coolUser", - Provider: "github", - Scheme: "oauth2", - SuperAdmin: true, - }, - }, - fields: fields{ - Logger: &chronograf.NoopLogger{}, - ConfigStore: &mocks.ConfigStore{ - Config: &chronograf.Config{ - Auth: chronograf.AuthConfig{ - SuperAdminNewUsers: false, - }, - }, - }, - UsersStore: &mocks.UsersStore{ - AddF: func(ctx context.Context, user *chronograf.User) (*chronograf.User, error) { - return &chronograf.User{ - ID: 1338, - Name: "bob", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{}, - SuperAdmin: true, - }, nil - }, - }, - }, - wantStatus: http.StatusCreated, - wantContentType: "application/json", - wantBody: `{"id":"1338","superAdmin":true,"name":"bob","provider":"github","scheme":"oauth2","roles":[],"links":{"self":"/chronograf/v1/users/1338"}}`, - }, - { - name: "Create a new User with SuperAdminNewUsers: true in ConfigStore", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest( - "POST", - "http://any.url", - nil, - ), - user: &userRequest{ - Name: "bob", - Provider: "github", - Scheme: "oauth2", - }, - userKeyUser: &chronograf.User{ - ID: 0, - Name: "coolUser", - Provider: "github", - Scheme: "oauth2", - SuperAdmin: true, - }, - }, - fields: fields{ - Logger: &chronograf.NoopLogger{}, - ConfigStore: &mocks.ConfigStore{ - Config: &chronograf.Config{ - Auth: chronograf.AuthConfig{ - SuperAdminNewUsers: true, - }, - }, - }, - UsersStore: &mocks.UsersStore{ - AddF: func(ctx context.Context, user *chronograf.User) (*chronograf.User, error) { - user.ID = 1338 - return user, nil - }, - }, - }, - wantStatus: http.StatusCreated, - wantContentType: "application/json", - wantBody: `{"id":"1338","superAdmin":true,"name":"bob","provider":"github","scheme":"oauth2","roles":[],"links":{"self":"/chronograf/v1/users/1338"}}`, - }, - { - name: "Create a new Chronograf User with multiple roles with wildcard default role", - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest( - "POST", - "http://any.url", - nil, - ), - user: &userRequest{ - Name: "bob", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: roles.AdminRoleName, - Organization: "1", - }, - { - Name: roles.WildcardRoleName, - Organization: "2", - }, - }, - }, - }, - fields: fields{ - Logger: &chronograf.NoopLogger{}, - ConfigStore: &mocks.ConfigStore{ - Config: &chronograf.Config{ - Auth: chronograf.AuthConfig{ - SuperAdminNewUsers: false, - }, - }, - }, - OrganizationsStore: &mocks.OrganizationsStore{ - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - switch *q.ID { - case "1": - return &chronograf.Organization{ - ID: "1", - Name: "org", - DefaultRole: roles.ViewerRoleName, - }, nil - case "2": - return &chronograf.Organization{ - ID: "2", - Name: "another", - DefaultRole: roles.MemberRoleName, - }, nil - } - return nil, fmt.Errorf("org not found") - }, - }, - UsersStore: &mocks.UsersStore{ - AddF: func(ctx context.Context, user *chronograf.User) (*chronograf.User, error) { - return &chronograf.User{ - ID: 1338, - Name: "bob", - Provider: "github", - Scheme: "oauth2", - 
Roles: []chronograf.Role{ - { - Name: roles.AdminRoleName, - Organization: "1", - }, - { - Name: roles.MemberRoleName, - Organization: "2", - }, - }, - }, nil - }, - }, - }, - wantStatus: http.StatusCreated, - wantContentType: "application/json", - wantBody: `{"id":"1338","superAdmin":false,"name":"bob","provider":"github","scheme":"oauth2","roles":[{"name":"admin","organization":"1"},{"name":"member","organization":"2"}],"links":{"self":"/chronograf/v1/users/1338"}}`, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := &Service{ - Store: &mocks.Store{ - UsersStore: tt.fields.UsersStore, - ConfigStore: tt.fields.ConfigStore, - OrganizationsStore: tt.fields.OrganizationsStore, - }, - Logger: tt.fields.Logger, - } - - buf, _ := json.Marshal(tt.args.user) - tt.args.r.Body = ioutil.NopCloser(bytes.NewReader(buf)) - - ctx := tt.args.r.Context() - if tt.args.userKeyUser != nil { - ctx = context.WithValue(ctx, UserContextKey, tt.args.userKeyUser) - } - - tt.args.r = tt.args.r.WithContext(ctx) - - s.NewUser(tt.args.w, tt.args.r) - - resp := tt.args.w.Result() - content := resp.Header.Get("Content-Type") - body, _ := ioutil.ReadAll(resp.Body) - - if resp.StatusCode != tt.wantStatus { - t.Errorf("%q. UserID() = %v, want %v", tt.name, resp.StatusCode, tt.wantStatus) - } - if tt.wantContentType != "" && content != tt.wantContentType { - t.Errorf("%q. UserID() = %v, want %v", tt.name, content, tt.wantContentType) - } - if eq, _ := jsonEqual(string(body), tt.wantBody); tt.wantBody != "" && !eq { - t.Errorf("%q. UserID() = \n***%v***\n,\nwant\n***%v***", tt.name, string(body), tt.wantBody) - } - }) - } -} - -func TestService_RemoveUser(t *testing.T) { - type fields struct { - UsersStore chronograf.UsersStore - Logger chronograf.Logger - } - type args struct { - w *httptest.ResponseRecorder - r *http.Request - user *chronograf.User - id string - } - tests := []struct { - name string - fields fields - args args - wantStatus int - wantBody string - }{ - { - name: "Delete a Chronograf User", - fields: fields{ - Logger: &chronograf.NoopLogger{}, - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - switch *q.ID { - case 1339: - return &chronograf.User{ - ID: 1339, - Name: "helena", - Provider: "heroku", - Scheme: "oauth2", - }, nil - default: - return nil, fmt.Errorf("user with ID %d not found", *q.ID) - } - }, - DeleteF: func(ctx context.Context, user *chronograf.User) error { - return nil - }, - }, - }, - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest( - "DELETE", - "http://any.url", - nil, - ), - user: &chronograf.User{ - ID: 1338, - Name: "helena", - Provider: "heroku", - Scheme: "oauth2", - }, - id: "1339", - }, - wantStatus: http.StatusNoContent, - }, - { - name: "Deleting yourself", - fields: fields{ - Logger: &chronograf.NoopLogger{}, - UsersStore: &mocks.UsersStore{ - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - switch *q.ID { - case 1339: - return &chronograf.User{ - ID: 1339, - Name: "helena", - Provider: "heroku", - Scheme: "oauth2", - }, nil - default: - return nil, fmt.Errorf("user with ID %d not found", *q.ID) - } - }, - DeleteF: func(ctx context.Context, user *chronograf.User) error { - return nil - }, - }, - }, - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest( - "DELETE", - "http://any.url", - nil, - ), - user: &chronograf.User{ - ID: 1339, - Name: "helena", - Provider: "heroku", - Scheme: "oauth2", - }, - id: 
"1339", - }, - wantStatus: http.StatusNoContent, - wantBody: ``, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := &Service{ - Store: &mocks.Store{ - UsersStore: tt.fields.UsersStore, - }, - Logger: tt.fields.Logger, - } - - tt.args.r = tt.args.r.WithContext(httprouter.WithParams( - context.Background(), - httprouter.Params{ - { - Key: "id", - Value: tt.args.id, - }, - }, - )) - - if tt.args.user != nil { - ctx := tt.args.r.Context() - ctx = context.WithValue(ctx, UserContextKey, tt.args.user) - tt.args.r = tt.args.r.WithContext(ctx) - } - - s.RemoveUser(tt.args.w, tt.args.r) - - resp := tt.args.w.Result() - body, _ := ioutil.ReadAll(resp.Body) - - if resp.StatusCode != tt.wantStatus { - t.Errorf("%q. RemoveUser() = %v, want %v", tt.name, resp.StatusCode, tt.wantStatus) - } - if tt.wantStatus == http.StatusNoContent { - return - } - if eq, _ := jsonEqual(string(body), tt.wantBody); !eq { - t.Errorf("%q. RemoveUser() = %v, want %v", tt.name, string(body), tt.wantBody) - } - }) - } -} - -func TestService_UpdateUser(t *testing.T) { - type fields struct { - UsersStore chronograf.UsersStore - OrganizationsStore chronograf.OrganizationsStore - Logger chronograf.Logger - } - type args struct { - w *httptest.ResponseRecorder - r *http.Request - user *userRequest - userKeyUser *chronograf.User - } - tests := []struct { - name string - fields fields - args args - id string - wantStatus int - wantContentType string - wantBody string - }{ - { - name: "Update a Chronograf user - no roles", - fields: fields{ - Logger: &chronograf.NoopLogger{}, - UsersStore: &mocks.UsersStore{ - UpdateF: func(ctx context.Context, user *chronograf.User) error { - return nil - }, - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - switch *q.ID { - case 1336: - return &chronograf.User{ - ID: 1336, - Name: "bobbetta", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: roles.EditorRoleName, - Organization: "1", - }, - }, - }, nil - default: - return nil, fmt.Errorf("user with ID %d not found", *q.ID) - } - }, - }, - }, - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest( - "PATCH", - "http://any.url", - nil, - ), - userKeyUser: &chronograf.User{ - ID: 0, - Name: "coolUser", - Provider: "github", - Scheme: "oauth2", - SuperAdmin: false, - }, - user: &userRequest{ - ID: 1336, - Roles: []chronograf.Role{}, - }, - }, - id: "1336", - wantStatus: http.StatusOK, - wantContentType: "application/json", - wantBody: `{"id":"1336","superAdmin":false,"name":"bobbetta","provider":"github","scheme":"oauth2","links":{"self":"/chronograf/v1/users/1336"},"roles":[]}`, - }, - { - name: "Update a Chronograf user", - fields: fields{ - Logger: &chronograf.NoopLogger{}, - OrganizationsStore: &mocks.OrganizationsStore{ - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - switch *q.ID { - case "1": - return &chronograf.Organization{ - ID: "1", - Name: "org", - DefaultRole: roles.ViewerRoleName, - }, nil - } - return nil, fmt.Errorf("org not found") - }, - }, - UsersStore: &mocks.UsersStore{ - UpdateF: func(ctx context.Context, user *chronograf.User) error { - return nil - }, - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - switch *q.ID { - case 1336: - return &chronograf.User{ - ID: 1336, - Name: "bobbetta", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: roles.EditorRoleName, - Organization: "1", - }, - }, - }, 
nil - default: - return nil, fmt.Errorf("user with ID %d not found", *q.ID) - } - }, - }, - }, - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest( - "PATCH", - "http://any.url", - nil, - ), - userKeyUser: &chronograf.User{ - ID: 0, - Name: "coolUser", - Provider: "github", - Scheme: "oauth2", - SuperAdmin: false, - }, - user: &userRequest{ - ID: 1336, - Roles: []chronograf.Role{ - { - Name: roles.AdminRoleName, - Organization: "1", - }, - }, - }, - }, - id: "1336", - wantStatus: http.StatusOK, - wantContentType: "application/json", - wantBody: `{"id":"1336","superAdmin":false,"name":"bobbetta","provider":"github","scheme":"oauth2","links":{"self":"/chronograf/v1/users/1336"},"roles":[{"name":"admin","organization":"1"}]}`, - }, - { - name: "Update a Chronograf user roles different orgs", - fields: fields{ - Logger: &chronograf.NoopLogger{}, - OrganizationsStore: &mocks.OrganizationsStore{ - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - switch *q.ID { - case "1": - return &chronograf.Organization{ - ID: "1", - Name: "org", - DefaultRole: roles.ViewerRoleName, - }, nil - case "2": - return &chronograf.Organization{ - ID: "2", - Name: "another", - DefaultRole: roles.ViewerRoleName, - }, nil - } - return nil, fmt.Errorf("org not found") - }, - }, - UsersStore: &mocks.UsersStore{ - UpdateF: func(ctx context.Context, user *chronograf.User) error { - return nil - }, - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - switch *q.ID { - case 1336: - return &chronograf.User{ - ID: 1336, - Name: "bobbetta", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - roles.EditorRole, - }, - }, nil - default: - return nil, fmt.Errorf("user with ID %d not found", *q.ID) - } - }, - }, - }, - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest( - "PATCH", - "http://any.url", - nil, - ), - userKeyUser: &chronograf.User{ - ID: 0, - Name: "coolUser", - Provider: "github", - Scheme: "oauth2", - SuperAdmin: false, - }, - user: &userRequest{ - ID: 1336, - Roles: []chronograf.Role{ - { - Name: roles.AdminRoleName, - Organization: "1", - }, - { - Name: roles.ViewerRoleName, - Organization: "2", - }, - }, - }, - }, - id: "1336", - wantStatus: http.StatusOK, - wantContentType: "application/json", - wantBody: `{"id":"1336","superAdmin":false,"name":"bobbetta","provider":"github","scheme":"oauth2","links":{"self":"/chronograf/v1/users/1336"},"roles":[{"name":"admin","organization":"1"},{"name":"viewer","organization":"2"}]}`, - }, - { - name: "Update a Chronograf user roles same org", - fields: fields{ - Logger: &chronograf.NoopLogger{}, - UsersStore: &mocks.UsersStore{ - UpdateF: func(ctx context.Context, user *chronograf.User) error { - return nil - }, - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - switch *q.ID { - case 1336: - return &chronograf.User{ - ID: 1336, - Name: "bobbetta", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - roles.EditorRole, - }, - }, nil - default: - return nil, fmt.Errorf("user with ID %d not found", *q.ID) - } - }, - }, - }, - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest( - "PATCH", - "http://any.url", - nil, - ), - user: &userRequest{ - ID: 1336, - Roles: []chronograf.Role{ - { - Name: roles.AdminRoleName, - Organization: "1", - }, - { - Name: roles.ViewerRoleName, - Organization: "1", - }, - }, - }, - }, - id: "1336", - wantStatus: http.StatusUnprocessableEntity, - 
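The UpdateUser cases that follow pin down the SuperAdmin guard: a request that flips a user's SuperAdmin flag must come from an authenticated SuperAdmin found in the request context, and never from the target user themselves, while a request that leaves the flag unchanged needs no such authority. A minimal sketch of the check those expectations imply; the function and the `hasUserContext` helper are hypothetical names, not the handler's actual code:

```go
// Sketch only: reconstructs the guard implied by the test expectations
// below. Helper names are assumptions, not the real handler internals.
func validSuperAdminChange(ctx context.Context, req *userRequest, target *chronograf.User) error {
	if req.SuperAdmin == target.SuperAdmin {
		return nil // no SuperAdmin change requested, nothing to authorize
	}
	ctxUser, ok := hasUserContext(ctx) // assumed reader of UserContextKey
	if !ok {
		return fmt.Errorf("failed to retrieve user from context") // -> 500
	}
	if ctxUser.ID == target.ID {
		return fmt.Errorf("user cannot modify their own SuperAdmin status") // -> 401
	}
	if !ctxUser.SuperAdmin {
		return fmt.Errorf("user does not have authorization required to set SuperAdmin status") // -> 401
	}
	return nil
}
```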
wantContentType: "application/json", - wantBody: `{"code":422,"message":"duplicate organization \"1\" in roles"}`, - }, - { - name: "SuperAdmin modifying their own SuperAdmin Status - user missing from context", - fields: fields{ - Logger: &chronograf.NoopLogger{}, - OrganizationsStore: &mocks.OrganizationsStore{ - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - switch *q.ID { - case "1": - return &chronograf.Organization{ - ID: "1", - Name: "org", - DefaultRole: roles.ViewerRoleName, - }, nil - } - return nil, fmt.Errorf("org not found") - }, - }, - UsersStore: &mocks.UsersStore{ - UpdateF: func(ctx context.Context, user *chronograf.User) error { - return nil - }, - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - switch *q.ID { - case 1336: - return &chronograf.User{ - ID: 1336, - Name: "bobbetta", - Provider: "github", - Scheme: "oauth2", - SuperAdmin: true, - Roles: []chronograf.Role{ - { - Name: roles.EditorRoleName, - Organization: "1", - }, - }, - }, nil - default: - return nil, fmt.Errorf("user with ID %d not found", *q.ID) - } - }, - }, - }, - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest( - "PATCH", - "http://any.url", - nil, - ), - user: &userRequest{ - ID: 1336, - SuperAdmin: false, - Roles: []chronograf.Role{ - { - Name: roles.AdminRoleName, - Organization: "1", - }, - }, - }, - }, - id: "1336", - wantStatus: http.StatusInternalServerError, - wantContentType: "application/json", - wantBody: `{"code":500,"message":"failed to retrieve user from context"}`, - }, - { - name: "SuperAdmin modifying their own SuperAdmin Status", - fields: fields{ - Logger: &chronograf.NoopLogger{}, - OrganizationsStore: &mocks.OrganizationsStore{ - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - switch *q.ID { - case "1": - return &chronograf.Organization{ - ID: "1", - Name: "org", - DefaultRole: roles.ViewerRoleName, - }, nil - } - return nil, fmt.Errorf("org not found") - }, - }, - UsersStore: &mocks.UsersStore{ - UpdateF: func(ctx context.Context, user *chronograf.User) error { - return nil - }, - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - switch *q.ID { - case 1336: - return &chronograf.User{ - ID: 1336, - Name: "bobbetta", - Provider: "github", - Scheme: "oauth2", - SuperAdmin: true, - Roles: []chronograf.Role{ - { - Name: roles.EditorRoleName, - Organization: "1", - }, - }, - }, nil - default: - return nil, fmt.Errorf("user with ID %d not found", *q.ID) - } - }, - }, - }, - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest( - "PATCH", - "http://any.url", - nil, - ), - user: &userRequest{ - ID: 1336, - SuperAdmin: false, - Roles: []chronograf.Role{ - { - Name: roles.AdminRoleName, - Organization: "1", - }, - }, - }, - userKeyUser: &chronograf.User{ - ID: 1336, - Name: "coolUser", - Provider: "github", - Scheme: "oauth2", - SuperAdmin: true, - }, - }, - id: "1336", - wantStatus: http.StatusUnauthorized, - wantContentType: "application/json", - wantBody: `{"code":401,"message":"user cannot modify their own SuperAdmin status"}`, - }, - { - name: "Update a SuperAdmin's Roles - without super admin context", - fields: fields{ - Logger: &chronograf.NoopLogger{}, - OrganizationsStore: &mocks.OrganizationsStore{ - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - switch *q.ID { - case "1": - return &chronograf.Organization{ - ID: 
"1", - Name: "org", - DefaultRole: roles.ViewerRoleName, - }, nil - } - return nil, fmt.Errorf("org not found") - }, - }, - UsersStore: &mocks.UsersStore{ - UpdateF: func(ctx context.Context, user *chronograf.User) error { - return nil - }, - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - switch *q.ID { - case 1336: - return &chronograf.User{ - ID: 1336, - Name: "bobbetta", - Provider: "github", - Scheme: "oauth2", - SuperAdmin: true, - Roles: []chronograf.Role{ - { - Name: roles.EditorRoleName, - Organization: "1", - }, - }, - }, nil - default: - return nil, fmt.Errorf("user with ID %d not found", *q.ID) - } - }, - }, - }, - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest( - "PATCH", - "http://any.url", - nil, - ), - user: &userRequest{ - ID: 1336, - SuperAdmin: true, - Roles: []chronograf.Role{ - { - Name: roles.AdminRoleName, - Organization: "1", - }, - }, - }, - userKeyUser: &chronograf.User{ - ID: 0, - Name: "coolUser", - Provider: "github", - Scheme: "oauth2", - SuperAdmin: false, - }, - }, - id: "1336", - wantStatus: http.StatusOK, - wantContentType: "application/json", - wantBody: `{"links":{"self":"/chronograf/v1/users/1336"},"id":"1336","name":"bobbetta","provider":"github","scheme":"oauth2","superAdmin":true,"roles":[{"name":"admin","organization":"1"}]}`, - }, - { - name: "Update a Chronograf user to super admin - without super admin context", - fields: fields{ - Logger: &chronograf.NoopLogger{}, - OrganizationsStore: &mocks.OrganizationsStore{ - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - switch *q.ID { - case "1": - return &chronograf.Organization{ - ID: "1", - Name: "org", - DefaultRole: roles.ViewerRoleName, - }, nil - } - return nil, fmt.Errorf("org not found") - }, - }, - UsersStore: &mocks.UsersStore{ - UpdateF: func(ctx context.Context, user *chronograf.User) error { - return nil - }, - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - switch *q.ID { - case 1336: - return &chronograf.User{ - ID: 1336, - Name: "bobbetta", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - roles.EditorRole, - }, - }, nil - default: - return nil, fmt.Errorf("user with ID %d not found", *q.ID) - } - }, - }, - }, - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest( - "PATCH", - "http://any.url", - nil, - ), - user: &userRequest{ - ID: 1336, - SuperAdmin: true, - Roles: []chronograf.Role{ - { - Name: roles.AdminRoleName, - Organization: "1", - }, - }, - }, - userKeyUser: &chronograf.User{ - ID: 0, - Name: "coolUser", - Provider: "github", - Scheme: "oauth2", - SuperAdmin: false, - }, - }, - id: "1336", - wantStatus: http.StatusUnauthorized, - wantContentType: "application/json", - wantBody: `{"code":401,"message":"user does not have authorization required to set SuperAdmin status. 
See https://github.com/influxdata/influxdb/chronograf/issues/2601 for more information"}`, - }, - { - name: "Update a Chronograf user to super admin - with super admin context", - fields: fields{ - Logger: &chronograf.NoopLogger{}, - OrganizationsStore: &mocks.OrganizationsStore{ - GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) { - switch *q.ID { - case "1": - return &chronograf.Organization{ - ID: "1", - Name: "org", - DefaultRole: roles.ViewerRoleName, - }, nil - } - return nil, fmt.Errorf("org not found") - }, - }, - UsersStore: &mocks.UsersStore{ - UpdateF: func(ctx context.Context, user *chronograf.User) error { - return nil - }, - GetF: func(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) { - switch *q.ID { - case 1336: - return &chronograf.User{ - ID: 1336, - Name: "bobbetta", - Provider: "github", - Scheme: "oauth2", - Roles: []chronograf.Role{ - roles.EditorRole, - }, - }, nil - default: - return nil, fmt.Errorf("user with ID %d not found", *q.ID) - } - }, - }, - }, - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest( - "PATCH", - "http://any.url", - nil, - ), - user: &userRequest{ - ID: 1336, - SuperAdmin: true, - Roles: []chronograf.Role{ - { - Name: roles.AdminRoleName, - Organization: "1", - }, - }, - }, - userKeyUser: &chronograf.User{ - ID: 0, - Name: "coolUser", - Provider: "github", - Scheme: "oauth2", - SuperAdmin: true, - }, - }, - id: "1336", - wantStatus: http.StatusOK, - wantContentType: "application/json", - wantBody: `{"id":"1336","superAdmin":true,"name":"bobbetta","provider":"github","scheme":"oauth2","links":{"self":"/chronograf/v1/users/1336"},"roles":[{"name":"admin","organization":"1"}]}`, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := &Service{ - Store: &mocks.Store{ - UsersStore: tt.fields.UsersStore, - OrganizationsStore: tt.fields.OrganizationsStore, - }, - Logger: tt.fields.Logger, - } - - tt.args.r = tt.args.r.WithContext(httprouter.WithParams(context.Background(), - httprouter.Params{ - { - Key: "id", - Value: tt.id, - }, - })) - buf, _ := json.Marshal(tt.args.user) - tt.args.r.Body = ioutil.NopCloser(bytes.NewReader(buf)) - - ctx := tt.args.r.Context() - if tt.args.userKeyUser != nil { - ctx = context.WithValue(ctx, UserContextKey, tt.args.userKeyUser) - } - - tt.args.r = tt.args.r.WithContext(ctx) - - s.UpdateUser(tt.args.w, tt.args.r) - - resp := tt.args.w.Result() - content := resp.Header.Get("Content-Type") - body, _ := ioutil.ReadAll(resp.Body) - - if resp.StatusCode != tt.wantStatus { - t.Errorf("%q. UpdateUser() = %v, want %v", tt.name, resp.StatusCode, tt.wantStatus) - } - if tt.wantContentType != "" && content != tt.wantContentType { - t.Errorf("%q. UpdateUser() = %v, want %v", tt.name, content, tt.wantContentType) - } - if eq, _ := jsonEqual(string(body), tt.wantBody); tt.wantBody != "" && !eq { - t.Errorf("%q. 
UpdateUser()\ngot:%v\n,\nwant:%v", tt.name, string(body), tt.wantBody) - } - }) - } -} - -func TestService_Users(t *testing.T) { - type fields struct { - UsersStore chronograf.UsersStore - Logger chronograf.Logger - } - type args struct { - w *httptest.ResponseRecorder - r *http.Request - } - tests := []struct { - name string - fields fields - args args - wantStatus int - wantContentType string - wantBody string - }{ - { - name: "Get all Chronograf users", - fields: fields{ - Logger: &chronograf.NoopLogger{}, - UsersStore: &mocks.UsersStore{ - AllF: func(ctx context.Context) ([]chronograf.User, error) { - return []chronograf.User{ - { - ID: 1337, - Name: "billysteve", - Provider: "google", - Scheme: "oauth2", - Roles: []chronograf.Role{ - roles.EditorRole, - }, - }, - { - ID: 1338, - Name: "bobbettastuhvetta", - Provider: "auth0", - Scheme: "oauth2", - }, - }, nil - }, - }, - }, - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest( - "GET", - "http://any.url", // can be any valid URL as we are bypassing mux - nil, - ), - }, - wantStatus: http.StatusOK, - wantContentType: "application/json", - wantBody: `{"users":[{"id":"1337","superAdmin":false,"name":"billysteve","provider":"google","scheme":"oauth2","roles":[{"name":"editor"}],"links":{"self":"/chronograf/v1/users/1337"}},{"id":"1338","superAdmin":false,"name":"bobbettastuhvetta","provider":"auth0","scheme":"oauth2","roles":[],"links":{"self":"/chronograf/v1/users/1338"}}],"links":{"self":"/chronograf/v1/users"}}`, - }, - { - name: "Get all Chronograf users, ensuring order of users in response", - fields: fields{ - Logger: &chronograf.NoopLogger{}, - UsersStore: &mocks.UsersStore{ - AllF: func(ctx context.Context) ([]chronograf.User, error) { - return []chronograf.User{ - { - ID: 1338, - Name: "bobbettastuhvetta", - Provider: "auth0", - Scheme: "oauth2", - }, - { - ID: 1337, - Name: "billysteve", - Provider: "google", - Scheme: "oauth2", - Roles: []chronograf.Role{ - roles.EditorRole, - }, - }, - }, nil - }, - }, - }, - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest( - "GET", - "http://any.url", // can be any valid URL as we are bypassing mux - nil, - ), - }, - wantStatus: http.StatusOK, - wantContentType: "application/json", - wantBody: `{"users":[{"id":"1337","superAdmin":false,"name":"billysteve","provider":"google","scheme":"oauth2","roles":[{"name":"editor"}],"links":{"self":"/chronograf/v1/users/1337"}},{"id":"1338","superAdmin":false,"name":"bobbettastuhvetta","provider":"auth0","scheme":"oauth2","roles":[],"links":{"self":"/chronograf/v1/users/1338"}}],"links":{"self":"/chronograf/v1/users"}}`, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := &Service{ - Store: &mocks.Store{ - UsersStore: tt.fields.UsersStore, - }, - Logger: tt.fields.Logger, - } - - s.Users(tt.args.w, tt.args.r) - - resp := tt.args.w.Result() - content := resp.Header.Get("Content-Type") - body, _ := ioutil.ReadAll(resp.Body) - - if resp.StatusCode != tt.wantStatus { - t.Errorf("%q. Users() = %v, want %v", tt.name, resp.StatusCode, tt.wantStatus) - } - if tt.wantContentType != "" && content != tt.wantContentType { - t.Errorf("%q. Users() = %v, want %v", tt.name, content, tt.wantContentType) - } - if eq, _ := jsonEqual(string(body), tt.wantBody); tt.wantBody != "" && !eq { - t.Errorf("%q. 
Users() = \n***%v***\n,\nwant\n***%v***", tt.name, string(body), tt.wantBody) - } - }) - } -} - -func TestUserRequest_ValidCreate(t *testing.T) { - type args struct { - u *userRequest - } - tests := []struct { - name string - args args - wantErr bool - err error - }{ - { - name: "Valid", - args: args{ - u: &userRequest{ - ID: 1337, - Name: "billietta", - Provider: "auth0", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: roles.EditorRoleName, - Organization: "1", - }, - }, - }, - }, - wantErr: false, - err: nil, - }, - { - name: "Invalid – Name missing", - args: args{ - u: &userRequest{ - ID: 1337, - Provider: "auth0", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: roles.EditorRoleName, - Organization: "1", - }, - }, - }, - }, - wantErr: true, - err: fmt.Errorf("name required on Chronograf User request body"), - }, - { - name: "Invalid – Provider missing", - args: args{ - u: &userRequest{ - ID: 1337, - Name: "billietta", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: roles.EditorRoleName, - Organization: "1", - }, - }, - }, - }, - wantErr: true, - err: fmt.Errorf("provider required on Chronograf User request body"), - }, - { - name: "Invalid – Scheme missing", - args: args{ - u: &userRequest{ - ID: 1337, - Name: "billietta", - Provider: "auth0", - Roles: []chronograf.Role{ - { - Name: roles.EditorRoleName, - Organization: "1", - }, - }, - }, - }, - wantErr: true, - err: fmt.Errorf("scheme required on Chronograf User request body"), - }, - { - name: "Invalid roles - bad role name", - args: args{ - u: &userRequest{ - ID: 1337, - Name: "billietta", - Provider: "auth0", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: "BilliettaSpecialRole", - Organization: "1", - }, - }, - }, - }, - wantErr: true, - err: fmt.Errorf("unknown role BilliettaSpecialRole. Valid roles are 'member', 'viewer', 'editor', 'admin', and '*'"), - }, - { - name: "Invalid roles - missing organization", - args: args{ - u: &userRequest{ - ID: 1337, - Name: "billietta", - Provider: "auth0", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: roles.EditorRoleName, - }, - }, - }, - }, - wantErr: true, - err: fmt.Errorf("no organization was provided"), - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := tt.args.u.ValidCreate() - - if tt.wantErr { - if err == nil || err.Error() != tt.err.Error() { - t.Errorf("%q. ValidCreate(): wantErr %v,\nwant %v,\ngot %v", tt.name, tt.wantErr, tt.err, err) - } - } else { - if err != nil { - t.Errorf("%q. ValidCreate(): wantErr %v,\nwant %v,\ngot %v", tt.name, tt.wantErr, tt.err, err) - } - } - }) - } -} - -func TestUserRequest_ValidUpdate(t *testing.T) { - type args struct { - u *userRequest - } - tests := []struct { - name string - args args - wantErr bool - err error - }{ - { - name: "Valid", - args: args{ - u: &userRequest{ - ID: 1337, - Roles: []chronograf.Role{ - { - Name: roles.EditorRoleName, - Organization: "1", - }, - }, - }, - }, - wantErr: false, - err: nil, - }, - { - name: "Invalid – roles missing", - args: args{ - u: &userRequest{}, - }, - wantErr: true, - err: fmt.Errorf("no Roles to update"), - }, - { - name: "Invalid - bad role name", - args: args{ - u: &userRequest{ - ID: 1337, - Name: "billietta", - Provider: "auth0", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: "BillietaSpecialOrg", - Organization: "0", - }, - }, - }, - }, - wantErr: true, - err: fmt.Errorf("unknown role BillietaSpecialOrg. 
Valid roles are 'member', 'viewer', 'editor', 'admin', and '*'"), - }, - { - name: "Valid – roles empty", - args: args{ - u: &userRequest{ - ID: 1337, - Name: "billietta", - Provider: "auth0", - Scheme: "oauth2", - Roles: []chronograf.Role{}, - }, - }, - wantErr: false, - }, - { - name: "Invalid - bad role name", - args: args{ - u: &userRequest{ - ID: 1337, - Name: "billietta", - Provider: "auth0", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: "BillietaSpecialOrg", - Organization: "0", - }, - }, - }, - }, - wantErr: true, - err: fmt.Errorf("unknown role BillietaSpecialOrg. Valid roles are 'member', 'viewer', 'editor', 'admin', and '*'"), - }, - { - name: "Invalid - duplicate organization", - args: args{ - u: &userRequest{ - ID: 1337, - Name: "billietta", - Provider: "auth0", - Scheme: "oauth2", - Roles: []chronograf.Role{ - { - Name: roles.AdminRoleName, - Organization: "0", - }, - { - Name: roles.ViewerRoleName, - Organization: "0", - }, - }, - }, - }, - wantErr: true, - err: fmt.Errorf("duplicate organization \"0\" in roles"), - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := tt.args.u.ValidUpdate() - - if tt.wantErr { - if err == nil || err.Error() != tt.err.Error() { - t.Errorf("%q. ValidUpdate(): wantErr %v,\nwant %v,\ngot %v", tt.name, tt.wantErr, tt.err, err) - } - } else { - if err != nil { - t.Errorf("%q. ValidUpdate(): wantErr %v,\nwant %v,\ngot %v", tt.name, tt.wantErr, tt.err, err) - } - } - }) - } -} diff --git a/chronograf/server/version.go b/chronograf/server/version.go deleted file mode 100644 index e7fc4c9013d..00000000000 --- a/chronograf/server/version.go +++ /dev/null @@ -1,14 +0,0 @@ -package server - -import ( - "net/http" -) - -// Version handler adds X-Chronograf-Version header to responses -func Version(version string, h http.Handler) http.Handler { - fn := func(w http.ResponseWriter, r *http.Request) { - w.Header().Add("X-Chronograf-Version", version) - h.ServeHTTP(w, r) - } - return http.HandlerFunc(fn) -} diff --git a/cliff.toml b/cliff.toml new file mode 100644 index 00000000000..8cc22a9eb87 --- /dev/null +++ b/cliff.toml @@ -0,0 +1,35 @@ +[changelog] +body = """ +{%- if version %} +## {{ version }} [{{ timestamp | date(format="%Y-%m-%d") }}] +{%- else %} +## [unreleased] +{%- endif %} +---------------------- +{% set grouped_commits = commits | group_by(attribute="group") -%} +{%- set_global groups_arr = [] -%} +{%- for group, _commits in grouped_commits -%} + {%- set_global groups_arr = groups_arr | concat(with=group) -%} +{%- endfor -%} +{% for group in groups_arr | sort | reverse %} + {% set g_commits = grouped_commits[group] -%} + ### {{ group | upper_first }} + {% for commit in g_commits -%} + {%- set message = commit.message | split(pat="\n") | first | split(pat=": ") | slice(start=1) | join(sep=" ") | trim | capitalize -%} + {% set pr_num = message | split(pat=" ") | last | trim_start_matches(pat="(") | trim_end_matches(pat=")") | trim_start_matches(pat="#") %} + {%- set message = message | split(pat=" ") | slice(end=-1) | join(sep=" ") | trim %} + 1. 
[{{ pr_num }}](https://github.com/influxdata/influxdb/pull/{{ pr_num }}): {{ message }} + {%- endfor %} +{% endfor %} + +""" +trim = true + +[git] +conventional_commits = false +commit_parsers = [ + { message = "^feat*", group = "Features"}, + { message = "^fix*", group = "Bug Fixes"}, +] +filter_commits = true +tag_pattern = "v[12].[0-9].[0-9]*" diff --git a/cmd/chronograf-migrator/README.md b/cmd/chronograf-migrator/README.md deleted file mode 100644 index a8fd9214962..00000000000 --- a/cmd/chronograf-migrator/README.md +++ /dev/null @@ -1,20 +0,0 @@ -# Chronograf Migrator - -This tool is used to migrate `1.x` Chronograf `Dashboards` and `Template Variables` to their `2.x` -equivalents using `pkger` packages. The tool expects the user to have the 1.x Chronograf database. - -```sh -chronograf-migrator -h -Usage of chronograf-migrator: - -db string - path to the chronograf database - -output string - path to the output yaml file (default "dashboards.yml") -``` - -## Example Usage - -```sh -$ chronograf-migrator -db chronograf-v1.db -output dashboards.yml -$ INFLUX_TOKEN= influx pkg -o -f dashboards.yml -``` diff --git a/cmd/chronograf-migrator/dashboard.go b/cmd/chronograf-migrator/dashboard.go deleted file mode 100644 index cb4f693da41..00000000000 --- a/cmd/chronograf-migrator/dashboard.go +++ /dev/null @@ -1,404 +0,0 @@ -package main - -import ( - "context" - "errors" - "fmt" - "regexp" - "strings" - "time" - - "github.com/influxdata/flux/ast" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/query/influxql" -) - -func convert1To2Cell(cell chronograf.DashboardCell) *influxdb.Cell { - c := &influxdb.Cell{ - ID: 1, - CellProperty: influxdb.CellProperty{ - X: cell.X, - Y: cell.Y, - W: cell.W, - H: cell.H, - }, - } - - v := influxdb.View{ - ViewContents: influxdb.ViewContents{ - Name: cell.Name, - }, - } - - switch cell.Type { - case "line": - v.Properties = influxdb.XYViewProperties{ - Queries: convertQueries(cell.Queries), - Axes: convertAxes(cell.Axes), - Type: "xy", - Legend: convertLegend(cell.Legend), - Geom: "line", - ViewColors: convertColors(cell.CellColors), - Note: cell.Note, - Position: "overlaid", - } - case "line-stacked": - v.Properties = influxdb.XYViewProperties{ - Queries: convertQueries(cell.Queries), - Axes: convertAxes(cell.Axes), - Type: "xy", - Legend: convertLegend(cell.Legend), - Geom: "line", // TODO(desa): maybe this needs to be stacked? 
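For reference, the `cliff.toml` template added above groups commits whose messages start with `feat`/`fix`, sorts the group names and reverses them (so Features precede Bug Fixes), and emits one numbered PR link per commit. Rendered against two hypothetical commits under a `v2.1.0` tag, the output would look roughly like this (PR numbers and date are invented):

```md
## v2.1.0 [2021-11-09]
----------------------

### Features

1. [22100](https://github.com/influxdata/influxdb/pull/22100): Add an example feature

### Bug Fixes

1. [22101](https://github.com/influxdata/influxdb/pull/22101): Fix an example bug
```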
- ViewColors: convertColors(cell.CellColors), - Note: cell.Note, - Position: "stacked", - } - case "line-stepplot": - v.Properties = influxdb.XYViewProperties{ - Queries: convertQueries(cell.Queries), - Axes: convertAxes(cell.Axes), - Type: "xy", - Legend: convertLegend(cell.Legend), - Geom: "step", - ViewColors: convertColors(cell.CellColors), - Note: cell.Note, - Position: "overlaid", - } - case "bar": - v.Properties = influxdb.XYViewProperties{ - Queries: convertQueries(cell.Queries), - Axes: convertAxes(cell.Axes), - Type: "xy", - Legend: convertLegend(cell.Legend), - Geom: "bar", - ViewColors: convertColors(cell.CellColors), - Note: cell.Note, - Position: "overlaid", - } - case "line-plus-single-stat": - v.Properties = influxdb.LinePlusSingleStatProperties{ - Queries: convertQueries(cell.Queries), - Axes: convertAxes(cell.Axes), - Legend: convertLegend(cell.Legend), - ViewColors: convertColors(cell.CellColors), - Note: cell.Note, - Position: "overlaid", - } - case "single-stat": - v.Properties = influxdb.SingleStatViewProperties{ - Queries: convertQueries(cell.Queries), - ViewColors: convertColors(cell.CellColors), - Note: cell.Note, - // TODO(desa): what to do about ShowNoteWhenEmpty? - } - case "gauge": - v.Properties = influxdb.GaugeViewProperties{ - Queries: convertQueries(cell.Queries), - ViewColors: convertColors(cell.CellColors), - Note: cell.Note, - // TODO(desa): what to do about ShowNoteWhenEmpty? - } - case "table": - v.Properties = influxdb.TableViewProperties{ - Queries: convertQueries(cell.Queries), - ViewColors: convertColors(cell.CellColors), - //TableOptions - //FieldOptions - Note: cell.Note, - // TODO(desa): what to do about ShowNoteWhenEmpty? - } - case "note": - v.Properties = influxdb.MarkdownViewProperties{ - Note: cell.Note, - } - case "alerts", "news", "guide": - // TODO(desa): these do not have 2.x equivalents - v.Properties = influxdb.EmptyViewProperties{} - default: - v.Properties = influxdb.EmptyViewProperties{} - } - - c.View = &v - - return c -} - -func convert1To2Variable(t chronograf.Template) (influxdb.Variable, error) { - v := influxdb.Variable{ - Description: t.Label, - Name: t.Var[1 : len(t.Var)-1], // trims `:` from variables prefix and suffix - } - - switch t.Type { - case "influxql", "databases", "fieldKeys", "tagKeys", "tagValues", "measurements": - if t.Query == nil { - return v, fmt.Errorf("expected template variable to have non-nil query") - } - } - - switch t.Type { - case "influxql": - v.Arguments = &influxdb.VariableArguments{ - Type: "query", - Values: influxdb.VariableQueryValues{ - Query: fmt.Sprintf("// %s", t.Query.Command), - Language: "flux", - }, - } - case "databases": - v.Arguments = &influxdb.VariableArguments{ - Type: "query", - Values: influxdb.VariableQueryValues{ - Query: fmt.Sprintf("// SHOW DATABASES %s", t.Query.DB), - Language: "flux", - }, - } - case "fieldKeys": - v.Arguments = &influxdb.VariableArguments{ - Type: "query", - Values: influxdb.VariableQueryValues{ - Query: fmt.Sprintf("// SHOW FIELD KEYS FOR %s", t.Query.Measurement), - Language: "flux", - }, - } - case "tagKeys": - v.Arguments = &influxdb.VariableArguments{ - Type: "query", - Values: influxdb.VariableQueryValues{ - Query: fmt.Sprintf("// SHOW TAG KEYS FOR %s", t.Query.Measurement), - Language: "flux", - }, - } - case "tagValues": - v.Arguments = &influxdb.VariableArguments{ - Type: "query", - Values: influxdb.VariableQueryValues{ - Query: fmt.Sprintf("// SHOW TAG VALUES FOR %s", t.Query.TagKey), - Language: "flux", - }, - } - case "measurements": - 
v.Arguments = &influxdb.VariableArguments{ - Type: "query", - Values: influxdb.VariableQueryValues{ - Query: fmt.Sprintf("// SHOW MEASUREMENTS ON %s", t.Query.DB), - Language: "flux", - }, - } - case "csv", "constant", "text": - values := influxdb.VariableConstantValues{} - for _, val := range t.Values { - values = append(values, val.Value) - } - v.Arguments = &influxdb.VariableArguments{ - Type: "constant", - Values: values, - } - case "map": - values := influxdb.VariableMapValues{} - for _, val := range t.Values { - values[val.Key] = val.Value - } - v.Arguments = &influxdb.VariableArguments{ - Type: "map", - Values: values, - } - default: - return v, fmt.Errorf("unknown variable type %s", t.Type) - } - - return v, nil -} - -func Convert1To2Dashboard(d1 chronograf.Dashboard) (influxdb.Dashboard, []influxdb.Variable, error) { - cells := []*influxdb.Cell{} - for _, cell := range d1.Cells { - cells = append(cells, convert1To2Cell(cell)) - } - - d2 := influxdb.Dashboard{ - Name: d1.Name, - Cells: cells, - } - - vars := []influxdb.Variable{} - for _, template := range d1.Templates { - v, err := convert1To2Variable(template) - if err != nil { - return influxdb.Dashboard{}, nil, err - } - - vars = append(vars, v) - } - - return d2, vars, nil -} - -func convertAxes(a map[string]chronograf.Axis) map[string]influxdb.Axis { - m := map[string]influxdb.Axis{} - for k, v := range a { - m[k] = influxdb.Axis{ - Bounds: v.Bounds, - Label: v.Label, - Prefix: v.Prefix, - Suffix: v.Suffix, - Base: v.Base, - Scale: v.Scale, - } - } - - if _, exists := m["x"]; !exists { - m["x"] = influxdb.Axis{} - } - if _, exists := m["y"]; !exists { - m["y"] = influxdb.Axis{} - } - - return m -} - -func convertLegend(l chronograf.Legend) influxdb.Legend { - return influxdb.Legend{ - Type: l.Type, - Orientation: l.Orientation, - } -} - -func convertColors(cs []chronograf.CellColor) []influxdb.ViewColor { - vs := []influxdb.ViewColor{} - - hasTextColor := false - hasThresholdColor := false - for _, c := range cs { - if c.Type == "text" { - hasTextColor = true - } - if c.Type == "threshold" { - hasThresholdColor = true - } - - v := influxdb.ViewColor{ - ID: c.ID, - Type: c.Type, - Hex: c.Hex, - Name: c.Name, - } - vs = append(vs, v) - } - - if !hasTextColor { - vs = append(vs, influxdb.ViewColor{ - ID: "base", - Type: "text", - Hex: "#00C9FF", - Name: "laser", - Value: 0, - }) - } - - if !hasThresholdColor { - vs = append(vs, influxdb.ViewColor{ - ID: "t", - Type: "threshold", - Hex: "#4591ED", - Name: "ocean", - Value: 80, - }) - } - - return vs -} - -var influxQLVarPattern = regexp.MustCompile(`'?:(\w+):'?`) - -func transpileQuery(q string) (string, error) { - now := time.Now() - t := influxql.NewTranspilerWithConfig(dbrpMapper{}, influxql.Config{ - Now: now, - FallbackToDBRP: true, - }) - - query := q - query = strings.Replace(query, ":interval:", "8675309ns", -1) - query = strings.Replace(query, ":dashboardTime:", "now() - 15m", 1) - query = strings.Replace(query, ":upperDashboardTime:", "now()", 1) - - // TODO(desa): replace all variables not using this hack - query = influxQLVarPattern.ReplaceAllString(query, "'$1'") - - pkg, err := t.Transpile(context.Background(), query) - if err != nil { - return "", err - } - - return ast.Format(pkg), nil -} - -func convertQueries(qs []chronograf.DashboardQuery) []influxdb.DashboardQuery { - - ds := []influxdb.DashboardQuery{} - for _, q := range qs { - queryText := q.Command - if q.Type == "influxql" { - // if the query is influxql, add it as a comment and attempt to - // compile it 
to flux - queryText = fmt.Sprintf("// %s", queryText) - - tq, err := transpileQuery(q.Command) - if err != nil { - queryText = fmt.Sprintf("// Failed to transpile query: %v\n%s", err, queryText) - } else { - queryText = fmt.Sprintf("// Original Query:\n%s\n\n%s", queryText, tq) - } - } - - d := influxdb.DashboardQuery{ - Text: queryText, - EditMode: "advanced", - } - - ds = append(ds, d) - } - - if len(ds) == 0 { - d := influxdb.DashboardQuery{ - Text: "// cell had no queries", - EditMode: "advanced", - BuilderConfig: influxdb.BuilderConfig{ - // TODO(desa): foo - Buckets: []string{"bucket"}, - }, - } - ds = append(ds, d) - } - - return ds -} - -type dbrpMapper struct{} - -// FindBy returns the dbrp mapping for the specified ID. -func (d dbrpMapper) FindByID(ctx context.Context, orgID influxdb.ID, id influxdb.ID) (*influxdb.DBRPMappingV2, error) { - return nil, errors.New("mapping not found") -} - -// FindMany returns a list of dbrp mappings that match filter and the total count of matching dbrp mappings. -func (d dbrpMapper) FindMany(ctx context.Context, dbrp influxdb.DBRPMappingFilterV2, opts ...influxdb.FindOptions) ([]*influxdb.DBRPMappingV2, int, error) { - return nil, 0, errors.New("mapping not found") -} - -// Create creates a new dbrp mapping, if a different mapping exists an error is returned. -func (d dbrpMapper) Create(ctx context.Context, dbrp *influxdb.DBRPMappingV2) error { - return errors.New("dbrpMapper does not support creating new mappings") -} - -// Update a new dbrp mapping -func (d dbrpMapper) Update(ctx context.Context, dbrp *influxdb.DBRPMappingV2) error { - return errors.New("dbrpMapper does not support updating mappings") -} - -// Delete removes a dbrp mapping. -func (d dbrpMapper) Delete(ctx context.Context, orgID influxdb.ID, id influxdb.ID) error { - return errors.New("dbrpMapper does not support deleting mappings") -} diff --git a/cmd/chronograf-migrator/main.go b/cmd/chronograf-migrator/main.go deleted file mode 100644 index 2bb44d3e822..00000000000 --- a/cmd/chronograf-migrator/main.go +++ /dev/null @@ -1,93 +0,0 @@ -package main - -import ( - "bytes" - "context" - "flag" - "fmt" - "io" - "log" - "os" - "strings" - - "github.com/influxdata/influxdb/v2/chronograf" - "github.com/influxdata/influxdb/v2/chronograf/bolt" - "github.com/influxdata/influxdb/v2/pkger" -) - -var chronografDBPath string -var outputFile string - -func exec(dbPath, out string) error { - logger := log.New(os.Stdout, "", 0) - - c := bolt.NewClient() - c.Path = dbPath - - ctx := context.Background() - - if err := c.Open(ctx, nil, chronograf.BuildInfo{}); err != nil { - return err - } - - dashboardStore := c.DashboardsStore - - ds, err := dashboardStore.All(ctx) - if err != nil { - return err - } - - pkg := &pkger.Template{ - Objects: make([]pkger.Object, 0), - } - - hasVar := map[string]bool{} - for _, d1 := range ds { - d2, vs, err := Convert1To2Dashboard(d1) - if err != nil { - return err - } - - pkg.Objects = append(pkg.Objects, pkger.DashboardToObject(d2.Name, d2)) - - for _, v := range vs { - name := strings.ToLower(v.Name) - if hasVar[name] { - // TODO(desa): not sure what we actually want to do here - logger.Printf("Found duplicate variables with name %q skipping\n", name) - continue - } - hasVar[name] = true - - pkg.Objects = append(pkg.Objects, pkger.VariableToObject(name, v)) - } - } - - f, err := os.Create(out) - if err != nil { - return err - } - defer f.Close() - - b, err := pkg.Encode(pkger.EncodingYAML) - if err != nil { - return err - } - _, err = 
io.Copy(f, bytes.NewReader(b)) - return err -} - -func main() { - flag.StringVar(&chronografDBPath, "db", "", "path to the chronograf database") - flag.StringVar(&outputFile, "output", "dashboards.yml", "path to the output yaml file") - flag.Parse() - - if chronografDBPath == "" { - fmt.Fprintln(os.Stdout, "must supply db flag") - return - } - - if err := exec(chronografDBPath, outputFile); err != nil { - fmt.Fprintln(os.Stderr, err.Error()) - } -} diff --git a/cmd/influx/authorization.go b/cmd/influx/authorization.go deleted file mode 100644 index 6ef46e419b6..00000000000 --- a/cmd/influx/authorization.go +++ /dev/null @@ -1,674 +0,0 @@ -package main - -import ( - "context" - "io" - - platform "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorization" - "github.com/influxdata/influxdb/v2/cmd/influx/internal" - "github.com/spf13/cobra" -) - -type token struct { - ID platform.ID `json:"id"` - Description string `json:"description"` - Token string `json:"token"` - Status string `json:"status"` - UserName string `json:"userName"` - UserID platform.ID `json:"userID"` - Permissions []string `json:"permissions"` -} - -func cmdAuth(f *globalFlags, opt genericCLIOpts) *cobra.Command { - cmd := opt.newCmd("auth", nil, false) - cmd.Aliases = []string{"authorization"} - cmd.Short = "Authorization management commands" - cmd.Run = seeHelp - - cmd.AddCommand( - authActiveCmd(f, opt), - authCreateCmd(f, opt), - authDeleteCmd(f, opt), - authFindCmd(f, opt), - authInactiveCmd(f, opt), - ) - - return cmd -} - -var authCRUDFlags struct { - id string - json bool - hideHeaders bool -} - -var authCreateFlags struct { - user string - description string - org organization - - writeUserPermission bool - readUserPermission bool - - writeBucketsPermission bool - readBucketsPermission bool - - writeBucketPermissions []string - readBucketPermissions []string - - writeTasksPermission bool - readTasksPermission bool - - writeTelegrafsPermission bool - readTelegrafsPermission bool - - writeOrganizationsPermission bool - readOrganizationsPermission bool - - writeDashboardsPermission bool - readDashboardsPermission bool - - writeCheckPermission bool - readCheckPermission bool - - writeNotificationRulePermission bool - readNotificationRulePermission bool - - writeNotificationEndpointPermission bool - readNotificationEndpointPermission bool - - writeDBRPPermission bool - readDBRPPermission bool -} - -func authCreateCmd(f *globalFlags, opt genericCLIOpts) *cobra.Command { - cmd := &cobra.Command{ - Use: "create", - Short: "Create authorization", - RunE: checkSetupRunEMiddleware(&flags)(authorizationCreateF), - } - - f.registerFlags(opt.viper, cmd) - authCreateFlags.org.register(opt.viper, cmd, false) - - cmd.Flags().StringVarP(&authCreateFlags.description, "description", "d", "", "Token description") - cmd.Flags().StringVarP(&authCreateFlags.user, "user", "u", "", "The user name") - registerPrintOptions(opt.viper, cmd, &authCRUDFlags.hideHeaders, &authCRUDFlags.json) - - cmd.Flags().BoolVarP(&authCreateFlags.writeUserPermission, "write-user", "", false, "Grants the permission to perform mutative actions against organization users") - cmd.Flags().BoolVarP(&authCreateFlags.readUserPermission, "read-user", "", false, "Grants the permission to perform read actions against organization users") - - cmd.Flags().BoolVarP(&authCreateFlags.writeBucketsPermission, "write-buckets", "", false, "Grants the permission to perform mutative actions against organization buckets") - 
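The flag registrations above and just below define the permission surface of `influx auth create`. A hypothetical invocation; the permission flag names come straight from these registrations, while the org, user, and description values are placeholders (and the shared organization flag is assumed to register as `--org`):

```sh
# Hypothetical example: mint a token for user bob that can read buckets
# and write tasks in my-org. Values are placeholders.
influx auth create \
  --org my-org \
  --user bob \
  --description "CI token" \
  --read-buckets \
  --write-tasks
```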
cmd.Flags().BoolVarP(&authCreateFlags.readBucketsPermission, "read-buckets", "", false, "Grants the permission to perform read actions against organization buckets") - - cmd.Flags().StringArrayVarP(&authCreateFlags.writeBucketPermissions, "write-bucket", "", []string{}, "The bucket id") - cmd.Flags().StringArrayVarP(&authCreateFlags.readBucketPermissions, "read-bucket", "", []string{}, "The bucket id") - - cmd.Flags().BoolVarP(&authCreateFlags.writeTasksPermission, "write-tasks", "", false, "Grants the permission to create tasks") - cmd.Flags().BoolVarP(&authCreateFlags.readTasksPermission, "read-tasks", "", false, "Grants the permission to read tasks") - - cmd.Flags().BoolVarP(&authCreateFlags.writeTelegrafsPermission, "write-telegrafs", "", false, "Grants the permission to create telegraf configs") - cmd.Flags().BoolVarP(&authCreateFlags.readTelegrafsPermission, "read-telegrafs", "", false, "Grants the permission to read telegraf configs") - - cmd.Flags().BoolVarP(&authCreateFlags.writeOrganizationsPermission, "write-orgs", "", false, "Grants the permission to create organizations") - cmd.Flags().BoolVarP(&authCreateFlags.readOrganizationsPermission, "read-orgs", "", false, "Grants the permission to read organizations") - - cmd.Flags().BoolVarP(&authCreateFlags.writeDashboardsPermission, "write-dashboards", "", false, "Grants the permission to create dashboards") - cmd.Flags().BoolVarP(&authCreateFlags.readDashboardsPermission, "read-dashboards", "", false, "Grants the permission to read dashboards") - - cmd.Flags().BoolVarP(&authCreateFlags.writeNotificationRulePermission, "write-notificationRules", "", false, "Grants the permission to create notificationRules") - cmd.Flags().BoolVarP(&authCreateFlags.readNotificationRulePermission, "read-notificationRules", "", false, "Grants the permission to read notificationRules") - - cmd.Flags().BoolVarP(&authCreateFlags.writeNotificationEndpointPermission, "write-notificationEndpoints", "", false, "Grants the permission to create notificationEndpoints") - cmd.Flags().BoolVarP(&authCreateFlags.readNotificationEndpointPermission, "read-notificationEndpoints", "", false, "Grants the permission to read notificationEndpoints") - - cmd.Flags().BoolVarP(&authCreateFlags.writeCheckPermission, "write-checks", "", false, "Grants the permission to create checks") - cmd.Flags().BoolVarP(&authCreateFlags.readCheckPermission, "read-checks", "", false, "Grants the permission to read checks") - - cmd.Flags().BoolVarP(&authCreateFlags.writeDBRPPermission, "write-dbrps", "", false, "Grants the permission to create database retention policy mappings") - cmd.Flags().BoolVarP(&authCreateFlags.readDBRPPermission, "read-dbrps", "", false, "Grants the permission to read database retention policy mappings") - - return cmd -} - -func authorizationCreateF(cmd *cobra.Command, args []string) error { - if err := authCreateFlags.org.validOrgFlags(&flags); err != nil { - return err - } - - userSvc, err := newUserService() - if err != nil { - return err - } - - orgSvc, err := newOrganizationService() - if err != nil { - return err - } - - orgID, err := authCreateFlags.org.getID(orgSvc) - if err != nil { - return err - } - - bucketPerms := []struct { - action platform.Action - perms []string - }{ - {action: platform.ReadAction, perms: authCreateFlags.readBucketPermissions}, - {action: platform.WriteAction, perms: authCreateFlags.writeBucketPermissions}, - } - - var permissions []platform.Permission - for _, bp := range bucketPerms { - for _, p := range bp.perms { - var id 
platform.ID - if err := id.DecodeFromString(p); err != nil { - return err - } - - p, err := platform.NewPermissionAtID(id, bp.action, platform.BucketsResourceType, orgID) - if err != nil { - return err - } - - permissions = append(permissions, *p) - } - } - - providedPerm := []struct { - readPerm, writePerm bool - ResourceType platform.ResourceType - }{ - { - readPerm: authCreateFlags.readBucketsPermission, - writePerm: authCreateFlags.writeBucketsPermission, - ResourceType: platform.BucketsResourceType, - }, - { - readPerm: authCreateFlags.readCheckPermission, - writePerm: authCreateFlags.writeCheckPermission, - ResourceType: platform.ChecksResourceType, - }, - { - readPerm: authCreateFlags.readDashboardsPermission, - writePerm: authCreateFlags.writeDashboardsPermission, - ResourceType: platform.DashboardsResourceType, - }, - { - readPerm: authCreateFlags.readNotificationEndpointPermission, - writePerm: authCreateFlags.writeNotificationEndpointPermission, - ResourceType: platform.NotificationEndpointResourceType, - }, - { - readPerm: authCreateFlags.readNotificationRulePermission, - writePerm: authCreateFlags.writeNotificationRulePermission, - ResourceType: platform.NotificationRuleResourceType, - }, - { - readPerm: authCreateFlags.readOrganizationsPermission, - writePerm: authCreateFlags.writeOrganizationsPermission, - ResourceType: platform.OrgsResourceType, - }, - { - readPerm: authCreateFlags.readTasksPermission, - writePerm: authCreateFlags.writeTasksPermission, - ResourceType: platform.TasksResourceType, - }, - { - readPerm: authCreateFlags.readTelegrafsPermission, - writePerm: authCreateFlags.writeTelegrafsPermission, - ResourceType: platform.TelegrafsResourceType, - }, - - { - readPerm: authCreateFlags.readUserPermission, - writePerm: authCreateFlags.writeUserPermission, - ResourceType: platform.UsersResourceType, - }, - { - readPerm: authCreateFlags.readDBRPPermission, - writePerm: authCreateFlags.writeDBRPPermission, - ResourceType: platform.DBRPResourceType, - }, - } - - for _, provided := range providedPerm { - var actions []platform.Action - if provided.readPerm { - actions = append(actions, platform.ReadAction) - } - if provided.writePerm { - actions = append(actions, platform.WriteAction) - } - - for _, action := range actions { - p, err := platform.NewPermission(action, provided.ResourceType, orgID) - if err != nil { - return err - } - permissions = append(permissions, *p) - } - } - - authorization := &platform.Authorization{ - Description: authCreateFlags.description, - Permissions: permissions, - OrgID: orgID, - } - - if userName := authCreateFlags.user; userName != "" { - user, err := userSvc.FindUser(context.Background(), platform.UserFilter{ - Name: &userName, - }) - if err != nil { - return err - } - authorization.UserID = user.ID - } - - s, err := newAuthorizationService() - if err != nil { - return err - } - - if err := s.CreateAuthorization(context.Background(), authorization); err != nil { - return err - } - - user, err := userSvc.FindUserByID(context.Background(), authorization.UserID) - if err != nil { - return err - } - - ps := make([]string, 0, len(authorization.Permissions)) - for _, p := range authorization.Permissions { - ps = append(ps, p.String()) - } - - return writeTokens(cmd.OutOrStdout(), tokenPrintOpt{ - jsonOut: authCRUDFlags.json, - hideHeaders: authCRUDFlags.hideHeaders, - token: token{ - ID: authorization.ID, - Description: authorization.Description, - Token: authorization.Token, - Status: string(authorization.Status), - UserName: user.Name, - 
UserID: user.ID, - Permissions: ps, - }, - }) -} - -var authorizationFindFlags struct { - org organization - user string - userID string -} - -func authFindCmd(f *globalFlags, opt genericCLIOpts) *cobra.Command { - cmd := &cobra.Command{ - Use: "list", - Short: "List authorizations", - Aliases: []string{"find", "ls"}, - RunE: checkSetupRunEMiddleware(&flags)(authorizationFindF), - } - - f.registerFlags(opt.viper, cmd) - authorizationFindFlags.org.register(opt.viper, cmd, false) - registerPrintOptions(opt.viper, cmd, &authCRUDFlags.hideHeaders, &authCRUDFlags.json) - cmd.Flags().StringVarP(&authorizationFindFlags.user, "user", "u", "", "The user") - cmd.Flags().StringVarP(&authorizationFindFlags.userID, "user-id", "", "", "The user ID") - - cmd.Flags().StringVarP(&authCRUDFlags.id, "id", "i", "", "The authorization ID") - - return cmd -} - -func authorizationFindF(cmd *cobra.Command, args []string) error { - s, err := newAuthorizationService() - if err != nil { - return err - } - - us, err := newUserService() - if err != nil { - return err - } - - var filter platform.AuthorizationFilter - if authCRUDFlags.id != "" { - fID, err := platform.IDFromString(authCRUDFlags.id) - if err != nil { - return err - } - filter.ID = fID - } - if authorizationFindFlags.user != "" { - filter.User = &authorizationFindFlags.user - } - if authorizationFindFlags.userID != "" { - uID, err := platform.IDFromString(authorizationFindFlags.userID) - if err != nil { - return err - } - filter.UserID = uID - } - if authorizationFindFlags.org.name != "" { - filter.Org = &authorizationFindFlags.org.name - } - if authorizationFindFlags.org.id != "" { - oID, err := platform.IDFromString(authorizationFindFlags.org.id) - if err != nil { - return err - } - filter.OrgID = oID - } - - authorizations, _, err := s.FindAuthorizations(context.Background(), filter) - if err != nil { - return err - } - - var tokens []token - for _, a := range authorizations { - var permissions []string - for _, p := range a.Permissions { - permissions = append(permissions, p.String()) - } - - user, err := us.FindUserByID(context.Background(), a.UserID) - if err != nil { - return err - } - - tokens = append(tokens, token{ - ID: a.ID, - Description: a.Description, - Token: a.Token, - Status: string(a.Status), - UserName: user.Name, - UserID: a.UserID, - Permissions: permissions, - }) - } - - return writeTokens(cmd.OutOrStdout(), tokenPrintOpt{ - jsonOut: authCRUDFlags.json, - hideHeaders: authCRUDFlags.hideHeaders, - tokens: tokens, - }) -} - -func authDeleteCmd(f *globalFlags, opt genericCLIOpts) *cobra.Command { - cmd := &cobra.Command{ - Use: "delete", - Short: "Delete authorization", - RunE: checkSetupRunEMiddleware(&flags)(authorizationDeleteF), - } - - f.registerFlags(opt.viper, cmd) - registerPrintOptions(opt.viper, cmd, &authCRUDFlags.hideHeaders, &authCRUDFlags.json) - cmd.Flags().StringVarP(&authCRUDFlags.id, "id", "i", "", "The authorization ID (required)") - cmd.MarkFlagRequired("id") - - return cmd -} - -func authorizationDeleteF(cmd *cobra.Command, args []string) error { - s, err := newAuthorizationService() - if err != nil { - return err - } - - us, err := newUserService() - if err != nil { - return err - } - - id, err := platform.IDFromString(authCRUDFlags.id) - if err != nil { - return err - } - - ctx := context.TODO() - a, err := s.FindAuthorizationByID(ctx, *id) - if err != nil { - return err - } - - if err := s.DeleteAuthorization(context.Background(), *id); err != nil { - return err - } - - user, err := 
us.FindUserByID(context.Background(), a.UserID) - if err != nil { - return err - } - - ps := make([]string, 0, len(a.Permissions)) - for _, p := range a.Permissions { - ps = append(ps, p.String()) - } - - return writeTokens(cmd.OutOrStdout(), tokenPrintOpt{ - jsonOut: authCRUDFlags.json, - deleted: true, - hideHeaders: authCRUDFlags.hideHeaders, - token: token{ - ID: a.ID, - Description: a.Description, - Token: a.Token, - Status: string(a.Status), - UserName: user.Name, - UserID: user.ID, - Permissions: ps, - }, - }) -} - -func authActiveCmd(f *globalFlags, opt genericCLIOpts) *cobra.Command { - cmd := &cobra.Command{ - Use: "active", - Short: "Active authorization", - RunE: checkSetupRunEMiddleware(&flags)(authorizationActiveF), - } - f.registerFlags(opt.viper, cmd) - - registerPrintOptions(opt.viper, cmd, &authCRUDFlags.hideHeaders, &authCRUDFlags.json) - cmd.Flags().StringVarP(&authCRUDFlags.id, "id", "i", "", "The authorization ID (required)") - cmd.MarkFlagRequired("id") - - return cmd -} - -func authorizationActiveF(cmd *cobra.Command, args []string) error { - s, err := newAuthorizationService() - if err != nil { - return err - } - - us, err := newUserService() - if err != nil { - return err - } - - var id platform.ID - if err := id.DecodeFromString(authCRUDFlags.id); err != nil { - return err - } - - ctx := context.TODO() - if _, err := s.FindAuthorizationByID(ctx, id); err != nil { - return err - } - - a, err := s.UpdateAuthorization(context.Background(), id, &platform.AuthorizationUpdate{ - Status: platform.Active.Ptr(), - }) - if err != nil { - return err - } - - user, err := us.FindUserByID(context.Background(), a.UserID) - if err != nil { - return err - } - - ps := make([]string, 0, len(a.Permissions)) - for _, p := range a.Permissions { - ps = append(ps, p.String()) - } - - return writeTokens(cmd.OutOrStdout(), tokenPrintOpt{ - jsonOut: authCRUDFlags.json, - hideHeaders: authCRUDFlags.hideHeaders, - token: token{ - ID: a.ID, - Description: a.Description, - Token: a.Token, - Status: string(a.Status), - UserName: user.Name, - UserID: user.ID, - Permissions: ps, - }, - }) -} - -func authInactiveCmd(f *globalFlags, opt genericCLIOpts) *cobra.Command { - cmd := &cobra.Command{ - Use: "inactive", - Short: "Inactive authorization", - RunE: checkSetupRunEMiddleware(&flags)(authorizationInactiveF), - } - - f.registerFlags(opt.viper, cmd) - registerPrintOptions(opt.viper, cmd, &authCRUDFlags.hideHeaders, &authCRUDFlags.json) - cmd.Flags().StringVarP(&authCRUDFlags.id, "id", "i", "", "The authorization ID (required)") - cmd.MarkFlagRequired("id") - - return cmd -} - -func authorizationInactiveF(cmd *cobra.Command, args []string) error { - s, err := newAuthorizationService() - if err != nil { - return err - } - - us, err := newUserService() - if err != nil { - return err - } - - var id platform.ID - if err := id.DecodeFromString(authCRUDFlags.id); err != nil { - return err - } - - ctx := context.TODO() - if _, err = s.FindAuthorizationByID(ctx, id); err != nil { - return err - } - - a, err := s.UpdateAuthorization(context.Background(), id, &platform.AuthorizationUpdate{ - Status: platform.Inactive.Ptr(), - }) - if err != nil { - return err - } - - user, err := us.FindUserByID(context.Background(), a.UserID) - if err != nil { - return err - } - - ps := make([]string, 0, len(a.Permissions)) - for _, p := range a.Permissions { - ps = append(ps, p.String()) - } - - return writeTokens(cmd.OutOrStdout(), tokenPrintOpt{ - jsonOut: authCRUDFlags.json, - hideHeaders: authCRUDFlags.hideHeaders, - 
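Each of these subcommands funnels its result through `writeTokens` (defined below), which prints either JSON or a tab-separated table with the headers ID, Description, Token, User Name, User ID, and Permissions. A hypothetical `influx auth list` row, with every value invented for illustration:

```
ID                 Description  Token       User Name  User ID            Permissions
0123456789abcdef   CI token     dG9rZW4...  bob        fedcba9876543210   [read:orgs/0a1b2c3d4e5f6789/buckets write:orgs/0a1b2c3d4e5f6789/tasks]
```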
token: token{ - ID: a.ID, - Description: a.Description, - Token: a.Token, - Status: string(a.Status), - UserName: user.Name, - UserID: user.ID, - Permissions: ps, - }, - }) -} - -type tokenPrintOpt struct { - jsonOut bool - deleted bool - hideHeaders bool - token token - tokens []token -} - -func writeTokens(w io.Writer, printOpts tokenPrintOpt) error { - if printOpts.jsonOut { - var v interface{} = printOpts.tokens - if printOpts.tokens == nil { - v = printOpts.token - } - return writeJSON(w, v) - } - - tabW := internal.NewTabWriter(w) - defer tabW.Flush() - - tabW.HideHeaders(printOpts.hideHeaders) - - headers := []string{ - "ID", - "Description", - "Token", - "User Name", - "User ID", - "Permissions", - } - if printOpts.deleted { - headers = append(headers, "Deleted") - } - tabW.WriteHeaders(headers...) - - if printOpts.tokens == nil { - printOpts.tokens = append(printOpts.tokens, printOpts.token) - } - - for _, t := range printOpts.tokens { - m := map[string]interface{}{ - "ID": t.ID.String(), - "Description": t.Description, - "Token": t.Token, - "User Name": t.UserName, - "User ID": t.UserID.String(), - "Permissions": t.Permissions, - } - if printOpts.deleted { - m["Deleted"] = true - } - tabW.Write(m) - } - - return nil -} - -func newAuthorizationService() (platform.AuthorizationService, error) { - httpClient, err := newHTTPClient() - if err != nil { - return nil, err - } - - return &authorization.AuthorizationClientService{ - Client: httpClient, - }, nil -} diff --git a/cmd/influx/backup.go b/cmd/influx/backup.go deleted file mode 100644 index 34103d4a63e..00000000000 --- a/cmd/influx/backup.go +++ /dev/null @@ -1,342 +0,0 @@ -package main - -import ( - "compress/gzip" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/bolt" - "github.com/influxdata/influxdb/v2/http" - "github.com/influxdata/influxdb/v2/kv" - influxlogger "github.com/influxdata/influxdb/v2/logger" - "github.com/influxdata/influxdb/v2/tenant" - "github.com/influxdata/influxdb/v2/v1/services/meta" - "github.com/spf13/cobra" - "go.uber.org/zap" -) - -func cmdBackup(f *globalFlags, opts genericCLIOpts) *cobra.Command { - return newCmdBackupBuilder(f, opts).cmdBackup() -} - -type cmdBackupBuilder struct { - genericCLIOpts - *globalFlags - - bucketID string - bucketName string - org organization - path string - - manifest influxdb.Manifest - baseName string - - backupService *http.BackupService - kvStore *bolt.KVStore - kvService *kv.Service - tenantService *tenant.Service - metaClient *meta.Client - - logger *zap.Logger -} - -func newCmdBackupBuilder(f *globalFlags, opts genericCLIOpts) *cmdBackupBuilder { - return &cmdBackupBuilder{ - genericCLIOpts: opts, - globalFlags: f, - } -} - -func (b *cmdBackupBuilder) cmdBackup() *cobra.Command { - cmd := b.newCmd("backup", b.backupRunE) - b.org.register(b.viper, cmd, true) - cmd.Flags().StringVar(&b.bucketID, "bucket-id", "", "The ID of the bucket to backup") - cmd.Flags().StringVarP(&b.bucketName, "bucket", "b", "", "The name of the bucket to backup") - cmd.Use = "backup [flags] path" - cmd.Args = func(cmd *cobra.Command, args []string) error { - if len(args) == 0 { - return fmt.Errorf("must specify output path") - } else if len(args) > 1 { - return fmt.Errorf("too many args specified") - } - b.path = args[0] - return nil - } - cmd.Short = "Backup database" - cmd.Long = ` -Backs up InfluxDB to a 
directory. - -Examples: - # backup all data - influx backup /path/to/backup -` - return cmd -} - -func (b *cmdBackupBuilder) manifestPath() string { - return fmt.Sprintf("%s.manifest", b.baseName) -} - -func (b *cmdBackupBuilder) kvPath() string { - return fmt.Sprintf("%s.bolt", b.baseName) -} - -func (b *cmdBackupBuilder) shardPath(id uint64) string { - return fmt.Sprintf("%s.s%d", b.baseName, id) + ".tar.gz" -} - -func (b *cmdBackupBuilder) backupRunE(cmd *cobra.Command, args []string) (err error) { - ctx := context.Background() - - // Create top level logger - logconf := influxlogger.NewConfig() - if b.logger, err = logconf.New(os.Stdout); err != nil { - return err - } - - // Determine a base name for backup files from the current UTC time. - b.baseName = time.Now().UTC().Format(influxdb.BackupFilenamePattern) - - // Ensure directory exists. - if err := os.MkdirAll(b.path, 0777); err != nil { - return err - } - - ac := flags.config() - b.backupService = &http.BackupService{ - Addr: ac.Host, - Token: ac.Token, - InsecureSkipVerify: flags.skipVerify, - } - - // Back up Bolt database to file. - if err := b.backupKVStore(ctx); err != nil { - return err - } - - // Open bolt DB. - boltClient := bolt.NewClient(b.logger) - boltClient.Path = filepath.Join(b.path, b.kvPath()) - if err := boltClient.Open(ctx); err != nil { - return err - } - defer boltClient.Close() - - // Open meta store so we can iterate over metadata. - b.kvStore = bolt.NewKVStore(b.logger, filepath.Join(b.path, b.kvPath())) - b.kvStore.WithDB(boltClient.DB()) - - tenantStore := tenant.NewStore(b.kvStore) - b.tenantService = tenant.NewService(tenantStore) - - b.kvService = kv.NewService(b.logger, b.kvStore, b.tenantService, kv.ServiceConfig{}) - - b.metaClient = meta.NewClient(meta.NewConfig(), b.kvStore) - if err := b.metaClient.Open(); err != nil { - return err - } - - // Filter through organizations & buckets to back up the appropriate shards. - if err := b.backupOrganizations(ctx); err != nil { - return err - } - - if err := b.writeManifest(ctx); err != nil { - return err - } - - b.logger.Info("Backup complete") - - return nil -} - -// backupKVStore streams the bolt KV file to a file at path. -func (b *cmdBackupBuilder) backupKVStore(ctx context.Context) error { - path := filepath.Join(b.path, b.kvPath()) - b.logger.Info("Backing up KV store", zap.String("path", b.kvPath())) - - // Open writer to output file. - f, err := os.Create(path) - if err != nil { - return err - } - defer f.Close() - - // Stream bolt file from server, sync, and ensure file closes correctly. - if err := b.backupService.BackupKVStore(ctx, f); err != nil { - return err - } else if err := f.Sync(); err != nil { - return err - } else if err := f.Close(); err != nil { - return err - } - - // Lookup file size. - fi, err := os.Stat(path) - if err != nil { - return err - } - b.manifest.KV = influxdb.ManifestKVEntry{ - FileName: b.kvPath(), - Size: fi.Size(), - } - - return nil -} - -func (b *cmdBackupBuilder) backupOrganizations(ctx context.Context) (err error) { - // Build a filter if org ID or org name were specified. - var filter influxdb.OrganizationFilter - if b.org.id != "" { - if filter.ID, err = influxdb.IDFromString(b.org.id); err != nil { - return err - } - } else if b.org.name != "" { - filter.Name = &b.org.name - } - - // Retrieve a list of all matching organizations. - orgs, _, err := b.tenantService.FindOrganizations(ctx, filter) - if err != nil { - return err - } - - // Back up buckets in each matching organization. 
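Editor's note: the three path helpers above derive every backup artifact from one timestamped base name, so a single `influx backup` run produces a self-describing set of files. A minimal sketch of the naming scheme; the layout string stands in for `influxdb.BackupFilenamePattern`, whose actual value is not shown in this diff:

```go
package main

import (
	"fmt"
	"time"
)

// Assumed layout for influxdb.BackupFilenamePattern; illustration only.
const backupFilenamePattern = "20060102T150405Z"

func main() {
	base := time.Date(2020, 6, 1, 12, 30, 0, 0, time.UTC).Format(backupFilenamePattern)

	fmt.Println(base + ".manifest")          // manifestPath: backup metadata
	fmt.Println(base + ".bolt")              // kvPath: KV store snapshot
	fmt.Printf("%s.s%d.tar.gz\n", base, 42)  // shardPath: gzipped tar of shard 42
}
```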
- for _, org := range orgs { - b.logger.Info("Backing up organization", zap.String("id", org.ID.String()), zap.String("name", org.Name)) - if err := b.backupBuckets(ctx, org); err != nil { - return err - } - } - return nil -} - -func (b *cmdBackupBuilder) backupBuckets(ctx context.Context, org *influxdb.Organization) (err error) { - // Build a filter if bucket ID or bucket name were specified. - var filter influxdb.BucketFilter - filter.OrganizationID = &org.ID - if b.bucketID != "" { - if filter.ID, err = influxdb.IDFromString(b.bucketID); err != nil { - return err - } - } else if b.bucketName != "" { - filter.Name = &b.bucketName - } - - // Retrieve a list of all matching buckets. - buckets, _, err := b.tenantService.FindBuckets(ctx, filter) - if err != nil { - return err - } - - // Back up shards in each matching bucket. - for _, bkt := range buckets { - if err := b.backupBucket(ctx, org, bkt); err != nil { - return err - } - } - return nil -} - -func (b *cmdBackupBuilder) backupBucket(ctx context.Context, org *influxdb.Organization, bkt *influxdb.Bucket) (err error) { - b.logger.Info("Backing up bucket", zap.String("id", bkt.ID.String()), zap.String("name", bkt.Name)) - - // Lookup matching database from the meta store. - dbi := b.metaClient.Database(bkt.ID.String()) - if dbi == nil { - return fmt.Errorf("bucket database not found: %s", bkt.ID.String()) - } - - // Iterate over and back up each shard. - for _, rpi := range dbi.RetentionPolicies { - for _, sg := range rpi.ShardGroups { - if sg.Deleted() { - continue - } - - for _, sh := range sg.Shards { - if err := b.backupShard(ctx, org, bkt, rpi.Name, sh.ID); influxdb.ErrorCode(err) == influxdb.ENotFound { - b.logger.Warn("Shard removed during backup", zap.Uint64("shard_id", sh.ID)) - continue - } else if err != nil { - return err - } - } - } - } - return nil -} - -// backupShard streams a tar of TSM data for the shard. -func (b *cmdBackupBuilder) backupShard(ctx context.Context, org *influxdb.Organization, bkt *influxdb.Bucket, policy string, shardID uint64) error { - path := filepath.Join(b.path, b.shardPath(shardID)) - b.logger.Info("Backing up shard", zap.Uint64("id", shardID), zap.String("path", b.shardPath(shardID))) - - // Open writer to output file. - f, err := os.Create(path) - if err != nil { - return err - } - defer f.Close() - - // Wrap file writer with a gzip writer. - gw := gzip.NewWriter(f) - defer gw.Close() - - // Stream file from server, sync, and ensure file closes correctly. - if err := b.backupService.BackupShard(ctx, gw, shardID, time.Time{}); err != nil { - return err - } else if err := gw.Close(); err != nil { - return err - } else if err := f.Sync(); err != nil { - return err - } else if err := f.Close(); err != nil { - return err - } - - // Determine file size. - fi, err := os.Stat(path) - if err != nil { - return err - } - - // Update manifest. - b.manifest.Files = append(b.manifest.Files, influxdb.ManifestEntry{ - OrganizationID: org.ID.String(), - OrganizationName: org.Name, - BucketID: bkt.ID.String(), - BucketName: bkt.Name, - ShardID: shardID, - FileName: b.shardPath(shardID), - Size: fi.Size(), - LastModified: fi.ModTime().UTC(), - }) - - return nil -} - -// writeManifest writes the manifest file out. 
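Editor's note: `backupShard` above is deliberate about close ordering — the gzip writer must be closed before the file is synced and closed, or the gzip footer never reaches disk and the archive is truncated. The same ordering in a standalone sketch (names are illustrative, not from this repo):

```go
package main

import (
	"compress/gzip"
	"io"
	"os"
	"strings"
)

// writeGzipped copies src into a gzip-compressed file, mirroring the
// close/sync ordering used by backupShard.
func writeGzipped(path string, src io.Reader) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close() // backstop; error from a second Close is ignored

	gw := gzip.NewWriter(f)
	if _, err := io.Copy(gw, src); err != nil {
		return err
	}
	if err := gw.Close(); err != nil { // 1. flush compressed data + gzip footer
		return err
	}
	if err := f.Sync(); err != nil { // 2. force bytes to disk
		return err
	}
	return f.Close() // 3. release the handle
}

func main() {
	if err := writeGzipped("demo.txt.gz", strings.NewReader("example payload")); err != nil {
		panic(err)
	}
}
```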
-func (b *cmdBackupBuilder) writeManifest(ctx context.Context) error { - path := filepath.Join(b.path, b.manifestPath()) - b.logger.Info("Writing manifest", zap.String("path", b.manifestPath())) - - buf, err := json.MarshalIndent(b.manifest, "", " ") - if err != nil { - return fmt.Errorf("create manifest: %w", err) - } - buf = append(buf, '\n') - return ioutil.WriteFile(path, buf, 0600) -} - -func (b *cmdBackupBuilder) newCmd(use string, runE func(*cobra.Command, []string) error) *cobra.Command { - cmd := b.genericCLIOpts.newCmd(use, runE, true) - b.genericCLIOpts.registerPrintOptions(cmd) - b.globalFlags.registerFlags(b.viper, cmd) - return cmd -} diff --git a/cmd/influx/bucket.go b/cmd/influx/bucket.go deleted file mode 100644 index b5cdd8b47c9..00000000000 --- a/cmd/influx/bucket.go +++ /dev/null @@ -1,358 +0,0 @@ -package main - -import ( - "context" - "fmt" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/cmd/internal" - "github.com/influxdata/influxdb/v2/tenant" - "github.com/spf13/cobra" -) - -type bucketSVCsFn func() (influxdb.BucketService, influxdb.OrganizationService, error) - -func cmdBucket(f *globalFlags, opt genericCLIOpts) *cobra.Command { - builder := newCmdBucketBuilder(newBucketSVCs, f, opt) - return builder.cmd() -} - -type cmdBucketBuilder struct { - genericCLIOpts - *globalFlags - - svcFn bucketSVCsFn - - id string - hideHeaders bool - json bool - name string - description string - org organization - retention string -} - -func newCmdBucketBuilder(svcsFn bucketSVCsFn, f *globalFlags, opts genericCLIOpts) *cmdBucketBuilder { - return &cmdBucketBuilder{ - globalFlags: f, - genericCLIOpts: opts, - svcFn: svcsFn, - } -} - -func (b *cmdBucketBuilder) cmd() *cobra.Command { - cmd := b.newCmd("bucket", nil) - cmd.Short = "Bucket management commands" - cmd.TraverseChildren = true - cmd.Run = seeHelp - cmd.AddCommand( - b.cmdCreate(), - b.cmdDelete(), - b.cmdList(), - b.cmdUpdate(), - ) - - return cmd -} - -func (b *cmdBucketBuilder) cmdCreate() *cobra.Command { - cmd := b.newCmd("create", b.cmdCreateRunEFn) - cmd.Short = "Create bucket" - - opts := flagOpts{ - { - DestP: &b.name, - Flag: "name", - Short: 'n', - EnvVar: "BUCKET_NAME", - Desc: "New bucket name", - Required: true, - }, - } - opts.mustRegister(b.viper, cmd) - - cmd.Flags().StringVarP(&b.description, "description", "d", "", "Description of bucket that will be created") - cmd.Flags().StringVarP(&b.retention, "retention", "r", "", "Duration bucket will retain data. 0 is infinite. 
Default is 0.") - b.org.register(b.viper, cmd, false) - b.registerPrintFlags(cmd) - - return cmd -} - -func (b *cmdBucketBuilder) cmdCreateRunEFn(*cobra.Command, []string) error { - if err := b.org.validOrgFlags(b.globalFlags); err != nil { - return err - } - - bktSVC, orgSVC, err := b.svcFn() - if err != nil { - return err - } - - dur, err := internal.RawDurationToTimeDuration(b.retention) - if err != nil { - return err - } - - bkt := &influxdb.Bucket{ - Name: b.name, - Description: b.description, - RetentionPeriod: dur, - } - bkt.OrgID, err = b.org.getID(orgSVC) - if err != nil { - return err - } - - if err := bktSVC.CreateBucket(context.Background(), bkt); err != nil { - return fmt.Errorf("failed to create bucket: %v", err) - } - - return b.printBuckets(bucketPrintOpt{bucket: bkt}) -} - -func (b *cmdBucketBuilder) cmdDelete() *cobra.Command { - cmd := b.newCmd("delete", b.cmdDeleteRunEFn) - cmd.Short = "Delete bucket" - - cmd.Flags().StringVarP(&b.id, "id", "i", "", "The bucket ID, required if name isn't provided") - cmd.Flags().StringVarP(&b.name, "name", "n", "", "The bucket name, org or org-id will be required by choosing this") - b.org.register(b.viper, cmd, false) - b.registerPrintFlags(cmd) - - return cmd -} - -func (b *cmdBucketBuilder) cmdDeleteRunEFn(cmd *cobra.Command, args []string) error { - bktSVC, _, err := b.svcFn() - if err != nil { - return err - } - - var id influxdb.ID - var filter influxdb.BucketFilter - if b.id == "" && b.name != "" { - if err = b.org.validOrgFlags(&flags); err != nil { - return err - } - filter.Name = &b.name - if b.org.id != "" { - if filter.OrganizationID, err = influxdb.IDFromString(b.org.id); err != nil { - return err - } - } else if b.org.name != "" { - filter.Org = &b.org.name - } - - } else if err := id.DecodeFromString(b.id); err != nil { - return fmt.Errorf("failed to decode bucket id %q: %v", b.id, err) - } - - if id.Valid() { - filter.ID = &id - } - - ctx := context.Background() - bkt, err := bktSVC.FindBucket(ctx, filter) - if err != nil { - return fmt.Errorf("failed to find bucket with id %q: %v", id, err) - } - if err := bktSVC.DeleteBucket(ctx, bkt.ID); err != nil { - return fmt.Errorf("failed to delete bucket with id %q: %v", id, err) - } - return b.printBuckets(bucketPrintOpt{ - deleted: true, - bucket: bkt, - }) -} - -func (b *cmdBucketBuilder) cmdList() *cobra.Command { - cmd := b.newCmd("list", b.cmdListRunEFn) - cmd.Short = "List buckets" - cmd.Aliases = []string{"find", "ls"} - - opts := flagOpts{ - { - DestP: &b.name, - Flag: "name", - Short: 'n', - EnvVar: "BUCKET_NAME", - Desc: "The bucket name", - }, - } - opts.mustRegister(b.viper, cmd) - - b.org.register(b.viper, cmd, false) - b.registerPrintFlags(cmd) - cmd.Flags().StringVarP(&b.id, "id", "i", "", "The bucket ID") - - return cmd -} - -func (b *cmdBucketBuilder) cmdListRunEFn(cmd *cobra.Command, args []string) error { - if err := b.org.validOrgFlags(b.globalFlags); err != nil { - return err - } - - bktSVC, _, err := b.svcFn() - if err != nil { - return err - } - - var filter influxdb.BucketFilter - if b.name != "" { - filter.Name = &b.name - } - if b.id != "" { - id, err := influxdb.IDFromString(b.id) - if err != nil { - return fmt.Errorf("failed to decode bucket id %q: %v", b.id, err) - } - filter.ID = id - } - if b.org.id != "" { - orgID, err := influxdb.IDFromString(b.org.id) - if err != nil { - return fmt.Errorf("failed to decode org id %q: %v", b.org.id, err) - } - filter.OrganizationID = orgID - } - if b.org.name != "" { - filter.Org = &b.org.name - } - - buckets, 
_, err := bktSVC.FindBuckets(context.Background(), filter) - if err != nil { - return fmt.Errorf("failed to retrieve buckets: %s", err) - } - - return b.printBuckets(bucketPrintOpt{ - buckets: buckets, - }) -} - -func (b *cmdBucketBuilder) cmdUpdate() *cobra.Command { - cmd := b.newCmd("update", b.cmdUpdateRunEFn) - cmd.Short = "Update bucket" - - opts := flagOpts{ - { - DestP: &b.name, - Flag: "name", - Short: 'n', - EnvVar: "BUCKET_NAME", - Desc: "New bucket name", - }, - } - opts.mustRegister(b.viper, cmd) - - b.registerPrintFlags(cmd) - cmd.Flags().StringVarP(&b.id, "id", "i", "", "The bucket ID (required)") - cmd.Flags().StringVarP(&b.description, "description", "d", "", "Description of bucket that will be created") - cmd.MarkFlagRequired("id") - cmd.Flags().StringVarP(&b.retention, "retention", "r", "", "Duration bucket will retain data. 0 is infinite. Default is 0.") - - return cmd -} - -func (b *cmdBucketBuilder) cmdUpdateRunEFn(cmd *cobra.Command, args []string) error { - bktSVC, _, err := b.svcFn() - if err != nil { - return err - } - - var id influxdb.ID - if err := id.DecodeFromString(b.id); err != nil { - return fmt.Errorf("failed to decode bucket id %q: %v", b.id, err) - } - - var update influxdb.BucketUpdate - if b.name != "" { - update.Name = &b.name - } - if b.description != "" { - update.Description = &b.description - } - - dur, err := internal.RawDurationToTimeDuration(b.retention) - if err != nil { - return err - } - if dur != 0 { - update.RetentionPeriod = &dur - } - - bkt, err := bktSVC.UpdateBucket(context.Background(), id, update) - if err != nil { - return fmt.Errorf("failed to update bucket: %v", err) - } - - return b.printBuckets(bucketPrintOpt{bucket: bkt}) -} - -func (b *cmdBucketBuilder) newCmd(use string, runE func(*cobra.Command, []string) error) *cobra.Command { - cmd := b.genericCLIOpts.newCmd(use, runE, true) - b.globalFlags.registerFlags(b.viper, cmd) - return cmd -} - -func (b *cmdBucketBuilder) registerPrintFlags(cmd *cobra.Command) { - registerPrintOptions(b.viper, cmd, &b.hideHeaders, &b.json) -} - -type bucketPrintOpt struct { - deleted bool - bucket *influxdb.Bucket - buckets []*influxdb.Bucket -} - -func (b *cmdBucketBuilder) printBuckets(printOpt bucketPrintOpt) error { - if b.json { - var v interface{} = printOpt.buckets - if printOpt.buckets == nil { - v = printOpt.bucket - } - return b.writeJSON(v) - } - - w := b.newTabWriter() - defer w.Flush() - - w.HideHeaders(b.hideHeaders) - - headers := []string{"ID", "Name", "Retention", "Organization ID"} - if printOpt.deleted { - headers = append(headers, "Deleted") - } - w.WriteHeaders(headers...) 
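Editor's note: `printBuckets` (begun above, continued below) renders rows through the repo's tab-writer helper; the effect is ordinary `text/tabwriter` output with a header row. A rough stdlib-only approximation of the table it prints — the IDs and widths here are made up:

```go
package main

import (
	"fmt"
	"os"
	"text/tabwriter"
	"time"
)

func main() {
	w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)
	defer w.Flush()

	fmt.Fprintln(w, "ID\tName\tRetention\tOrganization ID")
	fmt.Fprintf(w, "%s\t%s\t%v\t%s\n",
		"031c8cbf4d269000", "my-bucket", 72*time.Hour, "d0403667bd718000")
}
```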
- - if printOpt.bucket != nil { - printOpt.buckets = append(printOpt.buckets, printOpt.bucket) - } - - for _, bkt := range printOpt.buckets { - m := map[string]interface{}{ - "ID": bkt.ID.String(), - "Name": bkt.Name, - "Retention": bkt.RetentionPeriod, - "Organization ID": bkt.OrgID.String(), - } - if printOpt.deleted { - m["Deleted"] = true - } - w.Write(m) - } - - return nil -} - -func newBucketSVCs() (influxdb.BucketService, influxdb.OrganizationService, error) { - httpClient, err := newHTTPClient() - if err != nil { - return nil, nil, err - } - - orgSvc := &tenant.OrgClientService{Client: httpClient} - - return &tenant.BucketClientService{Client: httpClient}, orgSvc, nil -} diff --git a/cmd/influx/bucket_test.go b/cmd/influx/bucket_test.go deleted file mode 100644 index a1763121da0..00000000000 --- a/cmd/influx/bucket_test.go +++ /dev/null @@ -1,481 +0,0 @@ -package main - -import ( - "bytes" - "context" - "fmt" - "io/ioutil" - "os" - "reflect" - "testing" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/mock" - "github.com/spf13/cobra" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestCmdBucket(t *testing.T) { - orgID := influxdb.ID(9000) - - fakeSVCFn := func(svc influxdb.BucketService) bucketSVCsFn { - return func() (influxdb.BucketService, influxdb.OrganizationService, error) { - return svc, &mock.OrganizationService{ - FindOrganizationF: func(ctx context.Context, filter influxdb.OrganizationFilter) (*influxdb.Organization, error) { - return &influxdb.Organization{ID: orgID, Name: "influxdata"}, nil - }, - }, nil - } - } - - t.Run("create", func(t *testing.T) { - tests := []struct { - name string - expectedBucket influxdb.Bucket - flags []string - envVars map[string]string - }{ - { - name: "basic just name", - flags: []string{"--name=new name", "--org=org name"}, - expectedBucket: influxdb.Bucket{ - Name: "new name", - OrgID: orgID, - }, - }, - { - name: "with description and retention period", - flags: []string{ - "--name=new name", - "--description=desc", - "--retention=1m", - "--org=org name", - }, - expectedBucket: influxdb.Bucket{ - Name: "new name", - Description: "desc", - RetentionPeriod: time.Minute, - OrgID: orgID, - }, - }, - { - name: "shorts", - flags: []string{ - "-n=new name", - "-d=desc", - "-r=1m", - "-o=org name", - }, - expectedBucket: influxdb.Bucket{ - Name: "new name", - Description: "desc", - RetentionPeriod: time.Minute, - OrgID: orgID, - }, - }, - { - name: "env vars", - flags: []string{ - "-d=desc", - "-r=1m", - "-o=org name", - }, - envVars: map[string]string{"INFLUX_BUCKET_NAME": "new name"}, - expectedBucket: influxdb.Bucket{ - Name: "new name", - Description: "desc", - RetentionPeriod: time.Minute, - OrgID: orgID, - }, - }, - } - - cmdFn := func(expectedBkt influxdb.Bucket) func(*globalFlags, genericCLIOpts) *cobra.Command { - svc := mock.NewBucketService() - svc.CreateBucketFn = func(ctx context.Context, bucket *influxdb.Bucket) error { - if expectedBkt != *bucket { - return fmt.Errorf("unexpected bucket;\n\twant= %+v\n\tgot= %+v", expectedBkt, *bucket) - } - return nil - } - - return func(g *globalFlags, opt genericCLIOpts) *cobra.Command { - return newCmdBucketBuilder(fakeSVCFn(svc), g, opt).cmd() - } - } - - for _, tt := range tests { - fn := func(t *testing.T) { - defer addEnvVars(t, tt.envVars)() - - builder := newInfluxCmdBuilder( - in(new(bytes.Buffer)), - out(ioutil.Discard), - ) - cmd := 
builder.cmd(cmdFn(tt.expectedBucket)) - cmd.SetArgs(append([]string{"bucket", "create"}, tt.flags...)) - - require.NoError(t, cmd.Execute()) - } - - t.Run(tt.name, fn) - } - }) - - t.Run("delete", func(t *testing.T) { - tests := []struct { - name string - expectedID influxdb.ID - flags []string - }{ - { - name: "with description and retention period", - expectedID: influxdb.ID(1), - flags: []string{"--id=" + influxdb.ID(1).String()}, - }, - { - name: "shorts", - expectedID: influxdb.ID(1), - flags: []string{"-i=" + influxdb.ID(1).String()}, - }, - { - name: "with name and org name", - expectedID: influxdb.ID(1), - flags: []string{"--name=n1", "--org=org1"}, - }, - { - name: "with name and org name short", - expectedID: influxdb.ID(1), - flags: []string{"-n=n1", "-o=org1"}, - }, - { - name: "with name and org id", - expectedID: influxdb.ID(1), - flags: []string{"--name=n1", "--org-id=" + influxdb.ID(3).String()}, - }, - } - - cmdFn := func(expectedID influxdb.ID) func(*globalFlags, genericCLIOpts) *cobra.Command { - svc := mock.NewBucketService() - svc.FindBucketByIDFn = func(ctx context.Context, id influxdb.ID) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ID: id}, nil - } - svc.FindBucketFn = func(ctx context.Context, filter influxdb.BucketFilter) (*influxdb.Bucket, error) { - if filter.ID != nil { - return &influxdb.Bucket{ID: *filter.ID}, nil - } - if filter.Name != nil { - return &influxdb.Bucket{ID: expectedID}, nil - } - return nil, nil - } - svc.DeleteBucketFn = func(ctx context.Context, id influxdb.ID) error { - if expectedID != id { - return fmt.Errorf("unexpected id:\n\twant= %s\n\tgot= %s", expectedID, id) - } - return nil - } - - return func(g *globalFlags, opt genericCLIOpts) *cobra.Command { - return newCmdBucketBuilder(fakeSVCFn(svc), g, opt).cmd() - } - } - - for _, tt := range tests { - fn := func(t *testing.T) { - defer addEnvVars(t, envVarsZeroMap)() - - outBuf := new(bytes.Buffer) - defer func() { - if t.Failed() && outBuf.Len() > 0 { - t.Log(outBuf.String()) - } - }() - builder := newInfluxCmdBuilder( - in(new(bytes.Buffer)), - out(outBuf), - ) - - cmd := builder.cmd(cmdFn(tt.expectedID)) - cmd.SetArgs(append([]string{"bucket", "delete"}, tt.flags...)) - - require.NoError(t, cmd.Execute()) - } - - t.Run(tt.name, fn) - } - }) - - t.Run("list", func(t *testing.T) { - type called struct { - name string - id influxdb.ID - orgID influxdb.ID - org string - } - - tests := []struct { - name string - expected called - flags []string - command string - envVars map[string]string - }{ - { - name: "org id", - flags: []string{"--org-id=" + influxdb.ID(3).String()}, - envVars: envVarsZeroMap, - expected: called{orgID: 3}, - }, - { - name: "id", - flags: []string{ - "--id=" + influxdb.ID(2).String(), - "--org-id=" + influxdb.ID(3).String(), - }, - envVars: envVarsZeroMap, - expected: called{ - id: 2, - orgID: 3, - }, - }, - { - name: "org", - flags: []string{"--org=rg"}, - envVars: envVarsZeroMap, - expected: called{org: "rg"}, - }, - { - name: "name", - flags: []string{"--org=rg", "--name=name1"}, - envVars: envVarsZeroMap, - expected: called{org: "rg", name: "name1"}, - }, - { - name: "shorts", - flags: []string{ - "-o=rg", - "-n=name1", - "-i=" + influxdb.ID(1).String(), - }, - envVars: envVarsZeroMap, - expected: called{org: "rg", name: "name1", id: 1}, - }, - { - name: "env vars", - envVars: map[string]string{ - "INFLUX_ORG": "rg", - "INFLUX_BUCKET_NAME": "name1", - }, - flags: []string{"-i=" + influxdb.ID(1).String()}, - expected: called{org: "rg", name: "name1", id: 1}, 
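Editor's note: the test table above (continued below) leans on mocks whose behavior is injected per case through function fields, keeping each entry declarative. The pattern in isolation, with local stand-in types rather than the repo's real `mock` package:

```go
package main

import (
	"fmt"
	"testing"
)

// bucketDeleter is a stand-in for the mocked BucketService.
type bucketDeleter struct {
	deleteFn func(id uint64) error
}

func (s bucketDeleter) Delete(id uint64) error { return s.deleteFn(id) }

func TestDelete(t *testing.T) {
	want := uint64(1)
	svc := bucketDeleter{deleteFn: func(id uint64) error {
		if id != want {
			return fmt.Errorf("unexpected id:\n\twant= %d\n\tgot= %d", want, id)
		}
		return nil
	}}
	if err := svc.Delete(1); err != nil {
		t.Fatal(err)
	}
}
```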
- }, - { - name: "env vars 2", - envVars: map[string]string{ - "INFLUX_ORG": "", - "INFLUX_ORG_ID": influxdb.ID(2).String(), - "INFLUX_BUCKET_NAME": "name1", - }, - flags: []string{"-i=" + influxdb.ID(1).String()}, - expected: called{orgID: 2, name: "name1", id: 1}, - }, - { - name: "ls alias", - command: "ls", - envVars: envVarsZeroMap, - flags: []string{"--org-id=" + influxdb.ID(3).String()}, - expected: called{orgID: 3}, - }, - { - name: "find alias", - command: "find", - envVars: envVarsZeroMap, - flags: []string{"--org-id=" + influxdb.ID(3).String()}, - expected: called{orgID: 3}, - }, - } - - cmdFn := func() (func(*globalFlags, genericCLIOpts) *cobra.Command, *called) { - calls := new(called) - - svc := mock.NewBucketService() - svc.FindBucketsFn = func(ctx context.Context, f influxdb.BucketFilter, opt ...influxdb.FindOptions) ([]*influxdb.Bucket, int, error) { - if f.ID != nil { - calls.id = *f.ID - } - if f.OrganizationID != nil { - calls.orgID = *f.OrganizationID - } - if f.Name != nil { - calls.name = *f.Name - } - if f.Org != nil { - calls.org = *f.Org - } - return nil, 0, nil - } - - return func(g *globalFlags, opt genericCLIOpts) *cobra.Command { - return newCmdBucketBuilder(fakeSVCFn(svc), g, opt).cmd() - }, calls - } - - for _, tt := range tests { - fn := func(t *testing.T) { - defer addEnvVars(t, tt.envVars)() - - builder := newInfluxCmdBuilder( - in(new(bytes.Buffer)), - out(ioutil.Discard), - ) - - cmdFn, calls := cmdFn() - cmd := builder.cmd(cmdFn) - - if tt.command == "" { - tt.command = "list" - } - - cmd.SetArgs(append([]string{"bucket", tt.command}, tt.flags...)) - - require.NoError(t, cmd.Execute()) - assert.Equal(t, tt.expected, *calls) - } - - t.Run(tt.name, fn) - } - }) - - t.Run("update", func(t *testing.T) { - tests := []struct { - name string - expected influxdb.BucketUpdate - flags []string - envVars map[string]string - }{ - { - name: "basic just name", - flags: []string{ - "--id=" + influxdb.ID(3).String(), - "--name=new name", - }, - expected: influxdb.BucketUpdate{ - Name: strPtr("new name"), - }, - }, - { - name: "with all fields", - flags: []string{ - "--id=" + influxdb.ID(3).String(), - "--name=new name", - "--description=desc", - "--retention=1m", - }, - expected: influxdb.BucketUpdate{ - Name: strPtr("new name"), - Description: strPtr("desc"), - RetentionPeriod: durPtr(time.Minute), - }, - }, - { - name: "shorts", - flags: []string{ - "-i=" + influxdb.ID(3).String(), - "-n=new name", - "-d=desc", - "-r=1m", - }, - expected: influxdb.BucketUpdate{ - Name: strPtr("new name"), - Description: strPtr("desc"), - RetentionPeriod: durPtr(time.Minute), - }, - }, - { - name: "env var", - flags: []string{ - "-i=" + influxdb.ID(3).String(), - "-d=desc", - "-r=1m", - }, - envVars: map[string]string{"INFLUX_BUCKET_NAME": "new name"}, - expected: influxdb.BucketUpdate{ - Name: strPtr("new name"), - Description: strPtr("desc"), - RetentionPeriod: durPtr(time.Minute), - }, - }, - } - - cmdFn := func(expectedUpdate influxdb.BucketUpdate) func(*globalFlags, genericCLIOpts) *cobra.Command { - svc := mock.NewBucketService() - svc.UpdateBucketFn = func(ctx context.Context, id influxdb.ID, upd influxdb.BucketUpdate) (*influxdb.Bucket, error) { - if id != 3 { - return nil, fmt.Errorf("unexpected id:\n\twant= %s\n\tgot= %s", influxdb.ID(3), id) - } - if !reflect.DeepEqual(expectedUpdate, upd) { - return nil, fmt.Errorf("unexpected bucket update;\n\twant= %+v\n\tgot= %+v", expectedUpdate, upd) - } - return &influxdb.Bucket{}, nil - } - - return func(g *globalFlags, opt 
genericCLIOpts) *cobra.Command { - return newCmdBucketBuilder(fakeSVCFn(svc), g, opt).cmd() - } - } - - for _, tt := range tests { - fn := func(t *testing.T) { - defer addEnvVars(t, tt.envVars)() - - builder := newInfluxCmdBuilder( - in(new(bytes.Buffer)), - out(ioutil.Discard), - ) - - cmd := builder.cmd(cmdFn(tt.expected)) - - cmd.SetArgs(append([]string{"bucket", "update"}, tt.flags...)) - require.NoError(t, cmd.Execute()) - } - - t.Run(tt.name, fn) - } - }) -} - -func strPtr(s string) *string { - return &s -} - -func durPtr(d time.Duration) *time.Duration { - return &d -} - -func addEnvVars(t *testing.T, envVars map[string]string) func() { - t.Helper() - - var initialEnvVars []struct{ key, val string } - for key, val := range envVars { - if k := os.Getenv(key); k != "" { - initialEnvVars = append(initialEnvVars, struct{ key, val string }{ - key: key, - val: k, - }) - } - - require.NoError(t, os.Setenv(key, val)) - } - return func() { - for key := range envVars { - require.NoError(t, os.Unsetenv(key)) - } - - for _, envVar := range initialEnvVars { - require.NoError(t, os.Setenv(envVar.key, envVar.val)) - } - } -} diff --git a/cmd/influx/completion.go b/cmd/influx/completion.go deleted file mode 100644 index 06153588ce0..00000000000 --- a/cmd/influx/completion.go +++ /dev/null @@ -1,51 +0,0 @@ -package main - -import ( - "io" - - "github.com/spf13/cobra" -) - -func completionCmd(rootCmd *cobra.Command) *cobra.Command { - writeZSH := func(w io.Writer) error { - if err := rootCmd.GenZshCompletion(w); err != nil { - return err - } - _, err := io.WriteString(w, "\ncompdef _influx influx\n") - return err - } - - return &cobra.Command{ - Use: "completion [bash|zsh]", - Short: "Generates completion scripts", - Args: cobra.ExactValidArgs(1), - ValidArgs: []string{"bash", "zsh", "powershell"}, - Long: ` - Outputs shell completion for the given shell (bash or zsh) - - OS X: - $ source $(brew --prefix)/etc/bash_completion # for bash users - $ source <(influx completion bash) # for bash users - $ source <(influx completion zsh) # for zsh users - - Ubuntu: - $ source /etc/bash-completion # for bash users - $ source <(influx completion bash) # for bash users - $ source <(influx completion zsh) # for zsh users - - Additionally, you may want to add this to your .bashrc/.zshrc -`, - RunE: func(cmd *cobra.Command, args []string) error { - writer := rootCmd.OutOrStdout() - switch args[0] { - case "bash": - return rootCmd.GenBashCompletion(writer) - case "powershell": - return rootCmd.GenPowerShellCompletion(writer) - case "zsh": - return writeZSH(writer) - } - return nil - }, - } -} diff --git a/cmd/influx/config.go b/cmd/influx/config.go deleted file mode 100644 index 6a74f3a6a2f..00000000000 --- a/cmd/influx/config.go +++ /dev/null @@ -1,396 +0,0 @@ -package main - -import ( - "errors" - "net/url" - "path/filepath" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/cmd/influx/config" - "github.com/spf13/cobra" -) - -func cmdConfig(f *globalFlags, opt genericCLIOpts) *cobra.Command { - builder := cmdConfigBuilder{ - genericCLIOpts: opt, - globalFlags: f, - svcFn: newConfigService, - } - return builder.cmd() -} - -type cmdConfigBuilder struct { - genericCLIOpts - *globalFlags - - name string - url string - token string - active bool - org string - - json bool - hideHeaders bool - - svcFn func(path string) config.Service -} - -func (b *cmdConfigBuilder) cmd() *cobra.Command { - cmd := b.newCmd("config [config name]", b.cmdSwitchActiveRunEFn, false) 
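Editor's note: cobra has no built-in notion of "zero or one argument that changes the verb's meaning", so the config root command (continued below) accepts arbitrary args and branches inside its RunE. A minimal sketch of that dispatch with the service calls stubbed out; it uses the upstream `github.com/spf13/cobra` import path, which this repo vendors under `github.com`:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func newConfigCmd() *cobra.Command {
	return &cobra.Command{
		Use:  "config [config name]",
		Args: cobra.ArbitraryArgs,
		RunE: func(cmd *cobra.Command, args []string) error {
			if len(args) > 0 {
				// With an argument: switch the active config.
				fmt.Fprintf(cmd.OutOrStdout(), "switched to %q\n", args[0])
				return nil
			}
			// Without an argument: print the currently active config.
			fmt.Fprintln(cmd.OutOrStdout(), "active: default")
			return nil
		},
	}
}

func main() {
	if err := newConfigCmd().Execute(); err != nil {
		panic(err)
	}
}
```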
- cmd.Args = cobra.ArbitraryArgs - cmd.Short = "Config management commands" - cmd.Long = ` - Providing no argument to the config command will print the active configuration. When - an argument is provided, the active config will be switched to the config with a name - matching that of the argument provided. - - Examples: - # show active config - influx config - - # set active config to previously active config - influx config - - - # set active config - influx config $CONFIG_NAME - - The influx config command displays the active InfluxDB connection configuration and - manages multiple connection configurations stored, by default, in ~/.influxdbv2/configs. - Each connection includes a URL, token, associated organization, and active setting. - InfluxDB reads the token from the active connection configuration, so you don't have - to manually enter a token to log into InfluxDB. - - For information about the config command, see - https://v2.docs.influxdata.com/v2.0/reference/cli/influx/config -` - - b.registerFilepath(cmd) - cmd.AddCommand( - b.cmdCreate(), - b.cmdDelete(), - b.cmdUpdate(), - b.cmdList(), - ) - return cmd -} - -func (b *cmdConfigBuilder) cmdSwitchActiveRunEFn(cmd *cobra.Command, args []string) error { - svc := b.newConfigSVC() - - if len(args) > 0 { - cfg, err := svc.SwitchActive(args[0]) - if err != nil { - return err - } - - return b.printConfigs(configPrintOpts{ - config: cfg, - }) - } - - configs, err := svc.ListConfigs() - if err != nil { - return err - } - - var active config.Config - for _, cfg := range configs { - if cfg.Active { - active = cfg - break - } - } - if !active.Active { - return nil - } - - return b.printConfigs(configPrintOpts{ - config: active, - }) -} - -func (b *cmdConfigBuilder) cmdCreate() *cobra.Command { - cmd := b.newCmd("create", b.cmdCreateRunEFn, false) - cmd.Short = "Create config" - cmd.Long = ` - The influx config create command creates a new InfluxDB connection configuration - and stores it in the configs file (by default, stored at ~/.influxdbv2/configs). - - Examples: - # create a config and set it active - influx config create -a -n $CFG_NAME -u $HOST_URL -t $TOKEN -o $ORG_NAME - - # create a config and without setting it active - influx config create -n $CFG_NAME -u $HOST_URL -t $TOKEN -o $ORG_NAME - - For information about the config command, see - https://v2.docs.influxdata.com/v2.0/reference/cli/influx/config - and - https://v2.docs.influxdata.com/v2.0/reference/cli/influx/config/create` - - b.registerFilepath(cmd) - b.registerPrintFlags(cmd) - b.registerConfigSettingFlags(cmd) - cmd.MarkFlagRequired("token") - cmd.MarkFlagRequired("host-url") - return cmd -} - -func (b *cmdConfigBuilder) cmdCreateRunEFn(*cobra.Command, []string) error { - svc := b.newConfigSVC() - - host, err := b.getValidHostURL() - if err != nil { - return err - } - - cfg, err := svc.CreateConfig(config.Config{ - Name: b.name, - Host: host, - Token: b.token, - Org: b.org, - Active: b.active, - }) - if err != nil { - return err - } - - return b.printConfigs(configPrintOpts{ - config: cfg, - }) -} - -func (b *cmdConfigBuilder) cmdDelete() *cobra.Command { - cmd := b.newCmd("rm [cfg_name]", b.cmdDeleteRunEFn, false) - cmd.Aliases = []string{"delete", "remove"} - cmd.Args = cobra.ArbitraryArgs - cmd.Short = "Delete config" - cmd.Long = ` - The influx config delete command deletes an InfluxDB connection configuration from - the configs file (by default, stored at ~/.influxdbv2/configs). 
- - Examples: - # delete a config - influx config rm $CFG_NAME - - # delete multiple configs - influx config rm $CFG_NAME_1 $CFG_NAME_2 - - For information about the config command, see - https://v2.docs.influxdata.com/v2.0/reference/cli/influx/config - and - https://v2.docs.influxdata.com/v2.0/reference/cli/influx/config/remove` - - b.registerPrintFlags(cmd) - cmd.Flags().StringVarP(&b.name, "name", "n", "", "The config name (required)") - cmd.Flags().MarkDeprecated("name", "provide the name as an arg; example: influx config rm $CFG_NAME") - - return cmd -} - -func (b *cmdConfigBuilder) cmdDeleteRunEFn(cmd *cobra.Command, args []string) error { - svc := b.newConfigSVC() - - deletedConfigs := make(config.Configs) - for _, name := range append(args, b.name) { - if name == "" { - continue - } - - cfg, err := svc.DeleteConfig(name) - if influxdb.ErrorCode(err) == influxdb.ENotFound { - continue - } - if err != nil { - return err - } - deletedConfigs[name] = cfg - } - - return b.printConfigs(configPrintOpts{ - delete: true, - configs: deletedConfigs, - }) -} - -func (b *cmdConfigBuilder) cmdUpdate() *cobra.Command { - cmd := b.newCmd("set", b.cmdUpdateRunEFn, false) - cmd.Aliases = []string{"update"} - cmd.Short = "Update config" - cmd.Long = ` - The influx config set command updates information in an InfluxDB connection - configuration in the configs file (by default, stored at ~/.influxdbv2/configs). - - Examples: - # update a config and set active - influx config set -a -n $CFG_NAME -u $HOST_URL -t $TOKEN -o $ORG_NAME - - # update a config and do not set to active - influx config set -n $CFG_NAME -u $HOST_URL -t $TOKEN -o $ORG_NAME - - For information about the config command, see - https://v2.docs.influxdata.com/v2.0/reference/cli/influx/config - and - https://v2.docs.influxdata.com/v2.0/reference/cli/influx/config/set` - - b.registerPrintFlags(cmd) - b.registerConfigSettingFlags(cmd) - return cmd -} - -func (b *cmdConfigBuilder) cmdUpdateRunEFn(*cobra.Command, []string) error { - var host string - if b.url != "" { - h, err := b.getValidHostURL() - if err != nil { - return err - } - host = h - } - - cfg, err := b.newConfigSVC().UpdateConfig(config.Config{ - Name: b.name, - Host: host, - Token: b.token, - Org: b.org, - Active: b.active, - }) - if err != nil { - return err - } - - return b.printConfigs(configPrintOpts{ - config: cfg, - }) -} - -func (b *cmdConfigBuilder) cmdList() *cobra.Command { - cmd := b.newCmd("ls", b.cmdListRunEFn, false) - cmd.Aliases = []string{"list"} - cmd.Short = "List configs" - cmd.Long = ` - The influx config ls command lists all InfluxDB connection configurations - in the configs file (by default, stored at ~/.influxdbv2/configs). Each - connection configuration includes a URL, authentication token, and active - setting. An asterisk (*) indicates the active configuration. 
- - Examples: - # list configs - influx config ls - - # list configs with long alias - influx config list - - For information about the config command, see - https://v2.docs.influxdata.com/v2.0/reference/cli/influx/config - and - https://v2.docs.influxdata.com/v2.0/reference/cli/influx/config/list` - b.registerPrintFlags(cmd) - return cmd -} - -func (b *cmdConfigBuilder) cmdListRunEFn(*cobra.Command, []string) error { - cfgs, err := b.newConfigSVC().ListConfigs() - if err != nil { - return err - } - - return b.printConfigs(configPrintOpts{configs: cfgs}) -} - -func (b *cmdConfigBuilder) registerConfigSettingFlags(cmd *cobra.Command) { - cmd.Flags().StringVarP(&b.name, "config-name", "n", "", "The config name (required)") - // name is required everywhere - cmd.MarkFlagRequired("config-name") - - cmd.Flags().BoolVarP(&b.active, "active", "a", false, "Set as active config") - cmd.Flags().StringVarP(&b.url, "host-url", "u", "", "The host url (required)") - cmd.Flags().StringVarP(&b.org, "org", "o", "", "The optional organization name") - cmd.Flags().StringVarP(&b.token, "token", "t", "", "The token for host (required)") - - // deprecated moving forward, not explicit enough based on feedback - // the short flags will still be respected but their long form is different. - cmd.Flags().StringVar(&b.name, "name", "", "The config name (required)") - cmd.Flags().MarkDeprecated("name", "use the --config-name flag") - cmd.Flags().StringVar(&b.url, "url", "", "The host url (required)") - cmd.Flags().MarkDeprecated("url", "use the --host-url flag") -} - -func (b *cmdConfigBuilder) registerFilepath(cmd *cobra.Command) { - b.globalFlags.registerFlags(b.viper, cmd, "host", "token", "skip-verify", "trace-debug-id") -} - -func (b *cmdConfigBuilder) registerPrintFlags(cmd *cobra.Command) { - registerPrintOptions(b.viper, cmd, &b.hideHeaders, &b.json) -} - -func (b *cmdConfigBuilder) printConfigs(opts configPrintOpts) error { - if b.json { - var v interface{} = opts.configs - if opts.configs == nil { - v = opts.config - } - return b.writeJSON(v) - } - - w := b.newTabWriter() - defer w.Flush() - - w.HideHeaders(b.hideHeaders) - - headers := []string{"Active", "Name", "URL", "Org"} - if opts.delete { - headers = append(headers, "Deleted") - } - w.WriteHeaders(headers...) 
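Editor's note: the listing marks the active entry with an asterisk; everything else is a straight dump of the configs map. Reduced to the essentials (field names are local stand-ins, and map iteration order is not stable):

```go
package main

import "fmt"

type config struct {
	Host   string
	Org    string
	Active bool
}

func main() {
	cfgs := map[string]config{
		"default": {Host: "http://localhost:8086", Org: "org1", Active: true},
		"staging": {Host: "http://staging:8086", Org: "org1"},
	}
	for name, c := range cfgs {
		marker := ""
		if c.Active {
			marker = "*"
		}
		fmt.Printf("%-2s %-8s %-25s %s\n", marker, name, c.Host, c.Org)
	}
}
```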
- - if opts.configs == nil { - opts.configs = config.Configs{ - opts.config.Name: opts.config, - } - } - for _, c := range opts.configs { - var active string - if c.Active { - active = "*" - } - m := map[string]interface{}{ - "Active": active, - "Name": c.Name, - "URL": c.Host, - "Org": c.Org, - } - if opts.delete { - m["Deleted"] = true - } - - w.Write(m) - } - - return nil -} - -func (b *cmdConfigBuilder) getValidHostURL() (string, error) { - u, err := url.Parse(b.url) - if err != nil { - return "", err - } - if u.Scheme != "http" && u.Scheme != "https" { - return "", errors.New("a scheme of HTTP(S) must be provided for host url") - } - return u.String(), nil -} - -func (b *cmdConfigBuilder) newConfigSVC() config.Service { - return b.svcFn(b.globalFlags.filepath) -} - -func newConfigService(path string) config.Service { - return config.NewLocalConfigSVC(path, filepath.Dir(path)) -} - -type configPrintOpts struct { - delete bool - config config.Config - configs config.Configs -} diff --git a/cmd/influx/config/config.go b/cmd/influx/config/config.go deleted file mode 100644 index d78895b923d..00000000000 --- a/cmd/influx/config/config.go +++ /dev/null @@ -1,353 +0,0 @@ -package config - -import ( - "bufio" - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - - "github.com/BurntSushi/toml" - "github.com/influxdata/influxdb/v2" -) - -// Config stores the credentials of an influxdb host and token. -type Config struct { - Name string `toml:"-" json:"-"` - Host string `toml:"url" json:"url"` - // Token is a base64 encoded sequence. - Token string `toml:"token" json:"token"` - Org string `toml:"org" json:"org"` - Active bool `toml:"active,omitempty" json:"active,omitempty"` - PreviousActive bool `toml:"previous,omitempty" json:"previous,omitempty"` -} - -// DefaultConfig is the default config without a token. -var DefaultConfig = Config{ - Name: "default", - Host: "http://localhost:8086", - Active: true, -} - -// Configs is a map of configs indexed by name. -type Configs map[string]Config - -// Service is the service to list and write configs. -type Service interface { - CreateConfig(Config) (Config, error) - DeleteConfig(name string) (Config, error) - UpdateConfig(Config) (Config, error) - SwitchActive(name string) (Config, error) - ListConfigs() (Configs, error) -} - -// store is the embedded store of the Config service. -type store interface { - parsePreviousActive() (Config, error) - ListConfigs() (Configs, error) - writeConfigs(cfgs Configs) error -} - -// Switch to another config. -func (cfgs Configs) Switch(name string) error { - if _, ok := cfgs[name]; !ok { - return &influxdb.Error{ - Code: influxdb.ENotFound, - Msg: fmt.Sprintf(`config %q is not found`, name), - } - } - for k, v := range cfgs { - v.PreviousActive = v.Active && (k != name) - v.Active = k == name - cfgs[k] = v - } - return nil -} - -func (cfgs Configs) Active() Config { - for _, cfg := range cfgs { - if cfg.Active { - return cfg - } - } - if len(cfgs) > 0 { - for _, cfg := range cfgs { - return cfg - } - } - return DefaultConfig -} - -// localConfigsSVC has the path and dir to write and parse configs. -type localConfigsSVC struct { - store -} - -type ioStore struct { - Path string - Dir string -} - -// newConfigsSVC creates a new localConfigsSVC. -func newConfigsSVC(s store) localConfigsSVC { - return localConfigsSVC{ - store: s, - } -} - -// NewLocalConfigSVC creates a new local config svc. 
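Editor's note: the `toml` struct tags above define the on-disk shape of `~/.influxdbv2/configs`. A small sketch of decoding such a file with the same TOML library; it uses the upstream `github.com/BurntSushi/toml` import path, which this repo vendors under `github.com`:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type config struct {
	Host   string `toml:"url"`
	Token  string `toml:"token"`
	Org    string `toml:"org"`
	Active bool   `toml:"active,omitempty"`
}

const src = `
[default]
  url = "http://localhost:8086"
  token = "token1"
  org = "org1"
  active = true
`

func main() {
	cfgs := make(map[string]config)
	if _, err := toml.Decode(src, &cfgs); err != nil {
		panic(err)
	}
	// {Host:http://localhost:8086 Token:token1 Org:org1 Active:true}
	fmt.Printf("%+v\n", cfgs["default"])
}
```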
-func NewLocalConfigSVC(path, dir string) Service { - return newConfigsSVC(ioStore{ - Path: path, - Dir: dir, - }) -} - -// ListConfigs from the local path. -func (s ioStore) ListConfigs() (Configs, error) { - r, err := os.Open(s.Path) - if err != nil { - return make(Configs), nil - } - return (baseRW{r: r}).ListConfigs() -} - -// parsePreviousActive from the local path. -func (s ioStore) parsePreviousActive() (Config, error) { - r, err := os.Open(s.Path) - if err != nil { - return Config{}, nil - } - return (baseRW{r: r}).parsePreviousActive() -} - -var badNames = map[string]bool{ - "-": false, - "list": false, - "update": false, - "set": false, - "delete": false, - "switch": false, - "create": false, -} - -func blockBadName(cfgs Configs) error { - for n := range cfgs { - if _, ok := badNames[n]; ok { - return &influxdb.Error{ - Code: influxdb.EInvalid, - Msg: fmt.Sprintf(`%q is not a valid config name`, n), - } - } - } - return nil -} - -type baseRW struct { - r io.Reader - w io.Writer -} - -func (s baseRW) writeConfigs(cfgs Configs) error { - if err := blockBadName(cfgs); err != nil { - return err - } - var b2 bytes.Buffer - if err := toml.NewEncoder(s.w).Encode(cfgs); err != nil { - return err - } - // a list of cloud 2 clusters, commented out - s.w.Write([]byte("# \n")) - cfgs = map[string]Config{ - "us-central": {Host: "https://us-central1-1.gcp.cloud2.influxdata.com", Token: "XXX"}, - "us-west": {Host: "https://us-west-2-1.aws.cloud2.influxdata.com", Token: "XXX"}, - "eu-central": {Host: "https://eu-central-1-1.aws.cloud2.influxdata.com", Token: "XXX"}, - } - - if err := toml.NewEncoder(&b2).Encode(cfgs); err != nil { - return err - } - reader := bufio.NewReader(&b2) - for { - line, _, err := reader.ReadLine() - - if err == io.EOF { - break - } - s.w.Write([]byte("# " + string(line) + "\n")) - } - return nil -} - -// ListConfigs decodes configs from io readers. -func (s baseRW) ListConfigs() (Configs, error) { - cfgs := make(Configs) - _, err := toml.DecodeReader(s.r, &cfgs) - for n, cfg := range cfgs { - cfg.Name = n - cfgs[n] = cfg - } - return cfgs, err -} - -// CreateConfig creates a new config. -func (svc localConfigsSVC) CreateConfig(cfg Config) (Config, error) { - if cfg.Name == "" { - return Config{}, &influxdb.Error{ - Code: influxdb.EInvalid, - Msg: "config name is empty", - } - } - cfgs, err := svc.ListConfigs() - if err != nil { - return Config{}, err - } - if _, ok := cfgs[cfg.Name]; ok { - return Config{}, &influxdb.Error{ - Code: influxdb.EConflict, - Msg: fmt.Sprintf("config %q already exists", cfg.Name), - } - } - cfgs[cfg.Name] = cfg - if cfg.Active { - if err := cfgs.Switch(cfg.Name); err != nil { - return Config{}, err - } - } - - return cfgs[cfg.Name], svc.writeConfigs(cfgs) -} - -// DeleteConfig will delete a config. -func (svc localConfigsSVC) DeleteConfig(name string) (Config, error) { - cfgs, err := svc.ListConfigs() - if err != nil { - return Config{}, err - } - - p, ok := cfgs[name] - if !ok { - return Config{}, &influxdb.Error{ - Code: influxdb.ENotFound, - Msg: fmt.Sprintf("config %q is not found", name), - } - } - delete(cfgs, name) - - if p.Active && len(cfgs) > 0 { - for name, cfg := range cfgs { - cfg.Active = true - cfgs[name] = cfg - break - } - } - - return p, svc.writeConfigs(cfgs) -} - -// SwitchActive will activate the config by name; if name is "-", activate the previous one. 
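Editor's note: `SwitchActive` (below) builds on `Configs.Switch`, whose invariant is that exactly one entry is active and the previously active entry keeps a `previous` mark so that `influx config -` can switch back. The state transition in isolation, with local stand-in types:

```go
package main

import "fmt"

type cfg struct{ active, previous bool }

// switchTo mirrors Configs.Switch: activate name, remember the old active entry.
func switchTo(cfgs map[string]*cfg, name string) error {
	if _, ok := cfgs[name]; !ok {
		return fmt.Errorf("config %q is not found", name)
	}
	for k, v := range cfgs {
		v.previous = v.active && k != name
		v.active = k == name
	}
	return nil
}

func main() {
	cfgs := map[string]*cfg{"a1": {}, "a2": {active: true}}
	if err := switchTo(cfgs, "a1"); err != nil {
		panic(err)
	}
	fmt.Println(cfgs["a1"].active, cfgs["a2"].previous) // true true
}
```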
-func (svc localConfigsSVC) SwitchActive(name string) (Config, error) { - var up Config - if name == "-" { - p0, err := svc.parsePreviousActive() - if err != nil { - return Config{}, err - } - up.Name = p0.Name - } else { - up.Name = name - } - up.Active = true - return svc.UpdateConfig(up) -} - -// UpdateConfig will update the config. -func (svc localConfigsSVC) UpdateConfig(up Config) (Config, error) { - cfgs, err := svc.ListConfigs() - if err != nil { - return Config{}, err - } - p0, ok := cfgs[up.Name] - if !ok { - return Config{}, &influxdb.Error{ - Code: influxdb.ENotFound, - Msg: fmt.Sprintf("config %q is not found", up.Name), - } - } - if up.Token != "" { - p0.Token = up.Token - } - if up.Host != "" { - p0.Host = up.Host - } - if up.Org != "" { - p0.Org = up.Org - } - - cfgs[up.Name] = p0 - if up.Active { - if err := cfgs.Switch(up.Name); err != nil { - return Config{}, err - } - } - - return cfgs[up.Name], svc.writeConfigs(cfgs) -} - -// writeConfigs to the path. -func (s ioStore) writeConfigs(cfgs Configs) error { - if err := os.MkdirAll(s.Dir, os.ModePerm); err != nil { - return err - } - var b1 bytes.Buffer - if err := (baseRW{w: &b1}).writeConfigs(cfgs); err != nil { - return err - } - return ioutil.WriteFile(s.Path, b1.Bytes(), 0600) -} - -// parsePreviousActive return the previous active config from the reader -func (s baseRW) parsePreviousActive() (Config, error) { - return s.parseActiveConfig(false) -} - -// ParseActiveConfig returns the active config from the reader. -func ParseActiveConfig(r io.Reader) (Config, error) { - return (baseRW{r: r}).parseActiveConfig(true) -} - -func (s baseRW) parseActiveConfig(currentOrPrevious bool) (Config, error) { - previousText := "" - if !currentOrPrevious { - previousText = "previous " - } - cfgs, err := s.ListConfigs() - if err != nil { - return DefaultConfig, err - } - var activated Config - var hasActive bool - for _, cfg := range cfgs { - check := cfg.Active - if !currentOrPrevious { - check = cfg.PreviousActive - } - if check && !hasActive { - activated = cfg - hasActive = true - } else if check { - return DefaultConfig, &influxdb.Error{ - Code: influxdb.EConflict, - Msg: "more than one " + previousText + "activated configs found", - } - } - } - if hasActive { - return activated, nil - } - return DefaultConfig, &influxdb.Error{ - Code: influxdb.ENotFound, - Msg: previousText + "activated config is not found", - } -} diff --git a/cmd/influx/config/config_test.go b/cmd/influx/config/config_test.go deleted file mode 100644 index cf822ab9faa..00000000000 --- a/cmd/influx/config/config_test.go +++ /dev/null @@ -1,833 +0,0 @@ -package config - -import ( - "bytes" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - influxtesting "github.com/influxdata/influxdb/v2/testing" -) - -func TestWriteConfigs(t *testing.T) { - cases := []struct { - name string - err *influxdb.Error - pp Configs - result string - }{ - { - name: "bad name -", - err: &influxdb.Error{ - Code: influxdb.EInvalid, - Msg: `"-" is not a valid config name`, - }, - }, - { - name: "bad name create", - err: &influxdb.Error{ - Code: influxdb.EInvalid, - Msg: `"create" is not a valid config name`, - }, - }, - { - name: "new config", - pp: Configs{ - "default": Config{ - Token: "token1", - Org: "org1", - Host: "http://localhost:8086", - Active: true, - }, - }, - result: `[default] - url = "http://localhost:8086" - token = "token1" - org = "org1" - active = true` + commentedStr, - }, - { - name: "multiple", - pp: Configs{ 
- "config1": Config{ - Token: "token1", - Host: "host1", - }, - "config2": Config{ - Token: "token2", - Host: "host2", - Org: "org2", - Active: true, - }, - "config3": Config{ - Token: "token3", - Host: "host3", - Org: "org3", - }, - }, - result: `[config1] - url = "host1" - token = "token1" - org = "" - -[config2] - url = "host2" - token = "token2" - org = "org2" - active = true - -[config3] - url = "host3" - token = "token3" - org = "org3"` + commentedStr, - }, - } - for _, c := range cases { - var b1 bytes.Buffer - err := (baseRW{w: &b1}).writeConfigs(c.pp) - influxtesting.ErrorsEqual(t, err, err) - if c.err == nil { - if diff := cmp.Diff(c.result, b1.String()); diff != "" { - t.Fatalf("write configs %s err, diff %s", c.name, diff) - } - } - } -} - -var commentedStr = ` -# -# [eu-central] -# url = "https://eu-central-1-1.aws.cloud2.influxdata.com" -# token = "XXX" -# org = "" -# -# [us-central] -# url = "https://us-central1-1.gcp.cloud2.influxdata.com" -# token = "XXX" -# org = "" -# -# [us-west] -# url = "https://us-west-2-1.aws.cloud2.influxdata.com" -# token = "XXX" -# org = "" -` - -func TestParseActiveConfig(t *testing.T) { - cases := []struct { - name string - hasErr bool - src string - p Config - }{ - { - name: "bad src", - src: "bad [toml", - hasErr: true, - }, - { - name: "nothing", - hasErr: true, - }, - { - name: "conflicted", - hasErr: true, - src: ` - [a1] - url = "host1" - active =true - [a2] - url = "host2" - active = true - `, - }, - { - name: "one active", - hasErr: false, - src: ` - [a1] - url = "host1" - [a2] - url = "host2" - active = true - [a3] - url = "host3" - [a4] - url = "host4" - `, - p: Config{ - Name: "a2", - Host: "host2", - Active: true, - }, - }, - } - for _, c := range cases { - r := bytes.NewBufferString(c.src) - p, err := ParseActiveConfig(r) - if c.hasErr { - if err == nil { - t.Fatalf("parse active config %q failed, should have error, got nil", c.name) - } - continue - } - if diff := cmp.Diff(p, c.p); diff != "" { - t.Fatalf("parse active config %s failed, diff %s", c.name, diff) - } - } -} - -func TestParsePreviousActiveConfig(t *testing.T) { - cases := []struct { - name string - hasErr bool - src string - p Config - }{ - { - name: "bad src", - src: "bad [toml", - hasErr: true, - }, - { - name: "nothing", - hasErr: true, - }, - { - name: "conflicted", - hasErr: true, - src: ` - [a1] - url = "host1" - previous =true - [a2] - url = "host2" - previous = true - `, - }, - { - name: "one previous active", - hasErr: false, - src: ` - [a1] - url = "host1" - [a2] - url = "host2" - previous = true - [a3] - url = "host3" - [a4] - url = "host4" - `, - p: Config{ - Name: "a2", - Host: "host2", - PreviousActive: true, - }, - }, - } - for _, c := range cases { - r := bytes.NewBufferString(c.src) - p, err := (baseRW{r: r}).parsePreviousActive() - if c.hasErr { - if err == nil { - t.Fatalf("parse previous active config %q failed, should have error, got nil", c.name) - } - continue - } - if diff := cmp.Diff(p, c.p); diff != "" { - t.Fatalf("parse previous active config %s failed, diff %s", c.name, diff) - } - } -} - -func TestConfigsSwith(t *testing.T) { - cases := []struct { - name string - old Configs - new Configs - target string - err error - }{ - { - name: "not found", - target: "p1", - old: Configs{ - "a1": {Host: "host1"}, - "a2": {Host: "host2"}, - }, - new: Configs{ - "a1": {Host: "host1"}, - "a2": {Host: "host2"}, - }, - err: &influxdb.Error{ - Code: influxdb.ENotFound, - Msg: `config "p1" is not found`, - }, - }, - { - name: "regular switch", - target: 
"a1", - old: Configs{ - "a1": {Host: "host1"}, - "a2": {Host: "host2"}, - "a3": {Host: "host3", Active: true}, - }, - new: Configs{ - "a1": {Host: "host1", Active: true}, - "a2": {Host: "host2"}, - "a3": {Host: "host3", PreviousActive: true}, - }, - err: nil, - }, - { - name: "regular to current active", // nothing should be changed - target: "a3", - old: Configs{ - "a1": {Host: "host1"}, - "a2": {Host: "host2"}, - "a3": {Host: "host3", Active: true}, - }, - new: Configs{ - "a1": {Host: "host1"}, - "a2": {Host: "host2"}, - "a3": {Host: "host3", Active: true}, - }, - err: nil, - }, - } - for _, c := range cases { - err := c.old.Switch(c.target) - influxtesting.ErrorsEqual(t, err, c.err) - if diff := cmp.Diff(c.old, c.new); diff != "" { - t.Fatalf("switch config %s failed, diff %s", c.name, diff) - } - } -} - -func TestConfigCreate(t *testing.T) { - cases := []struct { - name string - exists Configs - src Config - err error - result Config - stored Configs - }{ - { - name: "invalid name", - err: &influxdb.Error{ - Code: influxdb.EInvalid, - Msg: "config name is empty", - }, - }, - { - name: "new", - src: Config{ - Name: "default", - Host: "host1", - Org: "org1", - Token: "tok1", - }, - result: Config{ - Name: "default", - Host: "host1", - Org: "org1", - Token: "tok1", - }, - stored: Configs{ - "default": { - Name: "default", - Host: "host1", - Org: "org1", - Token: "tok1", - }, - }, - }, - { - name: "new active", - src: Config{ - Name: "default", - Host: "host1", - Org: "org1", - Token: "tok1", - Active: true, - }, - result: Config{ - Name: "default", - Host: "host1", - Org: "org1", - Token: "tok1", - Active: true, - }, - stored: Configs{ - "default": { - Name: "default", - Host: "host1", - Org: "org1", - Token: "tok1", - Active: true, - }, - }, - }, - { - name: "conflict", - exists: Configs{ - "default": { - Name: "default", - Host: "host1", - }, - }, - src: Config{ - Name: "default", - Host: "host1", - }, - err: &influxdb.Error{ - Code: influxdb.EConflict, - Msg: `config "default" already exists`, - }, - }, - { - name: "existing", - exists: Configs{ - "default": { - Name: "default", - Host: "host1", - Org: "org1", - Token: "tok1", - Active: true, - }, - }, - src: Config{ - Name: "a1", - Host: "host1", - Org: "org1", - Token: "tok1", - Active: true, - }, - result: Config{ - Name: "a1", - Host: "host1", - Org: "org1", - Token: "tok1", - Active: true, - }, - stored: Configs{ - "default": { - Name: "default", - Host: "host1", - Org: "org1", - Token: "tok1", - PreviousActive: true, - }, - "a1": { - Name: "a1", - Host: "host1", - Org: "org1", - Token: "tok1", - Active: true, - }, - }, - }, - } - for _, c := range cases { - svc, store := newBufferSVC() - _ = store.writeConfigs(c.exists) - result, err := svc.CreateConfig(c.src) - influxtesting.ErrorsEqual(t, err, c.err) - if err == nil { - if diff := cmp.Diff(result, c.result); diff != "" { - t.Fatalf("create config %s failed, diff %s", c.name, diff) - } - stored, err := store.ListConfigs() - if err != nil { - t.Fatalf("create config %s to list result, err %s", c.name, err.Error()) - } - if diff := cmp.Diff(stored, c.stored); diff != "" { - t.Fatalf("create config %s failed, diff %s", c.name, diff) - } - } - } -} - -func TestConfigSwitch(t *testing.T) { - cases := []struct { - name string - exists Configs - src string - err error - result Config - stored Configs - }{ - { - name: "empty", - err: &influxdb.Error{ - Code: influxdb.ENotFound, - Msg: `config "" is not found`, - }, - }, - { - name: "not found", - src: "default", - err: &influxdb.Error{ 
- Code: influxdb.ENotFound, - Msg: `config "default" is not found`, - }, - }, - { - name: "regular", - exists: Configs{ - "a1": { - Name: "a1", - Host: "host1", - Org: "org1", - Token: "tok1", - }, - "a2": { - Name: "a2", - Host: "host2", - Org: "org2", - Token: "tok2", - Active: true, - }, - }, - src: "a1", - result: Config{ - Name: "a1", - Host: "host1", - Org: "org1", - Token: "tok1", - Active: true, - }, - stored: Configs{ - "a1": { - Name: "a1", - Host: "host1", - Org: "org1", - Token: "tok1", - Active: true, - }, - "a2": { - Name: "a2", - Host: "host2", - Org: "org2", - Token: "tok2", - PreviousActive: true, - }, - }, - }, - { - name: "switch back", - exists: Configs{ - "a1": { - Name: "a1", - Host: "host1", - Org: "org1", - Token: "tok1", - PreviousActive: true, - }, - "a2": { - Name: "a2", - Host: "host2", - Org: "org2", - Token: "tok2", - Active: true, - }, - }, - src: "-", - result: Config{ - Name: "a1", - Host: "host1", - Org: "org1", - Token: "tok1", - Active: true, - }, - stored: Configs{ - "a1": { - Name: "a1", - Host: "host1", - Org: "org1", - Token: "tok1", - Active: true, - }, - "a2": { - Name: "a2", - Host: "host2", - Org: "org2", - Token: "tok2", - PreviousActive: true, - }, - }, - }, - { - name: "switch back with no previous", - exists: Configs{ - "a1": { - Name: "a1", - Host: "host1", - Org: "org1", - Token: "tok1", - }, - "a2": { - Name: "a2", - Host: "host2", - Org: "org2", - Token: "tok2", - Active: true, - }, - }, - src: "-", - err: &influxdb.Error{ - Code: influxdb.ENotFound, - Msg: "previous activated config is not found", - }, - }, - } - for _, c := range cases { - svc, store := newBufferSVC() - _ = store.writeConfigs(c.exists) - result, err := svc.SwitchActive(c.src) - influxtesting.ErrorsEqual(t, err, c.err) - if err == nil { - if diff := cmp.Diff(result, c.result); diff != "" { - t.Fatalf("switch config %s failed, diff %s", c.name, diff) - } - stored, err := store.ListConfigs() - if err != nil { - t.Fatalf("switch config %s to list result, err %s", c.name, err.Error()) - } - if diff := cmp.Diff(stored, c.stored); diff != "" { - t.Fatalf("switch config %s failed, diff %s", c.name, diff) - } - } - } -} - -func TestConfigUpdate(t *testing.T) { - cases := []struct { - name string - exists Configs - src Config - err error - result Config - stored Configs - }{ - { - name: "empty", - err: &influxdb.Error{ - Code: influxdb.ENotFound, - Msg: `config "" is not found`, - }, - }, - { - name: "not found", - src: Config{ - Name: "default", - Host: "host1", - Org: "org1", - Token: "tok1", - }, - err: &influxdb.Error{ - Code: influxdb.ENotFound, - Msg: `config "default" is not found`, - }, - }, - { - name: "regular", - exists: Configs{ - "a1": { - Name: "a1", - Host: "host1", - Org: "org1", - Token: "tok1", - }, - "a2": { - Name: "a2", - Host: "host2", - Org: "org2", - Token: "tok2", - Active: true, - }, - }, - src: Config{ - Name: "a1", - Host: "host11", - Org: "org11", - Token: "tok11", - Active: true, - }, - result: Config{ - Name: "a1", - Host: "host11", - Org: "org11", - Token: "tok11", - Active: true, - }, - stored: Configs{ - "a1": { - Name: "a1", - Host: "host11", - Org: "org11", - Token: "tok11", - Active: true, - }, - "a2": { - Name: "a2", - Host: "host2", - Org: "org2", - Token: "tok2", - PreviousActive: true, - }, - }, - }, - } - for _, c := range cases { - svc, store := newBufferSVC() - _ = store.writeConfigs(c.exists) - result, err := svc.UpdateConfig(c.src) - influxtesting.ErrorsEqual(t, err, c.err) - if err == nil { - if diff := cmp.Diff(result, c.result); 
diff != "" { - t.Fatalf("update config %s failed, diff %s", c.name, diff) - } - stored, err := store.ListConfigs() - if err != nil { - t.Fatalf("update config %s to list result, err %s", c.name, err.Error()) - } - if diff := cmp.Diff(stored, c.stored); diff != "" { - t.Fatalf("update config %s failed, diff %s", c.name, diff) - } - } - } -} - -func TestConfigDelete(t *testing.T) { - cases := []struct { - name string - exists Configs - target string - err error - result Config - stored Configs - }{ - { - name: "empty", - err: &influxdb.Error{ - Code: influxdb.ENotFound, - Msg: `config "" is not found`, - }, - }, - { - name: "not found", - target: "bad", - exists: Configs{ - "default": { - Name: "default", - Host: "host1", - }, - }, - err: &influxdb.Error{ - Code: influxdb.ENotFound, - Msg: `config "bad" is not found`, - }, - }, - { - name: "regular", - exists: Configs{ - "default": { - Name: "default", - Host: "host1", - }, - }, - target: "default", - result: Config{ - Name: "default", - Host: "host1", - }, - stored: Configs{}, - }, - { - name: "more than 1", - exists: Configs{ - "a1": { - Host: "host1", - Org: "org1", - Token: "tok1", - Active: true, - }, - "a2": { - Host: "host2", - Org: "org2", - Token: "tok2", - }, - }, - target: "a1", - result: Config{ - Name: "a1", - Host: "host1", - Org: "org1", - Token: "tok1", - Active: true, - }, - stored: Configs{ - "a2": { - Active: true, - Name: "a2", - Host: "host2", - Org: "org2", - Token: "tok2", - }, - }, - }, - } - for _, c := range cases { - fn := func(t *testing.T) { - svc, store := newBufferSVC() - _ = store.writeConfigs(c.exists) - result, err := svc.DeleteConfig(c.target) - influxtesting.ErrorsEqual(t, err, c.err) - if err == nil { - if diff := cmp.Diff(result, c.result); diff != "" { - t.Fatalf("delete config %s failed, diff %s", c.name, diff) - } - stored, err := store.ListConfigs() - if err != nil { - t.Fatalf("delete config %s to list result, err %s", c.name, err.Error()) - } - if diff := cmp.Diff(stored, c.stored); diff != "" { - t.Fatalf("delete config %s failed, diff %s", c.name, diff) - } - } - } - t.Run(c.name, fn) - } -} - -func newBufferSVC() (Service, *bytesStore) { - store := new(bytesStore) - return newConfigsSVC(store), store -} - -type bytesStore struct { - data []byte -} - -func (s *bytesStore) writeConfigs(cfgs Configs) error { - var b bytes.Buffer - if err := (baseRW{w: &b}).writeConfigs(cfgs); err != nil { - return err - } - s.data = b.Bytes() - return nil -} - -func (s *bytesStore) ListConfigs() (Configs, error) { - return baseRW{ - r: bytes.NewBuffer(s.data), - }.ListConfigs() -} - -func (s *bytesStore) parsePreviousActive() (Config, error) { - return (baseRW{ - r: bytes.NewBuffer(s.data), - }).parsePreviousActive() -} diff --git a/cmd/influx/config_test.go b/cmd/influx/config_test.go deleted file mode 100644 index 647deb16779..00000000000 --- a/cmd/influx/config_test.go +++ /dev/null @@ -1,664 +0,0 @@ -package main - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/cmd/influx/config" - "github.com/spf13/cobra" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestCmdConfig(t *testing.T) { - t.Run("create", func(t *testing.T) { - t.Run("with valid args should be successful", func(t *testing.T) { - tests := []struct { - name string - original config.Configs - expected config.Config - flags 
[]string - }{ - { - name: "basic", - flags: []string{ - "--config-name", "default", - "--org", "org1", - "--host-url", "http://localhost:8086", - "--token", "tok1", - "--active", - }, - original: make(config.Configs), - expected: config.Config{ - Name: "default", - Org: "org1", - Active: true, - Token: "tok1", - Host: "http://localhost:8086", - }, - }, - { - name: "short", - flags: []string{ - "-n", "default", - "-o", "org1", - "-u", "http://localhost:8086", - "-t", "tok1", - "-a", - }, - original: make(config.Configs), - expected: config.Config{ - Name: "default", - Org: "org1", - Active: true, - Token: "tok1", - Host: "http://localhost:8086", - }, - }, - { - name: "short new with existing", - flags: []string{ - "-n", "default", - "-o", "org1", - "-u", "http://localhost:8086", - "-t", "tok1", - "-a", - }, - original: config.Configs{ - "config1": { - Org: "org1", - Active: true, - Token: "tok1", - Host: "host1", - }, - }, - expected: config.Config{ - Name: "default", - Org: "org1", - Active: true, - Token: "tok1", - Host: "http://localhost:8086", - }, - }, - } - cmdFn := func(original config.Configs, expected config.Config) func(*globalFlags, genericCLIOpts) *cobra.Command { - return func(g *globalFlags, opt genericCLIOpts) *cobra.Command { - builder := cmdConfigBuilder{ - genericCLIOpts: opt, - globalFlags: g, - svcFn: func(_ string) config.Service { - return &mockConfigService{ - CreateConfigFn: func(cfg config.Config) (config.Config, error) { - if diff := cmp.Diff(expected, cfg); diff != "" { - return config.Config{}, &influxdb.Error{ - Msg: fmt.Sprintf("create config failed, diff %s", diff), - } - } - return expected, nil - }, - } - }, - } - return builder.cmd() - } - } - for _, tt := range tests { - fn := func(t *testing.T) { - builder := newInfluxCmdBuilder( - in(new(bytes.Buffer)), - out(ioutil.Discard), - ) - cmd := builder.cmd(cmdFn(tt.original, tt.expected)) - cmd.SetArgs(append([]string{"config", "create"}, tt.flags...)) - require.NoError(t, cmd.Execute()) - } - t.Run(tt.name, fn) - } - }) - - t.Run("handles non default configs path", func(t *testing.T) { - dir := newTempDir(t) - defer os.RemoveAll(dir) - - file := filepath.Join(dir, "configurations") - - builder := newInfluxCmdBuilder( - in(new(bytes.Buffer)), - out(ioutil.Discard), - ) - cmd := builder.cmd(cmdConfig) - - flags := []string{ - "--configs-path=" + file, - "-n", "default", - "-o", "org1", - "-u", "http://localhost:8086", - "-t", "tok1", - "-a", - } - cmd.SetArgs(append([]string{"config", "create"}, flags...)) - require.NoError(t, cmd.Execute()) - - configs, err := config.NewLocalConfigSVC(file, dir).ListConfigs() - require.NoError(t, err) - - cfg, ok := configs["default"] - require.True(t, ok) - assert.Equal(t, "default", cfg.Name) - assert.Equal(t, "org1", cfg.Org) - assert.Equal(t, "http://localhost:8086", cfg.Host) - assert.Equal(t, "tok1", cfg.Token) - assert.True(t, cfg.Active) - }) - - t.Run("rejects a config option with an invalid host url", func(t *testing.T) { - cmdFn := func(g *globalFlags, opt genericCLIOpts) *cobra.Command { - builder := cmdConfigBuilder{ - genericCLIOpts: opt, - globalFlags: g, - svcFn: func(_ string) config.Service { - return &mockConfigService{ - CreateConfigFn: func(cfg config.Config) (config.Config, error) { - return cfg, nil - }, - } - }, - } - return builder.cmd() - } - testConfigInvalidURLs(t, "create", cmdFn) - }) - }) - - t.Run("switch", func(t *testing.T) { - tests := []struct { - name string - original config.Configs - expected config.Config - arg string - }{ - { - name: 
"basic", - arg: "default", - original: config.Configs{ - "config1": { - Name: "config1", - Org: "org2", - Active: true, - Token: "tok2", - Host: "http://localhost:8888", - }, - "default": { - Name: "default", - Org: "org1", - Active: false, - Token: "tok1", - Host: "http://localhost:8086", - }, - }, - expected: config.Config{ - Name: "default", - Org: "org1", - Active: true, - Token: "tok1", - Host: "http://localhost:8086", - }, - }, - { - name: "back", - arg: "-", - original: config.Configs{ - "config1": { - Name: "config1", - Org: "org2", - Active: true, - Token: "tok2", - Host: "http://localhost:8888", - }, - "default": { - Name: "default", - Org: "org1", - Active: false, - PreviousActive: true, - Token: "tok1", - Host: "http://localhost:8086", - }, - }, - expected: config.Config{ - Name: "default", - Org: "org1", - Active: true, - Token: "tok1", - Host: "http://localhost:8086", - }, - }, - } - cmdFn := func(original config.Configs, expected config.Config) func(*globalFlags, genericCLIOpts) *cobra.Command { - svc := func(_ string) config.Service { - return &mockConfigService{ - SwitchActiveFn: func(name string) (config.Config, error) { - var cfg config.Config - for _, item := range original { - if name == "-" && item.PreviousActive || - item.Name == name { - cfg = item - break - - } - } - cfg.Active = true - cfg.PreviousActive = false - if diff := cmp.Diff(expected, cfg); diff != "" { - return config.Config{}, &influxdb.Error{ - Msg: fmt.Sprintf("switch config failed, diff %s", diff), - } - } - return expected, nil - }, - } - } - - return func(g *globalFlags, opt genericCLIOpts) *cobra.Command { - builder := cmdConfigBuilder{ - genericCLIOpts: opt, - globalFlags: g, - svcFn: svc, - } - return builder.cmd() - } - } - for _, tt := range tests { - fn := func(t *testing.T) { - builder := newInfluxCmdBuilder( - in(new(bytes.Buffer)), - out(ioutil.Discard), - ) - cmd := builder.cmd(cmdFn(tt.original, tt.expected)) - cmd.SetArgs([]string{"config", tt.arg}) - require.NoError(t, cmd.Execute()) - } - t.Run(tt.name, fn) - } - }) - - t.Run("set", func(t *testing.T) { - t.Run("with valid args should be successful", func(t *testing.T) { - tests := []struct { - name string - expected config.Config - flags []string - }{ - { - name: "basic", - flags: []string{ - "--config-name", "default", - "--org", "org1", - "--host-url", "http://localhost:8086", - "--token", "tok1", - "--active", - }, - expected: config.Config{ - Name: "default", - Org: "org1", - Active: true, - Token: "tok1", - Host: "http://localhost:8086", - }, - }, - { - name: "only org", - flags: []string{ - "--config-name", "default", - "--org", "org1", - }, - expected: config.Config{ - Name: "default", - Org: "org1", - }, - }, - { - name: "only host", - flags: []string{ - "--config-name", "default", - "--host-url", "http://example.com", - }, - expected: config.Config{ - Name: "default", - Host: "http://example.com", - }, - }, - { - name: "only token", - flags: []string{ - "--config-name", "default", - "--token", "footoken", - }, - expected: config.Config{ - Name: "default", - Token: "footoken", - }, - }, - { - name: "only token and org", - flags: []string{ - "--config-name", "default", - "--token", "footoken", - "--org", "org", - }, - expected: config.Config{ - Name: "default", - Org: "org", - Token: "footoken", - }, - }, - { - name: "short", - flags: []string{ - "-n", "default", - "-o", "org1", - "-u", "http://localhost:8086", - "-t", "tok1", - "-a", - }, - expected: config.Config{ - Name: "default", - Org: "org1", - Active: true, - Token: 
"tok1", - Host: "http://localhost:8086", - }, - }, - } - cmdFn := func(expected config.Config) func(*globalFlags, genericCLIOpts) *cobra.Command { - svc := func(_ string) config.Service { - return &mockConfigService{ - UpdateConfigFn: func(cfg config.Config) (config.Config, error) { - if diff := cmp.Diff(expected, cfg); diff != "" { - return config.Config{}, &influxdb.Error{ - Msg: fmt.Sprintf("update config failed, diff %s", diff), - } - } - return expected, nil - }, - } - } - - return func(g *globalFlags, opt genericCLIOpts) *cobra.Command { - builder := cmdConfigBuilder{ - genericCLIOpts: opt, - globalFlags: g, - svcFn: svc, - } - return builder.cmd() - } - } - for _, tt := range tests { - fn := func(t *testing.T) { - builder := newInfluxCmdBuilder( - in(new(bytes.Buffer)), - out(ioutil.Discard), - ) - cmd := builder.cmd(cmdFn(tt.expected)) - cmd.SetArgs(append([]string{"config", "set"}, tt.flags...)) - require.NoError(t, cmd.Execute()) - } - t.Run(tt.name, fn) - } - }) - - t.Run("rejects a config option with an invalid host url", func(t *testing.T) { - cmdFn := func(g *globalFlags, opt genericCLIOpts) *cobra.Command { - builder := cmdConfigBuilder{ - genericCLIOpts: opt, - globalFlags: g, - svcFn: func(_ string) config.Service { - return &mockConfigService{ - CreateConfigFn: func(cfg config.Config) (config.Config, error) { - return cfg, nil - }, - } - }, - } - return builder.cmd() - } - - testConfigInvalidURLs(t, "set", cmdFn) - }) - }) - - t.Run("delete", func(t *testing.T) { - tests := []struct { - name string - original config.Configs - expected config.Config - flags []string - }{ - { - name: "basic", - flags: []string{ - "--name", "default", - }, - original: config.Configs{ - "default": { - Name: "default", - Org: "org2", - Active: false, - Token: "tok2", - Host: "http://localhost:8888", - }, - }, - expected: config.Config{ - Name: "default", - Org: "org2", - Active: false, - Token: "tok2", - Host: "http://localhost:8888", - }, - }, - { - name: "short", - flags: []string{ - "-n", "default", - }, - original: config.Configs{ - "default": { - Name: "default", - Org: "org2", - Active: false, - Token: "tok2", - Host: "http://localhost:8888", - }, - }, - expected: config.Config{ - Name: "default", - Org: "org2", - Active: false, - Token: "tok2", - Host: "http://localhost:8888", - }, - }, - } - cmdFn := func(original config.Configs, expected config.Config) func(*globalFlags, genericCLIOpts) *cobra.Command { - svc := func(_ string) config.Service { - return &mockConfigService{ - DeleteConfigFn: func(name string) (config.Config, error) { - var cfg config.Config - for _, item := range original { - if item.Name == name { - cfg = item - break - } - } - if diff := cmp.Diff(expected, cfg); diff != "" { - return config.Config{}, &influxdb.Error{ - Msg: fmt.Sprintf("delete config failed, diff %s", diff), - } - } - return expected, nil - }, - } - } - - return func(g *globalFlags, opt genericCLIOpts) *cobra.Command { - builder := cmdConfigBuilder{ - genericCLIOpts: opt, - globalFlags: g, - svcFn: svc, - } - return builder.cmd() - } - } - for _, tt := range tests { - fn := func(t *testing.T) { - builder := newInfluxCmdBuilder( - in(new(bytes.Buffer)), - out(ioutil.Discard), - ) - cmd := builder.cmd(cmdFn(tt.original, tt.expected)) - cmd.SetArgs(append([]string{"config", "delete"}, tt.flags...)) - require.NoError(t, cmd.Execute()) - } - t.Run(tt.name, fn) - } - }) - - t.Run("list", func(t *testing.T) { - tests := []struct { - name string - expected config.Configs - }{ - { - name: "basic", - expected: 
config.Configs{ - "default": { - Org: "org2", - Active: false, - Token: "tok2", - Host: "http://localhost:8888", - }, - "kubone": { - Org: "org1", - Active: false, - Token: "tok1", - Host: "http://localhost:8086", - }, - }, - }, - } - cmdFn := func(expected config.Configs) func(*globalFlags, genericCLIOpts) *cobra.Command { - svc := func(_ string) config.Service { - return &mockConfigService{ - ListConfigsFn: func() (config.Configs, error) { - return expected, nil - }, - } - } - - return func(g *globalFlags, opt genericCLIOpts) *cobra.Command { - builder := cmdConfigBuilder{ - genericCLIOpts: opt, - globalFlags: g, - svcFn: svc, - } - return builder.cmd() - } - } - for _, tt := range tests { - fn := func(t *testing.T) { - builder := newInfluxCmdBuilder( - in(new(bytes.Buffer)), - out(ioutil.Discard), - ) - cmd := builder.cmd(cmdFn(tt.expected)) - cmd.SetArgs([]string{"config", "list"}) - require.NoError(t, cmd.Execute()) - } - t.Run(tt.name, fn) - } - }) -} - -func testConfigInvalidURLs(t *testing.T, cmdName string, cmdFn func(*globalFlags, genericCLIOpts) *cobra.Command) { - tests := []struct { - name string - flags []string - }{ - { - name: "missing scheme", - flags: []string{ - "--config-name", "default", - "--org", "org1", - "--host-url", "localhost:8086", - "--token", "tok1", - }, - }, - { - name: "invalid url", - flags: []string{ - "--config-name", "default", - "--org", "org1", - "--host-url", "rando@@ s_ threeve", - "--token", "tok1", - }, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - builder := newInfluxCmdBuilder( - in(new(bytes.Buffer)), - out(ioutil.Discard), - ) - cmd := builder.cmd(cmdFn) - cmd.SetArgs(append([]string{"config", cmdName}, tt.flags...)) - require.Error(t, cmd.Execute(), "cmd name: influx config "+cmdName) - } - t.Run(tt.name, fn) - } -} - -// mockConfigService mocks the ConfigService. -type mockConfigService struct { - CreateConfigFn func(config.Config) (config.Config, error) - DeleteConfigFn func(name string) (config.Config, error) - UpdateConfigFn func(config.Config) (config.Config, error) - ParseConfigsFn func() (config.Configs, error) - SwitchActiveFn func(name string) (config.Config, error) - ListConfigsFn func() (config.Configs, error) -} - -// ParseConfigs returns the parse fn. -func (s *mockConfigService) ParseConfigs() (config.Configs, error) { - return s.ParseConfigsFn() -} - -// CreateConfig create a config. -func (s *mockConfigService) CreateConfig(cfg config.Config) (config.Config, error) { - return s.CreateConfigFn(cfg) -} - -// DeleteConfig will delete by name. -func (s *mockConfigService) DeleteConfig(name string) (config.Config, error) { - return s.DeleteConfigFn(name) -} - -// UpdateConfig will update the config. -func (s *mockConfigService) UpdateConfig(up config.Config) (config.Config, error) { - return s.UpdateConfigFn(up) -} - -// SwitchActive active the config by name. -func (s *mockConfigService) SwitchActive(name string) (config.Config, error) { - return s.SwitchActiveFn(name) -} - -// ListConfigs lists all the configs. 
-func (s *mockConfigService) ListConfigs() (config.Configs, error) { - return s.ListConfigsFn() -} diff --git a/cmd/influx/dashboard.go b/cmd/influx/dashboard.go deleted file mode 100644 index df6368b56ab..00000000000 --- a/cmd/influx/dashboard.go +++ /dev/null @@ -1,154 +0,0 @@ -package main - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/cmd/influx/internal" - "github.com/influxdata/influxdb/v2/dashboards/transport" - "github.com/influxdata/influxdb/v2/tenant" - "github.com/spf13/cobra" -) - -func cmdDashboard(f *globalFlags, opts genericCLIOpts) *cobra.Command { - return newCmdDashboardBuilder(newDashboardSVCs, f, opts).cmdDashboards() -} - -type dashboardSVCsFn func() (influxdb.DashboardService, influxdb.OrganizationService, error) - -type cmdDashboardBuilder struct { - genericCLIOpts - *globalFlags - - svcFn dashboardSVCsFn - - ids []string - org organization -} - -func newCmdDashboardBuilder(svcFn dashboardSVCsFn, f *globalFlags, opts genericCLIOpts) *cmdDashboardBuilder { - return &cmdDashboardBuilder{ - genericCLIOpts: opts, - globalFlags: f, - svcFn: svcFn, - } -} - -func (b *cmdDashboardBuilder) cmdDashboards() *cobra.Command { - cmd := b.newCmd("dashboards", b.listRunE) - cmd.Short = "List Dashboard(s)." - cmd.Long = ` - List Dashboard(s). - - Examples: - # list all known Dashboards - influx dashboards - - # list all known Dashboards matching ids - influx dashboards --id $ID1 --id $ID2 - - # list all known Dashboards matching ids shorts - influx dashboards -i $ID1 -i $ID2 -` - - b.org.register(b.viper, cmd, false) - cmd.Flags().StringArrayVarP(&b.ids, "id", "i", nil, "Dashboard ID to retrieve.") - - return cmd -} - -func (b *cmdDashboardBuilder) listRunE(cmd *cobra.Command, args []string) error { - svc, orgSVC, err := b.svcFn() - if err != nil { - return err - } - - orgID, _ := b.org.getID(orgSVC) - if orgID == 0 && len(b.ids) == 0 { - return &influxdb.Error{ - Code: influxdb.EUnprocessableEntity, - Msg: "at least one of org, org-id, or id must be provided", - } - } - - var ids []*influxdb.ID - for _, rawID := range b.ids { - id, err := influxdb.IDFromString(rawID) - if err != nil { - return err - } - ids = append(ids, id) - } - - var ( - out []*influxdb.Dashboard - offset int - ) - const limit = 100 - for { - dashboards, _, err := svc.FindDashboards(context.Background(), influxdb.DashboardFilter{ - IDs: ids, - OrganizationID: &orgID, - }, influxdb.FindOptions{ - Limit: limit, - Offset: offset, - }) - if err != nil && influxdb.ErrorCode(err) != influxdb.ENotFound { - return err - } - out = append(out, dashboards...) - if len(dashboards) < limit { - break - } - offset += len(dashboards) - } - - return b.writeDashboards(out...) -} - -func (b *cmdDashboardBuilder) writeDashboards(dashboards ...*influxdb.Dashboard) error { - if b.json { - return b.writeJSON(dashboards) - } - - tabW := b.newTabWriter() - defer tabW.Flush() - - writeDashboardRows(tabW, dashboards...) 
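// Editor's note (illustrative sketch, not part of the original diff): the
// TabWriter used here looks each header up as a key in the map passed to
// Write, so the header names and map keys must match exactly or the cell
// renders as "<nil>". A minimal sketch of the pattern writeDashboardRows uses:
//
//	tabW.WriteHeaders("ID", "Name")
//	tabW.Write(map[string]interface{}{"ID": d.ID, "Name": d.Name})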
- return nil -} - -func writeDashboardRows(tabW *internal.TabWriter, dashboards ...*influxdb.Dashboard) { - tabW.WriteHeaders("ID", "OrgID", "Name", "Description", "Num Cells") - for _, d := range dashboards { - tabW.Write(map[string]interface{}{ - "ID": d.ID, - "OrgID": d.OrganizationID.String(), - "Name": d.Name, - "Description": d.Description, - "Num Cells": len(d.Cells), - }) - } -} - -func (b *cmdDashboardBuilder) newCmd(use string, runE func(*cobra.Command, []string) error) *cobra.Command { - cmd := b.genericCLIOpts.newCmd(use, runE, true) - b.genericCLIOpts.registerPrintOptions(cmd) - b.globalFlags.registerFlags(b.viper, cmd) - return cmd -} - -func newDashboardSVCs() (influxdb.DashboardService, influxdb.OrganizationService, error) { - httpClient, err := newHTTPClient() - if err != nil { - return nil, nil, err - } - - orgSVC := &tenant.OrgClientService{ - Client: httpClient, - } - dashSVC := &transport.DashboardService{ - Client: httpClient, - } - return dashSVC, orgSVC, nil -} diff --git a/cmd/influx/delete.go b/cmd/influx/delete.go deleted file mode 100644 index 4850acac0b0..00000000000 --- a/cmd/influx/delete.go +++ /dev/null @@ -1,107 +0,0 @@ -package main - -import ( - "context" - "fmt" - - "github.com/influxdata/influxdb/v2/http" - "github.com/influxdata/influxdb/v2/kit/signals" - "github.com/spf13/cobra" -) - -func cmdDelete(f *globalFlags, opt genericCLIOpts) *cobra.Command { - builder := &cmdDeleteBuilder{ - genericCLIOpts: opt, - globalFlags: f, - } - return builder.cmd() -} - -type cmdDeleteBuilder struct { - genericCLIOpts - *globalFlags - - flags http.DeleteRequest -} - -func (b *cmdDeleteBuilder) cmd() *cobra.Command { - cmd := b.newCmd("delete", b.fluxDeleteF) - cmd.Short = "Delete points from InfluxDB" - cmd.Long = `Delete points from InfluxDB by specifying the start time, stop time, - and a SQL-like predicate string.` - - opts := flagOpts{ - { - DestP: &b.flags.OrgID, - Flag: "org-id", - Desc: "The ID of the organization that owns the bucket", - Persistent: true, - }, - { - DestP: &b.flags.Org, - Flag: "org", - Short: 'o', - Desc: "The name of the organization that owns the bucket", - Persistent: true, - }, - { - DestP: &b.flags.BucketID, - Flag: "bucket-id", - Desc: "The ID of the destination bucket", - Persistent: true, - }, - { - DestP: &b.flags.Bucket, - Flag: "bucket", - Desc: "The name of the destination bucket", - EnvVar: "BUCKET_NAME", - Persistent: true, - }, - } - opts.mustRegister(b.viper, cmd) - - cmd.PersistentFlags().StringVar(&b.flags.Start, "start", "", "the start time in RFC3339Nano format, e.g. 2009-01-02T23:00:00Z") - cmd.PersistentFlags().StringVar(&b.flags.Stop, "stop", "", "the stop time in RFC3339Nano format, e.g. 2009-01-02T23:00:00Z") - cmd.PersistentFlags().StringVarP(&b.flags.Predicate, "predicate", "p", "", "SQL-like predicate string, e.g. 'tag1=\"v1\" and (tag2=123)'") - - return cmd -} - -func (b *cmdDeleteBuilder) fluxDeleteF(cmd *cobra.Command, args []string) error { - ac := b.globalFlags.config() - - org := b.flags.Org - if org == "" { - org = ac.Org - } - if org == "" && b.flags.OrgID == "" { - return fmt.Errorf("please specify one of org or org-id") - } - - if b.flags.Bucket == "" && b.flags.BucketID == "" { - return fmt.Errorf("please specify one of bucket or bucket-id") - } - - if b.flags.Start == "" || b.flags.Stop == "" { - return fmt.Errorf("both start and stop are required") - } - - s := &http.DeleteService{ - Addr: ac.Host, - Token: ac.Token, - InsecureSkipVerify: flags.skipVerify, - } - - ctx :=
signals.WithStandardSignals(context.Background()) - if err := s.DeleteBucketRangePredicate(ctx, b.flags); err != nil && err != context.Canceled { - return fmt.Errorf("failed to delete data: %v", err) - } - - return nil -} - -func (b *cmdDeleteBuilder) newCmd(use string, runE func(*cobra.Command, []string) error) *cobra.Command { - cmd := b.genericCLIOpts.newCmd(use, runE, true) - b.globalFlags.registerFlags(b.viper, cmd) - return cmd -} diff --git a/cmd/influx/internal/errorfmt.go b/cmd/influx/internal/errorfmt.go deleted file mode 100644 index 2828f414cd7..00000000000 --- a/cmd/influx/internal/errorfmt.go +++ /dev/null @@ -1,35 +0,0 @@ -package internal - -import ( - "errors" - "strings" - "unicode" -) - -// ErrorFmt formats errors presented to the user such that the first letter in the error -// is capitalized and ends with an appropriate punctuation. -func ErrorFmt(err error) error { - if err == nil { - return nil - } - - s := err.Error() - - s = strings.Trim(s, "\n .!?") - - count := 0 - s = strings.Map( - func(r rune) rune { - defer func() { count++ }() - if count == 0 { - return unicode.ToUpper(r) - } - return r - }, - s, - ) - - s = s + "." - - return errors.New(s) -} diff --git a/cmd/influx/internal/errorfmt_test.go b/cmd/influx/internal/errorfmt_test.go deleted file mode 100644 index d943ae695b2..00000000000 --- a/cmd/influx/internal/errorfmt_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package internal_test - -import ( - "errors" - "testing" - - "github.com/influxdata/influxdb/v2/cmd/influx/internal" -) - -func TestErrorFmt(t *testing.T) { - tests := []struct { - name string - err string - fmterr string - }{ - { - name: "error already formatted", - err: "Invalid ID.", - fmterr: "Invalid ID.", - }, - { - name: "error missing period", - err: "Invalid ID", - fmterr: "Invalid ID.", - }, - { - name: "error does not start with a capital letter", - err: "invalid ID.", - fmterr: "Invalid ID.", - }, - { - name: "error does not start with a capital letter or end with period", - err: "invalid ID", - fmterr: "Invalid ID.", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - fmterr := internal.ErrorFmt(errors.New(tt.err)) - - if got, want := fmterr.Error(), tt.fmterr; got != want { - t.Errorf("error strings do not match. got/want\n%s\n%s\n", got, want) - } - }) - } -} diff --git a/cmd/influx/internal/tabwriter.go b/cmd/influx/internal/tabwriter.go deleted file mode 100644 index aeee4226ce0..00000000000 --- a/cmd/influx/internal/tabwriter.go +++ /dev/null @@ -1,70 +0,0 @@ -package internal - -import ( - "fmt" - "io" - "strings" - "text/tabwriter" - - platform "github.com/influxdata/influxdb/v2" -) - -// TabWriter wraps tab writer headers logic. -type TabWriter struct { - writer *tabwriter.Writer - headers []string - hideHeaders bool -} - -// NewTabWriter creates a new tab writer. -func NewTabWriter(w io.Writer) *TabWriter { - return &TabWriter{ - writer: tabwriter.NewWriter(w, 0, 8, 1, '\t', 0), - } -} - -// HideHeaders will set the hideHeaders flag. -func (w *TabWriter) HideHeaders(b bool) { - w.hideHeaders = b -} - -// WriteHeaders will write headers. -func (w *TabWriter) WriteHeaders(h ...string) { - w.headers = h - if !w.hideHeaders { - fmt.Fprintln(w.writer, strings.Join(h, "\t")) - } -} - -// Write will write the map into embed tab writer. 
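// Editor's note: Write (below) looks each header up in the supplied map and
// chooses a printf verb per value type via formatStringType. A hypothetical
// call, shown only for illustration:
//
//	w.WriteHeaders("Name", "Num Cells")
//	w.Write(map[string]interface{}{"Name": "d1", "Num Cells": 3}) // emits "%s\t%d\n"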
-func (w *TabWriter) Write(m map[string]interface{}) { - body := make([]interface{}, len(w.headers)) - types := make([]string, len(w.headers)) - for i, h := range w.headers { - v := m[h] - body[i] = v - types[i] = formatStringType(v) - } - - formatString := strings.Join(types, "\t") - fmt.Fprintf(w.writer, formatString+"\n", body...) -} - -// Flush should be called after the last call to Write to ensure -// that any data buffered in the Writer is written to output. Any -// incomplete escape sequence at the end is considered -// complete for formatting purposes. -func (w *TabWriter) Flush() { - w.writer.Flush() -} - -func formatStringType(i interface{}) string { - switch i.(type) { - case int: - return "%d" - case platform.ID, string: - return "%s" - } - - return "%v" -} diff --git a/cmd/influx/main.go b/cmd/influx/main.go deleted file mode 100644 index 9ada530afe9..00000000000 --- a/cmd/influx/main.go +++ /dev/null @@ -1,610 +0,0 @@ -package main - -import ( - "context" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "strings" - "sync" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/cmd/influx/config" - "github.com/influxdata/influxdb/v2/cmd/influx/internal" - "github.com/influxdata/influxdb/v2/http" - "github.com/influxdata/influxdb/v2/internal/fs" - "github.com/influxdata/influxdb/v2/kit/cli" - "github.com/influxdata/influxdb/v2/pkg/httpc" - "github.com/influxdata/influxdb/v2/tenant" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -const maxTCPConnections = 10 - -var ( - version = "dev" - commit = "none" - date = "" - defaultConfigsPath = mustDefaultConfigPath() -) - -func main() { - if len(date) == 0 { - date = time.Now().UTC().Format(time.RFC3339) - } - - influxCmd := influxCmd() - if err := influxCmd.Execute(); err != nil { - seeHelp(influxCmd, nil) - os.Exit(1) - } -} - -var ( - httpClient *httpc.Client -) - -func newHTTPClient() (*httpc.Client, error) { - if httpClient != nil { - return httpClient, nil - } - - userAgent := fmt.Sprintf( - "influx/%s (%s) Sha/%s Date/%s", - version, runtime.GOOS, commit, date, - ) - - opts := []httpc.ClientOptFn{ - httpc.WithUserAgentHeader(userAgent), - } - // This is useful for forcing tracing on a given endpoint. - if flags.traceDebugID != "" { - opts = append(opts, httpc.WithHeader("jaeger-debug-id", flags.traceDebugID)) - } - - ac := flags.config() - c, err := http.NewHTTPClient(ac.Host, ac.Token, flags.skipVerify, opts...) 
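// Editor's note: because of the nil check at the top of newHTTPClient, the
// first successfully built client is memoized in the package-level
// httpClient variable, so every later caller (dashboards, orgs, delete, ...)
// reuses the same client. A sketch, not part of the original diff:
//
//	c1, _ := newHTTPClient()
//	c2, _ := newHTTPClient()
//	// c1 == c2: both are the single cached *httpc.Client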
- if err != nil { - return nil, err - } - - httpClient = c - return httpClient, nil -} - -type ( - cobraRunEFn func(cmd *cobra.Command, args []string) error - - cobraRunEMiddleware func(fn cobraRunEFn) cobraRunEFn - - genericCLIOptFn func(*genericCLIOpts) -) - -type genericCLIOpts struct { - in io.Reader - w io.Writer - errW io.Writer - viper *viper.Viper - - json bool - hideHeaders bool - - runEWrapFn cobraRunEMiddleware -} - -func (o genericCLIOpts) newCmd(use string, runE func(*cobra.Command, []string) error, useRunEMiddleware bool) *cobra.Command { - cmd := &cobra.Command{ - Args: cobra.NoArgs, - Use: use, - RunE: runE, - } - - canWrapRunE := runE != nil && o.runEWrapFn != nil - if useRunEMiddleware && canWrapRunE { - cmd.RunE = o.runEWrapFn(runE) - } else if canWrapRunE { - cmd.RunE = runE - } - - cmd.SetOut(o.w) - cmd.SetIn(o.in) - cmd.SetErr(o.errW) - return cmd -} - -func (o genericCLIOpts) writeJSON(v interface{}) error { - return writeJSON(o.w, v) -} - -func (o genericCLIOpts) newTabWriter() *internal.TabWriter { - w := internal.NewTabWriter(o.w) - w.HideHeaders(o.hideHeaders) - return w -} - -func (o *genericCLIOpts) registerPrintOptions(cmd *cobra.Command) { - registerPrintOptions(o.viper, cmd, &o.hideHeaders, &o.json) -} - -func in(r io.Reader) genericCLIOptFn { - return func(o *genericCLIOpts) { - o.in = r - } -} - -func out(w io.Writer) genericCLIOptFn { - return func(o *genericCLIOpts) { - o.w = w - } -} - -type globalFlags struct { - skipVerify bool - token string - host string - traceDebugID string - filepath string - activeConfig string - configs config.Configs -} - -func (g *globalFlags) config() config.Config { - if ac := g.activeConfig; ac != "" { - c, ok := g.configs[ac] - if !ok { - // this is unrecoverable - fmt.Fprintf(os.Stderr, "Err: active config %q was not found\n", ac) - os.Exit(1) - } - if g.host != "" { - c.Host = g.host - } - if g.token != "" { - c.Token = g.token - } - return c - } - return g.configs.Active() -} - -func (g *globalFlags) registerFlags(v *viper.Viper, cmd *cobra.Command, skipFlags ...string) { - if g == nil { - panic("global flags are not set: ") - } - - skips := make(map[string]bool) - for _, flag := range skipFlags { - skips[flag] = true - } - - fOpts := flagOpts{ - { - DestP: &g.token, - Flag: "token", - Short: 't', - Desc: "Authentication token", - }, - { - DestP: &g.host, - Flag: "host", - Desc: "HTTP address of InfluxDB", - }, - { - DestP: &g.traceDebugID, - Flag: "trace-debug-id", - Hidden: true, - }, - { - DestP: &g.filepath, - Flag: "configs-path", - Desc: "Path to the influx CLI configurations", - Default: defaultConfigsPath, - }, - { - DestP: &g.activeConfig, - Flag: "active-config", - Desc: "Config name to use for command", - Short: 'c', - }, - } - - var filtered flagOpts - for _, o := range fOpts { - if skips[o.Flag] { - continue - } - filtered = append(filtered, o) - } - - filtered.mustRegister(v, cmd) - - if skips["skip-verify"] { - return - } - cmd.Flags().BoolVar(&g.skipVerify, "skip-verify", false, "Skip TLS certificate chain and host name verification.") -} - -var flags globalFlags - -type cmdInfluxBuilder struct { - genericCLIOpts - - once sync.Once -} - -func newInfluxCmdBuilder(optFns ...genericCLIOptFn) *cmdInfluxBuilder { - builder := new(cmdInfluxBuilder) - - opt := genericCLIOpts{ - in: os.Stdin, - w: os.Stdout, - errW: os.Stderr, - runEWrapFn: checkSetupRunEMiddleware(&flags), - viper: viper.New(), - } - for _, optFn := range optFns { - optFn(&opt) - } - - builder.genericCLIOpts = opt - return builder -} - -func 
(b *cmdInfluxBuilder) cmd(childCmdFns ...func(f *globalFlags, opt genericCLIOpts) *cobra.Command) *cobra.Command { - b.once.Do(func() { - // enforce that viper options only ever get set once - setViperOptions(b.viper) - }) - - cmd := b.newCmd("influx", nil, false) - cmd.Short = "Influx Client" - cmd.SilenceUsage = true - - for _, childCmd := range childCmdFns { - cmd.AddCommand(childCmd(&flags, b.genericCLIOpts)) - } - - cmd.PersistentPreRun = func(cmd *cobra.Command, args []string) { - // migration credential token - migrateOldCredential(flags.filepath) - - // this is after the flagOpts register b/c we don't want to show the default value - // in the usage display. This will add it as the config, then if a token flag - // is provided too, the flag will take precedence. - flags.configs = getConfigFromDefaultPath(flags.filepath) - - cfg := flags.configs.Active() - - // we have some indirection here b/c of how the Config is embedded on the - // global flags type. For the time being, we check to see if there was a - // value set on flags registered (via env vars), and override the host/token - // values if they are. - if flags.token != "" { - cfg.Token = flags.token - } - if flags.host != "" { - cfg.Host = flags.host - } - flags.configs[cfg.Name] = cfg - } - - // Update help description for all commands in command tree - walk(cmd, func(c *cobra.Command) { - c.Flags().BoolP("help", "h", false, fmt.Sprintf("Help for the %s command ", c.Name())) - }) - - // completion command goes last, after the walk, so that all - // commands have every flag listed in the bash|zsh completions. - cmd.AddCommand( - completionCmd(cmd), - cmdVersion(), - ) - return cmd -} - -func cmdVersion() *cobra.Command { - return &cobra.Command{ - Use: "version", - Short: "Print the influx CLI version", - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Influx CLI %s (git: %s) build_date: %s\n", version, commit, date) - }, - } -} - -func influxCmd(opts ...genericCLIOptFn) *cobra.Command { - builder := newInfluxCmdBuilder(opts...) - return builder.cmd( - cmdAuth, - cmdBackup, - cmdBucket, - cmdConfig, - cmdDashboard, - cmdDelete, - cmdExport, - cmdOrganization, - cmdPing, - cmdQuery, - cmdRestore, - cmdSecret, - cmdSetup, - cmdStack, - cmdTask, - cmdTelegraf, - cmdTemplate, - cmdApply, - cmdTranspile, - cmdUser, - cmdWrite, - cmdV1SubCommands, - ) -} - -func fetchSubCommand(parent *cobra.Command, args []string) *cobra.Command { - var err error - var cmd *cobra.Command - - // Workaround FAIL with "go test -v" or "cobra.test -test.v", see #155 - if args == nil && filepath.Base(os.Args[0]) != "cobra.test" { - args = os.Args[1:] - } - - if parent.TraverseChildren { - cmd, _, err = parent.Traverse(args) - } else { - cmd, _, err = parent.Find(args) - } - // return nil if any errs - if err != nil { - return nil - } - return cmd -} - -func seeHelp(c *cobra.Command, args []string) { - if c = fetchSubCommand(c, args); c == nil { - return //return here, since cobra already handles the error - } - c.Printf("See '%s -h' for help\n", c.CommandPath()) -} - -func getConfigFromDefaultPath(configsPath string) config.Configs { - r, err := os.Open(configsPath) - if err != nil { - return config.Configs{ - config.DefaultConfig.Name: config.DefaultConfig, - } - } - defer r.Close() - - cfgs, err := config. - NewLocalConfigSVC(configsPath, filepath.Dir(configsPath)). 
- ListConfigs() - if err != nil { - return map[string]config.Config{ - config.DefaultConfig.Name: config.DefaultConfig, - } - } - - return cfgs -} - -func defaultConfigPath() (string, string, error) { - dir, err := fs.InfluxDir() - if err != nil { - return "", "", err - } - return filepath.Join(dir, fs.DefaultConfigsFile), dir, nil -} - -func mustDefaultConfigPath() string { - filepath, _, err := defaultConfigPath() - if err != nil { - panic(err) - } - return filepath -} - -func migrateOldCredential(configsPath string) { - dir := filepath.Dir(configsPath) - if configsPath == "" || dir == "" { - return - } - - tokenFile := filepath.Join(dir, fs.DefaultTokenFile) - tokB, err := ioutil.ReadFile(tokenFile) - if err != nil { - return // no need for migration - } - - err = writeConfigToPath(strings.TrimSpace(string(tokB)), "", configsPath, dir) - if err != nil { - return - } - - // ignore the remove err - _ = os.Remove(tokenFile) -} - -func writeConfigToPath(tok, org, path, dir string) error { - p := &config.DefaultConfig - p.Token = tok - p.Org = org - - _, err := config.NewLocalConfigSVC(path, dir).CreateConfig(*p) - return err -} - -func checkSetup(host string, skipVerify bool) error { - httpClient, err := newHTTPClient() - if err != nil { - return err - } - - s := &tenant.OnboardClientService{Client: httpClient} - - isOnboarding, err := s.IsOnboarding(context.Background()) - if err != nil { - return err - } - - if isOnboarding { - return fmt.Errorf("the instance at %q has not been setup. Please run `influx setup` before issuing any additional commands", host) - } - - return nil -} - -func checkSetupRunEMiddleware(f *globalFlags) cobraRunEMiddleware { - return func(fn cobraRunEFn) cobraRunEFn { - return func(cmd *cobra.Command, args []string) error { - err := fn(cmd, args) - if err == nil { - return nil - } - - ac := f.config() - if setupErr := checkSetup(ac.Host, f.skipVerify); setupErr != nil && influxdb.EUnauthorized != influxdb.ErrorCode(setupErr) { - cmd.OutOrStderr().Write([]byte(fmt.Sprintf("Error: %s\n", internal.ErrorFmt(err).Error()))) - return internal.ErrorFmt(setupErr) - } - - return internal.ErrorFmt(err) - } - } -} - -// walk calls f for c and all of its children. -func walk(c *cobra.Command, f func(*cobra.Command)) { - f(c) - for _, c := range c.Commands() { - walk(c, f) - } -} - -type organization struct { - id, name string -} - -func (o *organization) register(v *viper.Viper, cmd *cobra.Command, persistent bool) { - opts := flagOpts{ - { - DestP: &o.id, - Flag: "org-id", - Desc: "The ID of the organization", - Persistent: persistent, - }, - { - DestP: &o.name, - Flag: "org", - Short: 'o', - Desc: "The name of the organization", - Persistent: persistent, - }, - } - opts.mustRegister(v, cmd) -} - -func (o *organization) getID(orgSVC influxdb.OrganizationService) (influxdb.ID, error) { - if o.id != "" { - influxOrgID, err := influxdb.IDFromString(o.id) - if err != nil { - return 0, fmt.Errorf("invalid org ID '%s' provided (did you pass an org name instead of an ID?): %w", o.id, err) - } - return *influxOrgID, nil - } - - getOrgByName := func(name string) (influxdb.ID, error) { - org, err := orgSVC.FindOrganization(context.Background(), influxdb.OrganizationFilter{ - Name: &name, - }) - if err != nil { - return 0, fmt.Errorf("failed to get ID for org '%s' (do you have org-level read permission?): %w", name, err) - } - return org.ID, nil - } - if o.name != "" { - return getOrgByName(o.name) - } - // last check is for the org set in the CLI config. This will be last in priority. 
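// Editor's note, restating the precedence implemented by getID as a sketch
// (not part of the original diff):
//
//	1. --org-id flag: parsed directly with influxdb.IDFromString
//	2. --org flag:    resolved by name through orgSVC.FindOrganization
//	3. CLI config:    falls back to the active config's Org, handled below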
- if ac := flags.config(); ac.Org != "" { - return getOrgByName(ac.Org) - } - return 0, fmt.Errorf("failed to locate organization criteria") -} - -func (o *organization) validOrgFlags(f *globalFlags) error { - if o.id == "" && o.name == "" && f != nil { - o.name = flags.config().Org - } - - if o.id == "" && o.name == "" { - return fmt.Errorf("must specify org-id, or org name") - } else if o.id != "" && o.name != "" { - return fmt.Errorf("must specify org-id, or org name not both") - } - return nil -} - -type flagOpts []cli.Opt - -func (f flagOpts) mustRegister(v *viper.Viper, cmd *cobra.Command) { - if len(f) == 0 { - return - } - - for i := range f { - envVar := f[i].Flag - if e := f[i].EnvVar; e != "" { - envVar = e - } - - f[i].Desc = fmt.Sprintf( - "%s; Maps to env var $INFLUX_%s", - f[i].Desc, - strings.ToUpper(strings.Replace(envVar, "-", "_", -1)), - ) - } - cli.BindOptions(v, cmd, f) -} - -func registerPrintOptions(v *viper.Viper, cmd *cobra.Command, headersP, jsonOutP *bool) { - var opts flagOpts - if headersP != nil { - opts = append(opts, cli.Opt{ - DestP: headersP, - Flag: "hide-headers", - EnvVar: "HIDE_HEADERS", - Desc: "Hide the table headers; defaults false", - Default: false, - }) - } - if jsonOutP != nil { - opts = append(opts, cli.Opt{ - DestP: jsonOutP, - Flag: "json", - EnvVar: "OUTPUT_JSON", - Desc: "Output data as json; defaults false", - Default: false, - }) - } - opts.mustRegister(v, cmd) -} - -func setViperOptions(v *viper.Viper) { - v.SetEnvPrefix("INFLUX") - v.AutomaticEnv() - v.SetEnvKeyReplacer(strings.NewReplacer("-", "_")) -} - -func writeJSON(w io.Writer, v interface{}) error { - enc := json.NewEncoder(w) - enc.SetIndent("", "\t") - return enc.Encode(v) -} diff --git a/cmd/influx/organization.go b/cmd/influx/organization.go deleted file mode 100644 index 4d2927b36ca..00000000000 --- a/cmd/influx/organization.go +++ /dev/null @@ -1,635 +0,0 @@ -package main - -import ( - "context" - "fmt" - "io" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/tenant" - "github.com/spf13/cobra" -) - -type orgSVCFn func() (influxdb.OrganizationService, influxdb.UserResourceMappingService, influxdb.UserService, error) - -func cmdOrganization(f *globalFlags, opts genericCLIOpts) *cobra.Command { - builder := newCmdOrgBuilder(newOrgServices, f, opts) - return builder.cmd() -} - -type cmdOrgBuilder struct { - genericCLIOpts - *globalFlags - - svcFn orgSVCFn - - json bool - hideHeaders bool - description string - id string - memberID string - name string -} - -func newCmdOrgBuilder(svcFn orgSVCFn, f *globalFlags, opts genericCLIOpts) *cmdOrgBuilder { - return &cmdOrgBuilder{ - genericCLIOpts: opts, - globalFlags: f, - svcFn: svcFn, - } -} - -func (b *cmdOrgBuilder) cmd() *cobra.Command { - cmd := b.genericCLIOpts.newCmd("org", nil, false) - cmd.Aliases = []string{"organization"} - cmd.Short = "Organization management commands" - cmd.Run = seeHelp - - cmd.AddCommand( - b.cmdCreate(), - b.cmdDelete(), - b.cmdFind(), - b.cmdMember(), - b.cmdUpdate(), - ) - - return cmd -} - -func (b *cmdOrgBuilder) cmdCreate() *cobra.Command { - cmd := b.newCmd("create", b.createRunEFn) - cmd.Short = "Create organization" - - b.registerPrintFlags(cmd) - cmd.Flags().StringVarP(&b.name, "name", "n", "", "The name of organization that will be created") - cmd.MarkFlagRequired("name") - cmd.Flags().StringVarP(&b.description, "description", "d", "", "The description of the organization that will be created") - - return cmd -} - -func (b *cmdOrgBuilder) 
createRunEFn(cmd *cobra.Command, args []string) error { - orgSvc, _, _, err := b.svcFn() - if err != nil { - return fmt.Errorf("failed to initialize org service client: %v", err) - } - - org := &influxdb.Organization{ - Name: b.name, - Description: b.description, - } - - if err := orgSvc.CreateOrganization(context.Background(), org); err != nil { - return fmt.Errorf("failed to create organization: %v", err) - } - - return b.printOrg(orgPrintOpt{org: org}) -} - -func (b *cmdOrgBuilder) cmdDelete() *cobra.Command { - cmd := b.newCmd("delete", b.deleteRunEFn) - cmd.Short = "Delete organization" - - opts := flagOpts{ - { - DestP: &b.id, - Flag: "id", - Short: 'i', - EnvVar: "ORG_ID", - Desc: "The organization ID", - }, - } - opts.mustRegister(b.viper, cmd) - b.registerPrintFlags(cmd) - - return cmd -} - -func (b *cmdOrgBuilder) deleteRunEFn(cmd *cobra.Command, args []string) error { - orgSvc, _, _, err := b.svcFn() - if err != nil { - return fmt.Errorf("failed to initialize org service client: %v", err) - } - - var id influxdb.ID - if err := id.DecodeFromString(b.id); err != nil { - return fmt.Errorf("failed to decode org id %s: %v", b.id, err) - } - - ctx := context.TODO() - o, err := orgSvc.FindOrganizationByID(ctx, id) - if err != nil { - return fmt.Errorf("failed to find org with id %q: %v", id, err) - } - - if err = orgSvc.DeleteOrganization(ctx, id); err != nil { - return fmt.Errorf("failed to delete org with id %q: %v", id, err) - } - - return b.printOrg(orgPrintOpt{ - deleted: true, - org: o, - }) -} - -func (b *cmdOrgBuilder) cmdFind() *cobra.Command { - cmd := b.newCmd("list", b.findRunEFn) - cmd.Short = "List organizations" - cmd.Aliases = []string{"find", "ls"} - - opts := flagOpts{ - { - DestP: &b.name, - Flag: "name", - Short: 'n', - EnvVar: "ORG", - Desc: "The organization name", - }, - { - DestP: &b.id, - Flag: "id", - Short: 'i', - EnvVar: "ORG_ID", - Desc: "The organization ID", - }, - } - opts.mustRegister(b.viper, cmd) - b.registerPrintFlags(cmd) - - return cmd -} - -func (b *cmdOrgBuilder) findRunEFn(cmd *cobra.Command, args []string) error { - orgSvc, _, _, err := b.svcFn() - if err != nil { - return fmt.Errorf("failed to initialize org service client: %v", err) - } - - filter := influxdb.OrganizationFilter{} - if b.name != "" { - filter.Name = &b.name - } - - if b.id != "" { - id, err := influxdb.IDFromString(b.id) - if err != nil { - return fmt.Errorf("failed to decode org id %s: %v", b.id, err) - } - filter.ID = id - } - - orgs, _, err := orgSvc.FindOrganizations(context.Background(), filter) - if err != nil { - return fmt.Errorf("failed to find orgs: %v", err) - } - - return b.printOrg(orgPrintOpt{orgs: orgs}) -} - -func (b *cmdOrgBuilder) cmdUpdate() *cobra.Command { - cmd := b.newCmd("update", b.updateRunEFn) - cmd.Short = "Update organization" - - opts := flagOpts{ - { - DestP: &b.id, - Flag: "id", - Short: 'i', - EnvVar: "ORG_ID", - Desc: "The organization ID (required)", - Required: true, - }, - { - DestP: &b.name, - Flag: "name", - Short: 'n', - EnvVar: "ORG", - Desc: "The organization name", - }, - { - DestP: &b.description, - Flag: "description", - Short: 'd', - EnvVar: "ORG_DESCRIPTION", - Desc: "The organization description", - }, - } - opts.mustRegister(b.viper, cmd) - b.registerPrintFlags(cmd) - - return cmd -} - -func (b *cmdOrgBuilder) updateRunEFn(cmd *cobra.Command, args []string) error { - orgSvc, _, _, err := b.svcFn() - if err != nil { - return fmt.Errorf("failed to initialize org service client: %v", err) - } - - var id influxdb.ID - if err :=
id.DecodeFromString(b.id); err != nil { - return fmt.Errorf("failed to decode org id %s: %v", b.id, err) - } - - update := influxdb.OrganizationUpdate{} - if b.name != "" { - update.Name = &b.name - } - if b.description != "" { - update.Description = &b.description - } - - o, err := orgSvc.UpdateOrganization(context.Background(), id, update) - if err != nil { - return fmt.Errorf("failed to update org: %v", err) - } - - return b.printOrg(orgPrintOpt{org: o}) -} - -func (b *cmdOrgBuilder) printOrg(opts orgPrintOpt) error { - if b.json { - var v interface{} = opts.orgs - if opts.org != nil { - v = opts.org - } - return b.writeJSON(v) - } - - w := b.newTabWriter() - defer w.Flush() - - w.HideHeaders(b.hideHeaders) - - headers := []string{"ID", "Name"} - if opts.deleted { - headers = append(headers, "Deleted") - } - w.WriteHeaders(headers...) - - if opts.org != nil { - opts.orgs = append(opts.orgs, opts.org) - } - - for _, o := range opts.orgs { - m := map[string]interface{}{ - "ID": o.ID.String(), - "Name": o.Name, - } - if opts.deleted { - m["Deleted"] = true - } - w.Write(m) - } - - return nil -} - -func (b *cmdOrgBuilder) cmdMember() *cobra.Command { - cmd := b.genericCLIOpts.newCmd("members", nil, false) - cmd.Short = "Organization membership commands" - cmd.Run = seeHelp - - cmd.AddCommand( - b.cmdMemberAdd(), - b.cmdMemberList(), - b.cmdMemberRemove(), - ) - - return cmd -} - -func (b *cmdOrgBuilder) cmdMemberList() *cobra.Command { - cmd := b.newCmd("list", b.memberListRunEFn) - cmd.Short = "List organization members" - cmd.Aliases = []string{"find", "ls"} - - opts := flagOpts{ - { - DestP: &b.name, - Flag: "name", - Short: 'n', - EnvVar: "ORG", - Desc: "The organization name", - }, - { - DestP: &b.id, - Flag: "id", - Short: 'i', - EnvVar: "ORG_ID", - Desc: "The organization ID", - }, - } - opts.mustRegister(b.viper, cmd) - b.registerPrintFlags(cmd) - return cmd -} - -func (b *cmdOrgBuilder) memberListRunEFn(cmd *cobra.Command, args []string) error { - orgSvc, urmSVC, userSVC, err := b.svcFn() - if err != nil { - return fmt.Errorf("failed to initialize org service client: %v", err) - } - - if b.id == "" && b.name == "" { - return fmt.Errorf("must specify exactly one of id and name") - } - - var filter influxdb.OrganizationFilter - if b.name != "" { - filter.Name = &b.name - } - - if b.id != "" { - var fID influxdb.ID - err := fID.DecodeFromString(b.id) - if err != nil { - return fmt.Errorf("failed to decode org id %s: %v", b.id, err) - } - filter.ID = &fID - } - - organization, err := orgSvc.FindOrganization(context.Background(), filter) - if err != nil { - return fmt.Errorf("failed to find org: %v", err) - } - - ctx := context.Background() - return b.memberList(ctx, urmSVC, userSVC, influxdb.UserResourceMappingFilter{ - ResourceType: influxdb.OrgsResourceType, - ResourceID: organization.ID, - UserType: influxdb.Member, - }) -} - -func (b *cmdOrgBuilder) cmdMemberAdd() *cobra.Command { - cmd := b.newCmd("add", b.memberAddRunEFn) - cmd.Short = "Add organization member" - - cmd.Flags().StringVarP(&b.memberID, "member", "m", "", "The member ID") - cmd.MarkFlagRequired("member") - - opts := flagOpts{ - { - DestP: &b.name, - Flag: "name", - Short: 'n', - EnvVar: "ORG", - Desc: "The organization name", - }, - { - DestP: &b.id, - Flag: "id", - Short: 'i', - EnvVar: "ORG_ID", - Desc: "The organization ID", - }, - } - opts.mustRegister(b.viper, cmd) - - return cmd -} - -func (b *cmdOrgBuilder) memberAddRunEFn(cmd *cobra.Command, args []string) error { - if b.id == "" && b.name == "" { - return 
fmt.Errorf("must specify exactly one of id and name") - } - if b.id != "" && b.name != "" { - return fmt.Errorf("must specify exactly one of id and name") - } - - orgSvc, urmSVC, _, err := b.svcFn() - if err != nil { - return fmt.Errorf("failed to initialize org service client: %v", err) - } - - var filter influxdb.OrganizationFilter - if b.name != "" { - filter.Name = &b.name - } - - if b.id != "" { - var fID influxdb.ID - err := fID.DecodeFromString(b.id) - if err != nil { - return fmt.Errorf("failed to decode org id %s: %v", b.id, err) - } - filter.ID = &fID - } - - ctx := context.Background() - organization, err := orgSvc.FindOrganization(ctx, filter) - if err != nil { - return fmt.Errorf("failed to find org: %v", err) - } - - var memberID influxdb.ID - err = memberID.DecodeFromString(b.memberID) - if err != nil { - return fmt.Errorf("failed to decode member id %s: %v", b.memberID, err) - } - - return addMember(ctx, b.w, urmSVC, influxdb.UserResourceMapping{ - ResourceID: organization.ID, - ResourceType: influxdb.OrgsResourceType, - MappingType: influxdb.UserMappingType, - UserID: memberID, - UserType: influxdb.Member, - }) -} - -func (b *cmdOrgBuilder) cmdMemberRemove() *cobra.Command { - cmd := b.newCmd("remove", b.membersRemoveRunEFn) - cmd.Short = "Remove organization member" - - opts := flagOpts{ - { - DestP: &b.name, - Flag: "name", - Short: 'n', - EnvVar: "ORG", - Desc: "The organization name", - }, - { - DestP: &b.id, - Flag: "id", - Short: 'i', - EnvVar: "ORG_ID", - Desc: "The organization ID", - }, - } - opts.mustRegister(b.viper, cmd) - - cmd.Flags().StringVarP(&b.memberID, "member", "m", "", "The member ID") - cmd.MarkFlagRequired("member") - - return cmd -} - -func (b *cmdOrgBuilder) membersRemoveRunEFn(cmd *cobra.Command, args []string) error { - if b.id == "" && b.name == "" { - return fmt.Errorf("must specify exactly one of id and name") - } - - if b.id != "" && b.name != "" { - return fmt.Errorf("must specify exactly one of id and name") - } - - orgSvc, urmSVC, _, err := b.svcFn() - if err != nil { - return fmt.Errorf("failed to initialize org service client: %v", err) - } - - var filter influxdb.OrganizationFilter - if b.name != "" { - filter.Name = &b.name - } - - if b.id != "" { - var fID influxdb.ID - err := fID.DecodeFromString(b.id) - if err != nil { - return fmt.Errorf("failed to decode org id %s: %v", b.id, err) - } - filter.ID = &fID - } - - ctx := context.Background() - organization, err := orgSvc.FindOrganization(ctx, filter) - if err != nil { - return fmt.Errorf("failed to find organization: %v", err) - } - - var memberID influxdb.ID - err = memberID.DecodeFromString(b.memberID) - if err != nil { - return fmt.Errorf("failed to decode member id %s: %v", b.memberID, err) - } - - return removeMember(ctx, b.w, urmSVC, organization.ID, memberID) -} - -func (b *cmdOrgBuilder) newCmd(use string, runE func(*cobra.Command, []string) error) *cobra.Command { - cmd := b.genericCLIOpts.newCmd(use, runE, true) - b.globalFlags.registerFlags(b.viper, cmd) - return cmd -} - -func (b *cmdOrgBuilder) registerPrintFlags(cmd *cobra.Command) { - registerPrintOptions(b.viper, cmd, &b.hideHeaders, &b.json) -} - -func newOrgServices() (influxdb.OrganizationService, influxdb.UserResourceMappingService, influxdb.UserService, error) { - client, err := newHTTPClient() - if err != nil { - return nil, nil, nil, err - } - - orgSVC := &tenant.OrgClientService{Client: client} - urmSVC := &tenant.UserResourceMappingClient{Client: client} - userSVC := &tenant.UserClientService{Client: client} 
-
-    return orgSVC, urmSVC, userSVC, nil
-}
-
-func newOrganizationService() (influxdb.OrganizationService, error) {
-    client, err := newHTTPClient()
-    if err != nil {
-        return nil, err
-    }
-
-    return &tenant.OrgClientService{
-        Client: client,
-    }, nil
-}
-
-func (b *cmdOrgBuilder) memberList(ctx context.Context, urmSVC influxdb.UserResourceMappingService, userSVC influxdb.UserService, f influxdb.UserResourceMappingFilter) error {
-    mappings, _, err := urmSVC.FindUserResourceMappings(ctx, f)
-    if err != nil {
-        return fmt.Errorf("failed to find members: %v", err)
-    }
-
-    var (
-        ursC = make(chan struct {
-            User  *influxdb.User
-            Index int
-        })
-        errC = make(chan error)
-        sem  = make(chan struct{}, maxTCPConnections)
-    )
-    for k, v := range mappings {
-        sem <- struct{}{}
-        go func(k int, v *influxdb.UserResourceMapping) {
-            defer func() { <-sem }()
-            usr, err := userSVC.FindUserByID(ctx, v.UserID)
-            if err != nil {
-                errC <- fmt.Errorf("failed to retrieve user details: %v", err)
-                return
-            }
-            ursC <- struct {
-                User  *influxdb.User
-                Index int
-            }{
-                User:  usr,
-                Index: k,
-            }
-        }(k, v)
-    }
-
-    users := make([]*influxdb.User, len(mappings))
-    for i := 0; i < len(mappings); i++ {
-        select {
-        case <-ctx.Done():
-            return &influxdb.Error{
-                Msg: "Timeout retrieving user details",
-            }
-        case err := <-errC:
-            return err
-        case item := <-ursC:
-            users[item.Index] = item.User
-        }
-    }
-
-    if b.json {
-        return b.writeJSON(users)
-    }
-
-    tw := b.newTabWriter()
-    defer tw.Flush()
-
-    tw.HideHeaders(b.hideHeaders)
-
-    tw.WriteHeaders("ID", "Name", "User Type", "Status")
-    for idx, m := range users {
-        tw.Write(map[string]interface{}{
-            "ID":        m.ID.String(),
-            "User Name": m.Name,
-            "User Type": string(mappings[idx].UserType),
-            "Status":    string(m.Status),
-        })
-    }
-
-    return nil
-}
-
-func addMember(ctx context.Context, w io.Writer, urmSVC influxdb.UserResourceMappingService, urm influxdb.UserResourceMapping) error {
-    if err := urmSVC.CreateUserResourceMapping(ctx, &urm); err != nil {
-        return fmt.Errorf("failed to add member: %v", err)
-    }
-    _, err := fmt.Fprintf(w, "user %s has been added as a %s of %s: %s\n", urm.UserID, urm.UserType, urm.ResourceType, urm.ResourceID)
-    return err
-}
-
-func removeMember(ctx context.Context, w io.Writer, urmSVC influxdb.UserResourceMappingService, resourceID, userID influxdb.ID) error {
-    if err := urmSVC.DeleteUserResourceMapping(ctx, resourceID, userID); err != nil {
-        return fmt.Errorf("failed to remove member: %v", err)
-    }
-    _, err := fmt.Fprintf(w, "userID %s has been removed from ResourceID %s\n", userID, resourceID)
-    return err
-}
-
-type orgPrintOpt struct {
-    deleted bool
-    org     *influxdb.Organization
-    orgs    []*influxdb.Organization
-}
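
The deleted memberList above resolves one user per mapping concurrently, using a buffered struct{} channel as a counting semaphore so that at most maxTCPConnections lookups are in flight at once. Below is a minimal, self-contained sketch of that bounded fan-out pattern; fetchAll, fetch, and limit are illustrative names rather than CLI code, and the sketch uses a WaitGroup where the original multiplexes results and errors over channels:

package main

import (
	"fmt"
	"sync"
)

// fetchAll resolves every id concurrently while never running more than
// limit goroutines at once. Sending into the buffered channel acquires a
// slot; receiving from it releases the slot.
func fetchAll(ids []int, limit int, fetch func(int) string) []string {
	results := make([]string, len(ids)) // each goroutine writes only its own index
	sem := make(chan struct{}, limit)
	var wg sync.WaitGroup
	for i, id := range ids {
		wg.Add(1)
		sem <- struct{}{} // acquire a slot before spawning
		go func(i, id int) {
			defer wg.Done()
			defer func() { <-sem }() // release the slot when done
			results[i] = fetch(id)
		}(i, id)
	}
	wg.Wait()
	return results
}

func main() {
	users := fetchAll([]int{1, 2, 3, 4}, 2, func(id int) string {
		return fmt.Sprintf("user-%d", id)
	})
	fmt.Println(users)
}
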
diff --git a/cmd/influx/organization_test.go b/cmd/influx/organization_test.go
deleted file mode 100644
index 036c47b6cd1..00000000000
--- a/cmd/influx/organization_test.go
+++ /dev/null
@@ -1,590 +0,0 @@
-package main
-
-import (
-    "bytes"
-    "context"
-    "fmt"
-    "io/ioutil"
-    "reflect"
-    "testing"
-
-    "github.com/stretchr/testify/assert"
-
-    "github.com/influxdata/influxdb/v2"
-    "github.com/influxdata/influxdb/v2/mock"
-    "github.com/spf13/cobra"
-    "github.com/stretchr/testify/require"
-)
-
-func TestCmdOrg(t *testing.T) {
-    fakeOrgSVCFn := func(svc influxdb.OrganizationService) orgSVCFn {
-        return func() (influxdb.OrganizationService, influxdb.UserResourceMappingService, influxdb.UserService, error) {
-            return svc, mock.NewUserResourceMappingService(), mock.NewUserService(), nil
-        }
-    }
-
-    fakeOrgUrmSVCsFn := func(svc influxdb.OrganizationService, urmSVC influxdb.UserResourceMappingService) orgSVCFn {
-        return func() (influxdb.OrganizationService, influxdb.UserResourceMappingService, influxdb.UserService, error) {
-            return svc, urmSVC, mock.NewUserService(), nil
-        }
-    }
-
-    t.Run("create", func(t *testing.T) {
-        tests := []struct {
-            name     string
-            expected influxdb.Organization
-            flags    []string
-        }{
-            {
-                name:  "all",
-                flags: []string{"--name=new name", "--description=desc"},
-                expected: influxdb.Organization{
-                    Name:        "new name",
-                    Description: "desc",
-                },
-            },
-            {
-                name:  "shorts",
-                flags: []string{"-n=new name", "-d=desc"},
-                expected: influxdb.Organization{
-                    Name:        "new name",
-                    Description: "desc",
-                },
-            },
-        }
-
-        cmdFn := func(expectedOrg influxdb.Organization) func(*globalFlags, genericCLIOpts) *cobra.Command {
-            svc := mock.NewOrganizationService()
-            svc.CreateOrganizationF = func(ctx context.Context, org *influxdb.Organization) error {
-                if expectedOrg != *org {
-                    return fmt.Errorf("unexpected org;\n\twant= %+v\n\tgot= %+v", expectedOrg, *org)
-                }
-                return nil
-            }
-
-            return func(f *globalFlags, opt genericCLIOpts) *cobra.Command {
-                builder := newCmdOrgBuilder(fakeOrgSVCFn(svc), f, opt)
-                return builder.cmd()
-            }
-        }
-
-        for _, tt := range tests {
-            fn := func(t *testing.T) {
-                builder := newInfluxCmdBuilder(
-                    in(new(bytes.Buffer)),
-                    out(ioutil.Discard),
-                )
-                cmd := builder.cmd(cmdFn(tt.expected))
-                cmd.SetArgs(append([]string{"org", "create"}, tt.flags...))
-
-                require.NoError(t, cmd.Execute())
-            }
-
-            t.Run(tt.name, fn)
-        }
-    })
-
-    t.Run("delete", func(t *testing.T) {
-        tests := []struct {
-            name       string
-            expectedID influxdb.ID
-            flag       string
-        }{
-            {
-                name:       "id",
-                expectedID: influxdb.ID(1),
-                flag:       "--id=",
-            },
-            {
-                name:       "shorts",
-                expectedID: influxdb.ID(1),
-                flag:       "-i=",
-            },
-        }
-
-        cmdFn := func(expectedID influxdb.ID) func(*globalFlags, genericCLIOpts) *cobra.Command {
-            svc := mock.NewOrganizationService()
-            svc.FindOrganizationByIDF = func(ctx context.Context, id influxdb.ID) (*influxdb.Organization, error) {
-                return &influxdb.Organization{ID: id}, nil
-            }
-            svc.DeleteOrganizationF = func(ctx context.Context, id influxdb.ID) error {
-                if expectedID != id {
-                    return fmt.Errorf("unexpected id:\n\twant= %s\n\tgot= %s", expectedID, id)
-                }
-                return nil
-            }
-
-            return func(g *globalFlags, opt genericCLIOpts) *cobra.Command {
-                builder := newCmdOrgBuilder(fakeOrgSVCFn(svc), g, opt)
-                return builder.cmd()
-            }
-        }
-
-        for _, tt := range tests {
-            fn := func(t *testing.T) {
-                builder := newInfluxCmdBuilder(
-                    in(new(bytes.Buffer)),
-                    out(ioutil.Discard),
-                )
-                cmd := builder.cmd(cmdFn(tt.expectedID))
-                idFlag := tt.flag + tt.expectedID.String()
-                cmd.SetArgs([]string{"org", "find", idFlag})
-
-                require.NoError(t, cmd.Execute())
-            }
-
-            t.Run(tt.name, fn)
-        }
-    })
-
-    t.Run("list", func(t *testing.T) {
-        type called struct {
-            name string
-            id   influxdb.ID
-        }
-
-        tests := []struct {
-            name     string
-            expected called
-            flags    []string
-            command  string
-            envVars  map[string]string
-        }{
-            {
-                name:     "org id",
-                flags:    []string{"--id=" + influxdb.ID(3).String()},
-                envVars:  envVarsZeroMap,
-                expected: called{id: 3},
-            },
-            {
-                name:     "name",
-                flags:    []string{"--name=name1"},
-                envVars:  envVarsZeroMap,
-                expected: called{name: "name1"},
-            },
-            {
-                name: "shorts",
-                flags: []string{
-                    "-n=name1",
-                    "-i=" + influxdb.ID(1).String(),
-                },
-                envVars:  envVarsZeroMap,
-                expected: called{name: "name1", id: 1},
-            },
-            {
-                name: "env vars",
-                envVars: map[string]string{
-                    "INFLUX_ORG_ID": influxdb.ID(1).String(),
-                    "INFLUX_ORG":    "name1",
-                },
-                flags:    []string{"-i=" + influxdb.ID(1).String()},
-                expected: called{name: "name1", id: 1},
-            },
-            {
-                name:     "ls alias",
-                command:  "ls",
-                flags:    []string{"--name=name1"},
-                envVars:  envVarsZeroMap,
-                expected: called{name: "name1"},
-            },
-            {
-                name:     "find alias",
-                command:  "find",
-                flags:    []string{"--name=name1"},
-                envVars:  envVarsZeroMap,
-                expected: called{name: "name1"},
-            },
-        }
-
-        cmdFn := func() (func(*globalFlags, genericCLIOpts) *cobra.Command, *called) {
-            calls := new(called)
-
-            svc := mock.NewOrganizationService()
-            svc.FindOrganizationsF = func(ctx context.Context, f influxdb.OrganizationFilter, opt ...influxdb.FindOptions) ([]*influxdb.Organization, int, error) {
-                if f.ID != nil {
-                    calls.id = *f.ID
-                }
-                if f.Name != nil {
-                    calls.name = *f.Name
-                }
-                return nil, 0, nil
-            }
-
-            return func(g *globalFlags, opt genericCLIOpts) *cobra.Command {
-                builder := newCmdOrgBuilder(fakeOrgSVCFn(svc), g, opt)
-                return builder.cmd()
-            }, calls
-        }
-
-        for _, tt := range tests {
-            fn := func(t *testing.T) {
-                defer addEnvVars(t, tt.envVars)()
-
-                builder := newInfluxCmdBuilder(
-                    in(new(bytes.Buffer)),
-                    out(ioutil.Discard),
-                )
-                cmdFn, calls := cmdFn()
-                cmd := builder.cmd(cmdFn)
-
-                if tt.command == "" {
-                    tt.command = "list"
-                }
-
-                cmd.SetArgs(append([]string{"org", tt.command}, tt.flags...))
-
-                require.NoError(t, cmd.Execute())
-                assert.Equal(t, tt.expected, *calls)
-            }
-
-            t.Run(tt.name, fn)
-        }
-    })
-
-    t.Run("update", func(t *testing.T) {
-        tests := []struct {
-            name     string
-            expected influxdb.OrganizationUpdate
-            flags    []string
-            envVars  map[string]string
-        }{
-            {
-                name: "basic just name",
-                flags: []string{
-                    "--id=" + influxdb.ID(3).String(),
-                    "--name=new name",
-                },
-                expected: influxdb.OrganizationUpdate{
-                    Name: strPtr("new name"),
-                },
-            },
-            {
-                name: "with all fields",
-                flags: []string{
-                    "--id=" + influxdb.ID(3).String(),
-                    "--name=new name",
-                    "--description=desc",
-                },
-                expected: influxdb.OrganizationUpdate{
-                    Name:        strPtr("new name"),
-                    Description: strPtr("desc"),
-                },
-            },
-            {
-                name: "shorts",
-                flags: []string{
-                    "-i=" + influxdb.ID(3).String(),
-                    "-n=new name",
-                    "-d=desc",
-                },
-                expected: influxdb.OrganizationUpdate{
-                    Name:        strPtr("new name"),
-                    Description: strPtr("desc"),
-                },
-            },
-            {
-                name: "env var",
-                envVars: map[string]string{
-                    "INFLUX_ORG":             "new name",
-                    "INFLUX_ORG_ID":          influxdb.ID(3).String(),
-                    "INFLUX_ORG_DESCRIPTION": "desc",
-                },
-                expected: influxdb.OrganizationUpdate{
-                    Name:        strPtr("new name"),
-                    Description: strPtr("desc"),
-                },
-            },
-        }
-
-        cmdFn := func(expectedUpdate influxdb.OrganizationUpdate) func(*globalFlags, genericCLIOpts) *cobra.Command {
-            svc := mock.NewOrganizationService()
-            svc.UpdateOrganizationF = func(ctx context.Context, id influxdb.ID, upd influxdb.OrganizationUpdate) (*influxdb.Organization, error) {
-                if id != 3 {
-                    return nil, fmt.Errorf("unexpecte id:\n\twant= %s\n\tgot= %s", influxdb.ID(3), id)
-                }
-                if !reflect.DeepEqual(expectedUpdate, upd) {
-                    return nil, fmt.Errorf("unexpected bucket update;\n\twant= %+v\n\tgot= %+v", expectedUpdate, upd)
-                }
-                return &influxdb.Organization{}, nil
-            }
-
-            return func(g *globalFlags, opt genericCLIOpts) *cobra.Command {
-                builder := newCmdOrgBuilder(fakeOrgSVCFn(svc), g, opt)
-                return builder.cmd()
-            }
-        }
-
-        for _, tt := range tests {
-            fn := func(t *testing.T) {
-                defer addEnvVars(t, tt.envVars)()
-
-                builder := newInfluxCmdBuilder(
-                    in(new(bytes.Buffer)),
-                    out(ioutil.Discard),
-                )
-                cmd := builder.cmd(cmdFn(tt.expected))
-                cmd.SetArgs(append([]string{"org", "update"}, tt.flags...))
-
-                require.NoError(t, cmd.Execute())
-            }
-
-            t.Run(tt.name, fn)
-        }
-    })
-
-    t.Run("members", func(t *testing.T) {
-        type (
-            called struct {
-                name     string
-                id       influxdb.ID
-                memberID influxdb.ID
-            }
-
-            testCase struct {
-                name        string
-                expected    called
-                memberFlags []string
-                envVars     map[string]string
-            }
-        )
-
-        testMemberFn := func(t *testing.T, cmdName string, cmdFn func() (func(*globalFlags, genericCLIOpts) *cobra.Command, *called), testCases ...testCase) {
-            for _, tt := range testCases {
-                fn := func(t *testing.T) {
-                    envVars := tt.envVars
-                    if len(envVars) == 0 {
-                        envVars = envVarsZeroMap
-                    }
-                    defer addEnvVars(t, envVars)()
-
-                    outBuf := new(bytes.Buffer)
-                    defer func() {
-                        if t.Failed() && outBuf.Len() > 0 {
-                            t.Log(outBuf.String())
-                        }
-                    }()
-
-                    builder := newInfluxCmdBuilder(
-                        in(new(bytes.Buffer)),
-                        out(outBuf),
-                    )
-                    nestedCmd, calls := cmdFn()
-                    cmd := builder.cmd(nestedCmd)
-                    cmd.SetArgs(append([]string{"org", "members", cmdName}, tt.memberFlags...))
-
-                    require.NoError(t, cmd.Execute())
-                    assert.Equal(t, tt.expected, *calls)
-                }
-
-                t.Run(tt.name, fn)
-            }
-        }
-
-        t.Run("list", func(t *testing.T) {
-            tests := []testCase{
-                {
-                    name:        "org id",
-                    memberFlags: []string{"--id=" + influxdb.ID(3).String()},
-                    envVars:     envVarsZeroMap,
-                    expected:    called{id: 3},
-                },
-                {
-                    name:        "org id short",
-                    memberFlags: []string{"-i=" + influxdb.ID(3).String()},
-                    envVars:     envVarsZeroMap,
-                    expected:    called{id: 3},
-                },
-                {
-                    name: "org id env var",
-                    envVars: map[string]string{
-                        "INFLUX_ORG":    "",
-                        "INFLUX_ORG_ID": influxdb.ID(3).String(),
-                    },
-                    expected: called{id: 3},
-                },
-                {
-                    name:        "name",
-                    memberFlags: []string{"--name=name1"},
-                    envVars:     envVarsZeroMap,
-                    expected:    called{name: "name1"},
-                },
-                {
-                    name:        "name short",
-                    memberFlags: []string{"-n=name1"},
-                    envVars:     envVarsZeroMap,
-                    expected:    called{name: "name1"},
-                },
-                {
-                    name:     "name env var",
-                    envVars:  map[string]string{"INFLUX_ORG": "name1"},
-                    expected: called{name: "name1"},
-                },
-            }
-
-            cmdFn := func() (func(*globalFlags, genericCLIOpts) *cobra.Command, *called) {
-                calls := new(called)
-
-                svc := mock.NewOrganizationService()
-                svc.FindOrganizationF = func(ctx context.Context, f influxdb.OrganizationFilter) (*influxdb.Organization, error) {
-                    if f.ID != nil {
-                        calls.id = *f.ID
-                    }
-                    if f.Name != nil {
-                        calls.name = *f.Name
-                    }
-                    return &influxdb.Organization{ID: 1}, nil
-                }
-
-                return func(g *globalFlags, opt genericCLIOpts) *cobra.Command {
-                    builder := newCmdOrgBuilder(fakeOrgSVCFn(svc), g, opt)
-                    return builder.cmd()
-                }, calls
-            }
-
-            testMemberFn(t, "list", cmdFn, tests...)
-            testMemberFn(t, "ls", cmdFn, tests[0:1]...)
-            testMemberFn(t, "find", cmdFn, tests[0:1]...)
- }) - - t.Run("add", func(t *testing.T) { - cmdFn := func() (func(*globalFlags, genericCLIOpts) *cobra.Command, *called) { - calls := new(called) - - svc := mock.NewOrganizationService() - svc.FindOrganizationF = func(ctx context.Context, f influxdb.OrganizationFilter) (*influxdb.Organization, error) { - if f.ID != nil { - calls.id = *f.ID - } - if f.Name != nil { - calls.name = *f.Name - } - return &influxdb.Organization{ID: 1}, nil - } - urmSVC := mock.NewUserResourceMappingService() - urmSVC.CreateMappingFn = func(ctx context.Context, m *influxdb.UserResourceMapping) error { - calls.memberID = m.UserID - return nil - } - - return func(g *globalFlags, opt genericCLIOpts) *cobra.Command { - builder := newCmdOrgBuilder(fakeOrgUrmSVCsFn(svc, urmSVC), g, opt) - return builder.cmd() - }, calls - } - - addTests := []testCase{ - { - name: "org id", - memberFlags: []string{ - "--id=" + influxdb.ID(3).String(), - "--member=" + influxdb.ID(4).String(), - }, - envVars: envVarsZeroMap, - expected: called{id: 3, memberID: 4}, - }, - { - name: "org id shorts", - memberFlags: []string{ - "-i=" + influxdb.ID(3).String(), - "-m=" + influxdb.ID(4).String(), - }, - envVars: envVarsZeroMap, - expected: called{id: 3, memberID: 4}, - }, - { - name: "org name", - memberFlags: []string{ - "--name=name1", - "--member=" + influxdb.ID(4).String(), - }, - envVars: envVarsZeroMap, - expected: called{name: "name1", memberID: 4}, - }, - { - name: "org name shorts", - memberFlags: []string{ - "-n=name1", - "-m=" + influxdb.ID(4).String(), - }, - envVars: envVarsZeroMap, - expected: called{name: "name1", memberID: 4}, - }, - } - - testMemberFn(t, "add", cmdFn, addTests...) - }) - - t.Run("remove", func(t *testing.T) { - cmdFn := func() (func(*globalFlags, genericCLIOpts) *cobra.Command, *called) { - calls := new(called) - - svc := mock.NewOrganizationService() - svc.FindOrganizationF = func(ctx context.Context, f influxdb.OrganizationFilter) (*influxdb.Organization, error) { - if f.ID != nil { - calls.id = *f.ID - } - if f.Name != nil { - calls.name = *f.Name - } - return &influxdb.Organization{ID: 1}, nil - } - urmSVC := mock.NewUserResourceMappingService() - urmSVC.DeleteMappingFn = func(ctx context.Context, resourceID, userID influxdb.ID) error { - calls.memberID = userID - return nil - } - - return func(g *globalFlags, opt genericCLIOpts) *cobra.Command { - builder := newCmdOrgBuilder(fakeOrgUrmSVCsFn(svc, urmSVC), g, opt) - return builder.cmd() - }, calls - } - - addTests := []testCase{ - { - name: "org id", - memberFlags: []string{ - "--id=" + influxdb.ID(3).String(), - "--member=" + influxdb.ID(4).String(), - }, - envVars: envVarsZeroMap, - expected: called{id: 3, memberID: 4}, - }, - { - name: "org id shorts", - memberFlags: []string{ - "-i=" + influxdb.ID(3).String(), - "-m=" + influxdb.ID(4).String(), - }, - envVars: envVarsZeroMap, - expected: called{id: 3, memberID: 4}, - }, - { - name: "org name", - memberFlags: []string{ - "--name=name1", - "--member=" + influxdb.ID(4).String(), - }, - envVars: envVarsZeroMap, - expected: called{name: "name1", memberID: 4}, - }, - { - name: "org name shorts", - memberFlags: []string{ - "-n=name1", - "-m=" + influxdb.ID(4).String(), - }, - envVars: envVarsZeroMap, - expected: called{name: "name1", memberID: 4}, - }, - } - - testMemberFn(t, "remove", cmdFn, addTests...) 
-        })
-    })
-}
-
-var envVarsZeroMap = map[string]string{
-    "INFLUX_ORG_ID": "",
-    "INFLUX_ORG":    "",
-}
diff --git a/cmd/influx/ping.go b/cmd/influx/ping.go
deleted file mode 100644
index 0f2c1ac42e0..00000000000
--- a/cmd/influx/ping.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package main
-
-import (
-    "crypto/tls"
-    "encoding/json"
-    "fmt"
-    "net/http"
-    "time"
-
-    "github.com/influxdata/influxdb/v2/kit/check"
-    "github.com/spf13/cobra"
-)
-
-func cmdPing(f *globalFlags, opts genericCLIOpts) *cobra.Command {
-    runE := func(cmd *cobra.Command, args []string) error {
-        c := http.Client{
-            Timeout: 5 * time.Second,
-            Transport: &http.Transport{
-                TLSClientConfig: &tls.Config{InsecureSkipVerify: flags.skipVerify},
-            },
-        }
-        url := flags.config().Host + "/health"
-        resp, err := c.Get(url)
-        if err != nil {
-            return err
-        }
-        defer resp.Body.Close()
-        if resp.StatusCode/100 != 2 {
-            return fmt.Errorf("got %d from '%s'", resp.StatusCode, url)
-        }
-
-        var healthResponse check.Response
-        if err = json.NewDecoder(resp.Body).Decode(&healthResponse); err != nil {
-            return err
-        }
-
-        if healthResponse.Status == check.StatusPass {
-            fmt.Println("OK")
-        } else {
-            return fmt.Errorf("health check failed: '%s'", healthResponse.Message)
-        }
-
-        return nil
-    }
-
-    cmd := opts.newCmd("ping", runE, true)
-    cmd.Short = "Check the InfluxDB /health endpoint"
-    cmd.Long = `Checks the health of a running InfluxDB instance by querying /health. Does not require valid token.`
-    f.registerFlags(opts.viper, cmd, "token")
-
-    return cmd
-}
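
The ping command deleted above reduces to an unauthenticated GET against /health followed by decoding a small JSON status document. A standalone sketch of that check using only the standard library follows; the /health path and the pass/fail semantics come from the deleted code, while healthResponse below is a pared-down stand-in for kit/check's Response, so its exact JSON field names and the literal "pass" status string are assumptions:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

// healthResponse mirrors the fields the deleted command read from the
// /health payload; any other fields in the real response are ignored.
type healthResponse struct {
	Status  string `json:"status"`
	Message string `json:"message"`
}

func ping(host string) error {
	c := http.Client{Timeout: 5 * time.Second}
	resp, err := c.Get(host + "/health")
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode/100 != 2 {
		return fmt.Errorf("got %d from %s/health", resp.StatusCode, host)
	}
	var h healthResponse
	if err := json.NewDecoder(resp.Body).Decode(&h); err != nil {
		return err
	}
	if h.Status != "pass" { // assumed value of check.StatusPass
		return fmt.Errorf("health check failed: %q", h.Message)
	}
	fmt.Println("OK")
	return nil
}

func main() {
	if err := ping("http://localhost:8086"); err != nil {
		fmt.Println(err)
	}
}
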
diff --git a/cmd/influx/query.go b/cmd/influx/query.go
deleted file mode 100644
index 2d59527a8ac..00000000000
--- a/cmd/influx/query.go
+++ /dev/null
@@ -1,434 +0,0 @@
-package main
-
-import (
-    "bytes"
-    "encoding/json"
-    "fmt"
-    "io"
-    "io/ioutil"
-    "net/http"
-    "net/url"
-    "os"
-    "sort"
-    "strconv"
-    "strings"
-
-    "github.com/influxdata/flux"
-    "github.com/influxdata/flux/csv"
-    "github.com/influxdata/flux/values"
-    ihttp "github.com/influxdata/influxdb/v2/http"
-    "github.com/spf13/cobra"
-)
-
-var queryFlags struct {
-    org  organization
-    file string
-    raw  bool
-}
-
-func cmdQuery(f *globalFlags, opts genericCLIOpts) *cobra.Command {
-    cmd := opts.newCmd("query [query literal or -f /path/to/query.flux]", fluxQueryF, true)
-    cmd.Short = "Execute a Flux query"
-    cmd.Long = `Execute a Flux query provided via the first argument or a file or stdin`
-    cmd.Args = cobra.MaximumNArgs(1)
-
-    f.registerFlags(opts.viper, cmd)
-    queryFlags.org.register(opts.viper, cmd, true)
-    cmd.Flags().StringVarP(&queryFlags.file, "file", "f", "", "Path to Flux query file")
-    cmd.Flags().BoolVarP(&queryFlags.raw, "raw", "r", false, "Display raw query results")
-
-    return cmd
-}
-
-// readFluxQuery returns first argument, file contents or stdin
-func readFluxQuery(args []string, file string) (string, error) {
-    // backward compatibility
-    if len(args) > 0 {
-        if strings.HasPrefix(args[0], "@") {
-            file = args[0][1:]
-            args = args[:0]
-        } else if args[0] == "-" {
-            file = ""
-            args = args[:0]
-        }
-    }
-
-    var query string
-    switch {
-    case len(args) > 0:
-        query = args[0]
-    case len(file) > 0:
-        content, err := ioutil.ReadFile(file)
-        if err != nil {
-            return "", err
-        }
-        query = string(content)
-    default:
-        content, err := ioutil.ReadAll(os.Stdin)
-        if err != nil {
-            return "", err
-        }
-        query = string(content)
-    }
-    return query, nil
-}
-
-func fluxQueryF(cmd *cobra.Command, args []string) error {
-    if err := queryFlags.org.validOrgFlags(&flags); err != nil {
-        return err
-    }
-
-    q, err := readFluxQuery(args, queryFlags.file)
-    if err != nil {
-        return fmt.Errorf("failed to load query: %v", err)
-    }
-
-    u, err := url.Parse(flags.config().Host)
-    if err != nil {
-        return fmt.Errorf("unable to parse host: %s", err)
-    }
-
-    if !strings.HasSuffix(u.Path, "/") {
-        u.Path += "/"
-    }
-    u.Path += "api/v2/query"
-
-    params := url.Values{}
-    if queryFlags.org.id != "" {
-        params.Set("orgID", queryFlags.org.id)
-    } else {
-        params.Set("org", queryFlags.org.name)
-    }
-    u.RawQuery = params.Encode()
-
-    body, _ := json.Marshal(map[string]interface{}{
-        "query": q,
-        "type":  "flux",
-        "dialect": map[string]interface{}{
-            "annotations": []string{"group", "datatype", "default"},
-            "delimiter":   ",",
-            "header":      true,
-        },
-    })
-
-    req, _ := http.NewRequest("POST", u.String(), bytes.NewReader(body))
-    req.Header.Set("Authorization", "Token "+flags.config().Token)
-    req.Header.Set("Content-Type", "application/json")
-
-    resp, err := http.DefaultClient.Do(req)
-    if err != nil {
-        return err
-    }
-    defer func() { _ = resp.Body.Close() }()
-
-    if err := ihttp.CheckError(resp); err != nil {
-        return err
-    }
-
-    if queryFlags.raw {
-        io.Copy(os.Stdout, resp.Body)
-        return nil
-    }
-
-    dec := csv.NewMultiResultDecoder(csv.ResultDecoderConfig{})
-    results, err := dec.Decode(resp.Body)
-    if err != nil {
-        return fmt.Errorf("query decode error: %s", err)
-    }
-    defer results.Release()
-
-    for results.More() {
-        res := results.Next()
-        fmt.Println("Result:", res.Name())
-
-        if err := res.Tables().Do(func(tbl flux.Table) error {
-            _, err := newFormatter(tbl).WriteTo(os.Stdout)
-            return err
-        }); err != nil {
-            return err
-        }
-    }
-    // It is safe and appropriate to call Release multiple times and must be
-    // called before checking the error on the next line.
-    results.Release()
-    return results.Err()
-}
-
-// Below is a copy and trimmed version of the execute/format.go file from flux.
-// It is copied here to avoid requiring a dependency on the execute package which
-// may pull in the flux runtime as a dependency.
-// In the future, the formatters and other primitives such as the csv parser should
-// probably be separated out into user libraries anyway.
-
-const fixedWidthTimeFmt = "2006-01-02T15:04:05.000000000Z"
-
-// formatter writes a table to a Writer.
-type formatter struct {
-    tbl       flux.Table
-    widths    []int
-    maxWidth  int
-    newWidths []int
-    pad       []byte
-    dash      []byte
-    // fmtBuf is used to format values
-    fmtBuf [64]byte
-
-    cols orderedCols
-}
-
-var eol = []byte{'\n'}
-
-// newFormatter creates a formatter for a given table.
-func newFormatter(tbl flux.Table) *formatter {
-    return &formatter{
-        tbl: tbl,
-    }
-}
-
-type writeToHelper struct {
-    w   io.Writer
-    n   int64
-    err error
-}
-
-func (w *writeToHelper) write(data []byte) {
-    if w.err != nil {
-        return
-    }
-    n, err := w.w.Write(data)
-    w.n += int64(n)
-    w.err = err
-}
-
-var minWidthsByType = map[flux.ColType]int{
-    flux.TBool:    12,
-    flux.TInt:     26,
-    flux.TUInt:    27,
-    flux.TFloat:   28,
-    flux.TString:  22,
-    flux.TTime:    len(fixedWidthTimeFmt),
-    flux.TInvalid: 10,
-}
-
-// WriteTo writes the formatted table data to w.
-func (f *formatter) WriteTo(out io.Writer) (int64, error) {
-    w := &writeToHelper{w: out}
-
-    // Sort cols
-    cols := f.tbl.Cols()
-    f.cols = newOrderedCols(cols, f.tbl.Key())
-    sort.Sort(f.cols)
-
-    // Compute header widths
-    f.widths = make([]int, len(cols))
-    for j, c := range cols {
-        // Column header is "