diff --git a/.ci/bwcVersions b/.ci/bwcVersions
index 44bfbfcd729d0..92b75accdcdb5 100644
--- a/.ci/bwcVersions
+++ b/.ci/bwcVersions
@@ -35,4 +35,5 @@ BWC_VERSION:
   - "1.2.2"
   - "1.2.3"
   - "1.2.4"
+  - "1.2.5"
   - "1.3.0"
diff --git a/.ci/java-versions.properties b/.ci/java-versions.properties
index 58f83003994e3..f73122ee21a6b 100644
--- a/.ci/java-versions.properties
+++ b/.ci/java-versions.properties
@@ -14,6 +14,6 @@
 # are 'java' or 'openjdk' followed by the major release number.
 
 OPENSEARCH_BUILD_JAVA=openjdk11
-OPENSEARCH_RUNTIME_JAVA=java8
+OPENSEARCH_RUNTIME_JAVA=java11
 GRADLE_TASK=build
 GRADLE_EXTRA_ARGS=
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 0000000000000..0db72d2a19cd1
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,2 @@
+# disable blank issue creation
+blank_issues_enabled: false
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 7e44ad238d131..ca972d1b242e3 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -1,881 +1,871 @@
 updates:
   - directory: /
-    open-pull-requests-limit: 10
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /benchmarks/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /benchmarks/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /buildSrc/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /buildSrc/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /buildSrc/reaper/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /buildSrc/reaper/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/distribution/archives/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/distribution/archives/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/distribution/archives/darwin-tar/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/distribution/archives/darwin-tar/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/distribution/archives/oss-darwin-tar/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/distribution/archives/oss-darwin-tar/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/distribution/bwc/bugfix/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/distribution/bwc/bugfix/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/distribution/bwc/minor/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/distribution/bwc/minor/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /buildSrc/src/testKit/opensearch-build-resources/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /buildSrc/src/testKit/opensearch-build-resources/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /buildSrc/src/testKit/opensearch.build/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /buildSrc/src/testKit/opensearch.build/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /buildSrc/src/testKit/reaper/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /buildSrc/src/testKit/reaper/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /buildSrc/src/testKit/symbolic-link-preserving-tar/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /buildSrc/src/testKit/symbolic-link-preserving-tar/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /buildSrc/src/testKit/testingConventions/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /buildSrc/src/testKit/testingConventions/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /buildSrc/src/testKit/thirdPartyAudit/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /buildSrc/src/testKit/thirdPartyAudit/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /buildSrc/src/testKit/thirdPartyAudit/sample_jars/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /client/benchmark/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /client/benchmark/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /client/client-benchmark-noop-api-plugin/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /client/client-benchmark-noop-api-plugin/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /client/rest/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /client/rest/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /client/rest-high-level/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /client/rest-high-level/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /client/sniffer/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /client/sniffer/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /client/test/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /client/test/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /client/transport/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /distribution/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /distribution/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /distribution/archives/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /distribution/archives/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /distribution/archives/darwin-tar/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /distribution/archives/darwin-tar/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /distribution/archives/integ-test-zip/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /distribution/archives/integ-test-zip/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /distribution/archives/linux-arm64-tar/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /distribution/archives/linux-arm64-tar/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /distribution/archives/linux-tar/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /distribution/archives/linux-tar/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /distribution/archives/no-jdk-darwin-tar/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /distribution/archives/no-jdk-darwin-tar/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /distribution/archives/no-jdk-linux-tar/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /distribution/archives/no-jdk-linux-tar/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /distribution/archives/no-jdk-windows-zip/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /distribution/archives/no-jdk-windows-zip/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /distribution/archives/windows-zip/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /distribution/archives/windows-zip/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /distribution/bwc/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /distribution/bwc/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /distribution/bwc/bugfix/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /distribution/bwc/bugfix/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /distribution/bwc/maintenance/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /distribution/bwc/maintenance/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /distribution/bwc/minor/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /distribution/bwc/minor/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /distribution/bwc/staged/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /distribution/bwc/staged/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /distribution/docker/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /distribution/docker/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /distribution/docker/docker-arm64-export/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /distribution/docker/docker-arm64-export/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /distribution/docker/docker-build-context/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /distribution/docker/docker-build-context/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /distribution/docker/docker-export/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /distribution/docker/docker-export/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /distribution/packages/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /distribution/packages/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /distribution/packages/arm64-deb/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /distribution/packages/arm64-deb/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /distribution/packages/arm64-rpm/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /distribution/packages/arm64-rpm/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /distribution/packages/deb/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /distribution/packages/deb/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /distribution/packages/no-jdk-deb/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /distribution/packages/no-jdk-deb/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /distribution/packages/no-jdk-rpm/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /distribution/packages/no-jdk-rpm/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /distribution/packages/rpm/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /distribution/packages/rpm/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /distribution/tools/java-version-checker/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /distribution/tools/java-version-checker/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /distribution/tools/keystore-cli/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /distribution/tools/keystore-cli/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /distribution/tools/launchers/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /distribution/tools/launchers/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /distribution/tools/plugin-cli/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /distribution/tools/plugin-cli/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /distribution/tools/upgrade-cli/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /distribution/tools/upgrade-cli/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /doc-tools/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /doc-tools/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /doc-tools/missing-doclet/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /doc-tools/missing-doclet/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /libs/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /libs/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /libs/cli/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /libs/cli/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /libs/core/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /libs/core/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /libs/dissect/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /libs/dissect/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /libs/geo/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /libs/geo/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /libs/grok/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /libs/grok/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /libs/nio/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /libs/nio/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /libs/plugin-classloader/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /libs/plugin-classloader/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /libs/secure-sm/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /libs/secure-sm/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /libs/ssl-config/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /libs/ssl-config/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /libs/x-content/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /libs/x-content/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /modules/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /modules/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /modules/aggs-matrix-stats/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /modules/aggs-matrix-stats/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /modules/analysis-common/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /modules/analysis-common/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /modules/geo/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /modules/geo/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /modules/ingest-common/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /modules/ingest-common/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /modules/ingest-geoip/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /modules/ingest-geoip/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /modules/ingest-user-agent/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /modules/ingest-user-agent/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /modules/lang-expression/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /modules/lang-expression/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /modules/lang-mustache/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /modules/lang-mustache/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /modules/lang-painless/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /modules/lang-painless/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /modules/lang-painless/spi/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /modules/lang-painless/spi/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /modules/mapper-extras/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /modules/mapper-extras/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /modules/opensearch-dashboards/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /modules/opensearch-dashboards/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /modules/parent-join/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /modules/parent-join/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /modules/percolator/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /modules/percolator/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /modules/rank-eval/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /modules/rank-eval/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /modules/reindex/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /modules/reindex/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /modules/repository-url/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /modules/repository-url/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /modules/systemd/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /modules/systemd/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /modules/transport-netty4/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /modules/transport-netty4/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /plugins/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /plugins/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /plugins/analysis-icu/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /plugins/analysis-icu/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /plugins/analysis-kuromoji/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /plugins/analysis-kuromoji/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /plugins/analysis-nori/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /plugins/analysis-nori/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /plugins/analysis-phonetic/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /plugins/analysis-phonetic/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /plugins/analysis-smartcn/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /plugins/analysis-smartcn/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /plugins/analysis-stempel/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /plugins/analysis-stempel/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /plugins/analysis-ukrainian/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /plugins/analysis-ukrainian/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /plugins/discovery-azure-classic/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /plugins/discovery-azure-classic/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /plugins/discovery-ec2/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /plugins/discovery-ec2/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /plugins/discovery-ec2/qa/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /plugins/discovery-ec2/qa/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /plugins/discovery-ec2/qa/amazon-ec2/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /plugins/discovery-ec2/qa/amazon-ec2/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /plugins/discovery-gce/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /plugins/discovery-gce/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /plugins/discovery-gce/qa/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /plugins/discovery-gce/qa/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /plugins/discovery-gce/qa/gce/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /plugins/discovery-gce/qa/gce/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /plugins/examples/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /plugins/examples/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /plugins/examples/custom-settings/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /plugins/examples/custom-settings/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /plugins/examples/custom-significance-heuristic/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /plugins/examples/custom-significance-heuristic/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /plugins/examples/custom-suggester/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /plugins/examples/custom-suggester/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /plugins/examples/painless-whitelist/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /plugins/examples/painless-whitelist/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /plugins/examples/rescore/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /plugins/examples/rescore/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /plugins/examples/rest-handler/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /plugins/examples/rest-handler/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /plugins/examples/script-expert-scoring/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /plugins/examples/script-expert-scoring/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /plugins/ingest-attachment/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /plugins/ingest-attachment/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /plugins/mapper-annotated-text/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /plugins/mapper-annotated-text/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /plugins/mapper-murmur3/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /plugins/mapper-murmur3/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /plugins/mapper-size/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /plugins/mapper-size/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /plugins/repository-azure/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /plugins/repository-azure/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /plugins/repository-gcs/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /plugins/repository-gcs/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /plugins/repository-hdfs/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /plugins/repository-hdfs/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /plugins/repository-s3/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /plugins/repository-s3/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /plugins/store-smb/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /plugins/store-smb/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /plugins/transport-nio/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /plugins/transport-nio/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /qa/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /qa/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /qa/ccs-unavailable-clusters/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /qa/ccs-unavailable-clusters/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /qa/die-with-dignity/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /qa/die-with-dignity/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /qa/evil-tests/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /qa/evil-tests/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /qa/full-cluster-restart/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /qa/full-cluster-restart/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /qa/logging-config/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /qa/logging-config/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /qa/mixed-cluster/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /qa/mixed-cluster/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /qa/multi-cluster-search/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /qa/multi-cluster-search/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /qa/no-bootstrap-tests/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /qa/no-bootstrap-tests/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /qa/os/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /qa/os/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /qa/os/centos-6/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /qa/os/centos-6/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /qa/os/centos-7/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /qa/os/centos-7/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /qa/os/debian-8/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /qa/os/debian-8/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /qa/os/debian-9/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /qa/os/debian-9/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /qa/os/fedora-28/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /qa/os/fedora-28/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /qa/os/fedora-29/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /qa/os/fedora-29/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /qa/os/oel-6/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /qa/os/oel-6/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /qa/os/oel-7/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /qa/os/oel-7/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /qa/os/sles-12/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /qa/os/sles-12/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /qa/os/ubuntu-1604/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /qa/os/ubuntu-1604/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /qa/os/ubuntu-1804/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /qa/os/ubuntu-1804/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /qa/os/windows-2012r2/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /qa/os/windows-2012r2/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /qa/os/windows-2016/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /qa/os/windows-2016/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /qa/remote-clusters/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /qa/remote-clusters/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /qa/repository-multi-version/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /qa/repository-multi-version/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /qa/rolling-upgrade/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /qa/rolling-upgrade/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /qa/smoke-test-http/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /qa/smoke-test-client/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /qa/smoke-test-ingest-disabled/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /qa/smoke-test-http/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /qa/smoke-test-ingest-with-all-dependencies/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /qa/smoke-test-ingest-disabled/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /qa/smoke-test-multinode/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /qa/smoke-test-ingest-with-all-dependencies/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /qa/smoke-test-plugins/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /qa/smoke-test-multinode/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /qa/translog-policy/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /qa/smoke-test-plugins/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /qa/unconfigured-node-name/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /qa/translog-policy/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /qa/verify-version-constants/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /qa/unconfigured-node-name/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /qa/wildfly/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /qa/verify-version-constants/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /rest-api-spec/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /qa/wildfly/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /sandbox/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /rest-api-spec/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /sandbox/libs/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /sandbox/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /sandbox/modules/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /sandbox/libs/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /sandbox/plugins/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /sandbox/modules/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /server/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /sandbox/plugins/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /test/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /server/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /test/external-modules/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /test/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /test/external-modules/delayed-aggs/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /test/external-modules/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /test/fixtures/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /test/external-modules/delayed-aggs/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /test/fixtures/azure-fixture/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /test/fixtures/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /test/fixtures/gcs-fixture/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /test/fixtures/azure-fixture/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /test/fixtures/hdfs-fixture/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /test/fixtures/gcs-fixture/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /test/fixtures/krb5kdc-fixture/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /test/fixtures/hdfs-fixture/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /test/fixtures/minio-fixture/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /test/fixtures/krb5kdc-fixture/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /test/fixtures/old-elasticsearch/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /test/fixtures/minio-fixture/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /test/fixtures/s3-fixture/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /test/fixtures/old-elasticsearch/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /test/framework/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
-  - directory: /test/fixtures/s3-fixture/build.gradle
-    open-pull-requests-limit: 10
-    package-ecosystem: gradle
-    schedule:
-      interval: weekly
-  - directory: /test/framework/build.gradle
-    open-pull-requests-limit: 10
-    package-ecosystem: gradle
-    schedule:
-      interval: weekly
-  - directory: /test/logger-usage/build.gradle
-    open-pull-requests-limit: 10
+  - directory: /test/logger-usage/
+    open-pull-requests-limit: 1
     package-ecosystem: gradle
     schedule:
       interval: weekly
diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml
index d4a5308406a39..e47d8d88c0243 100644
--- a/.github/workflows/backport.yml
+++ b/.github/workflows/backport.yml
@@ -1,6 +1,6 @@
 name: Backport
 on:
-  pull_request:
+  pull_request_target:
     types:
       - closed
       - labeled
@@ -13,7 +13,16 @@ jobs:
       pull-requests: write
     name: Backport
     steps:
+      - name: GitHub App token
+        id: github_app_token
+        uses: tibdex/github-app-token@v1.5.0
+        with:
+          app_id: ${{ secrets.APP_ID }}
+          private_key: ${{ secrets.APP_PRIVATE_KEY }}
+          installation_id: 22958780
+
       - name: Backport
-        uses: tibdex/backport@v1
+        uses: VachaShah/backport@v1.1.4
         with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
+          github_token: ${{ steps.github_app_token.outputs.token }}
+          branch_name: backport/backport-${{ github.event.number }}
diff --git a/.github/workflows/delete_backport_branch.yml b/.github/workflows/delete_backport_branch.yml
new file mode 100644
index 0000000000000..d654df6b40257
--- /dev/null
+++ b/.github/workflows/delete_backport_branch.yml
@@ -0,0 +1,15 @@
+name: Delete merged branch of the backport PRs
+on:
+  pull_request:
+    types:
+      - closed
+
+jobs:
+  delete-branch:
+    runs-on: ubuntu-latest
+    if: startsWith(github.event.pull_request.head.ref,'backport/')
+    steps:
+      - name: Delete merged branch
+        uses: SvanBoxel/delete-merged-branch@main
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
\ No newline at end of file
diff --git a/.github/workflows/dependabot_pr.yml b/.github/workflows/dependabot_pr.yml
new file mode 100644
index 0000000000000..2ac904bf4ccf7
--- /dev/null
+++ b/.github/workflows/dependabot_pr.yml
@@ -0,0 +1,49 @@
+name: Dependabot PR actions
+on: pull_request
+
+jobs:
+  dependabot:
+    runs-on: ubuntu-latest
+    permissions:
+      pull-requests: write
+      contents: write
+    if: ${{ github.actor == 'dependabot[bot]' }}
+    steps:
+      - name: GitHub App token
+        id: github_app_token
+        uses: tibdex/github-app-token@v1.5.0
+        with:
+          app_id: ${{ secrets.APP_ID }}
+          private_key: ${{ secrets.APP_PRIVATE_KEY }}
+          installation_id: 22958780
+
+      - name: Check out code
+        uses: actions/checkout@v2
+        with:
+          token: ${{ steps.github_app_token.outputs.token }}
+
+      - name: Update Gradle SHAs
+        run: |
+          ./gradlew updateSHAs
+
+      - name: Commit the changes
+        uses: stefanzweifel/git-auto-commit-action@v4.7.2
+        with:
+          commit_message: Updating SHAs
+          branch: ${{ github.head_ref }}
+          commit_user_name: dependabot[bot]
+          commit_user_email: support@github.com
+          commit_options: '--signoff'
+
+      - name: Run spotless
+        run: |
+          ./gradlew spotlessApply
+
+      - name: Commit the changes
+        uses: stefanzweifel/git-auto-commit-action@v4.7.2
+        with:
+          commit_message: Spotless formatting
+          branch: ${{ github.head_ref }}
+          commit_user_name: dependabot[bot]
+          commit_user_email: support@github.com
+          commit_options: '--signoff'
diff --git a/.github/workflows/version.yml b/.github/workflows/version.yml
index 2d3bc512dc646..b42e7c4f2f317 100644
--- a/.github/workflows/version.yml
+++ b/.github/workflows/version.yml
@@ -8,9 +8,15 @@ on:
 jobs:
   build:
     runs-on: ubuntu-latest
-    env:
-      GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
     steps:
+      - name: GitHub App token
+        id: github_app_token
+        uses: tibdex/github-app-token@v1.5.0
+        with:
+          app_id: ${{ secrets.APP_ID }}
+          private_key: ${{ secrets.APP_PRIVATE_KEY }}
+          installation_id: 22958780
+
       - uses: actions/checkout@v2
       - name: Fetch Tag and Version Information
         run: |
@@ -35,6 +41,8 @@ jobs:
       - uses: actions/checkout@v2
         with:
           ref: ${{ env.BASE }}
+          token: ${{ steps.github_app_token.outputs.token }}
+
       - name: Increment Patch Version
         run: |
           echo Incrementing $CURRENT_VERSION to $NEXT_VERSION
           echo Adding $NEXT_VERSION_UNDERSCORE after $CURRENT_VERSION_UNDERSCORE
           sed -i "s/public static final Version $CURRENT_VERSION_UNDERSCORE = new Version(\([[:digit:]]\+\)\(.*\));/\0\n    public static final Version $NEXT_VERSION_UNDERSCORE = new Version($NEXT_VERSION_ID\2);/g" server/src/main/java/org/opensearch/Version.java
           sed -i "s/CURRENT = $CURRENT_VERSION_UNDERSCORE;/CURRENT = $NEXT_VERSION_UNDERSCORE;/g" server/src/main/java/org/opensearch/Version.java
+
       - name: Create Pull Request
         uses: peter-evans/create-pull-request@v3
         with:
+          token: ${{ steps.github_app_token.outputs.token }}
           base: ${{ env.BASE }}
           branch: 'create-pull-request/patch-${{ env.BASE }}'
           commit-message: Incremented version to ${{ env.NEXT_VERSION }}
@@ -57,15 +67,19 @@ jobs:
       - uses: actions/checkout@v2
         with:
           ref: ${{ env.BASE_X }}
+          token: ${{ steps.github_app_token.outputs.token }}
+
       - name: Add bwc version to .X branch
         run: |
           echo Adding bwc version $NEXT_VERSION after $CURRENT_VERSION
           sed -i "s/- \"$CURRENT_VERSION\"/\0\n  - \"$NEXT_VERSION\"/g" .ci/bwcVersions
           echo Adding $NEXT_VERSION_UNDERSCORE after $CURRENT_VERSION_UNDERSCORE
           sed -i "s/public static final Version $CURRENT_VERSION_UNDERSCORE = new Version(\([[:digit:]]\+\)\(.*\));/\0\n    public static final Version $NEXT_VERSION_UNDERSCORE = new Version($NEXT_VERSION_ID\2);/g" server/src/main/java/org/opensearch/Version.java
+
       - name: Create Pull Request
         uses: peter-evans/create-pull-request@v3
         with:
+          token: ${{ steps.github_app_token.outputs.token }}
           base: ${{ env.BASE_X }}
           branch: 'create-pull-request/patch-${{ env.BASE_X }}'
           commit-message: Added bwc version ${{ env.NEXT_VERSION }}
@@ -77,15 +91,19 @@ jobs:
       - uses: actions/checkout@v2
         with:
           ref: main
+          token: ${{ steps.github_app_token.outputs.token }}
+
       - name: Add bwc version to main branch
         run: |
           echo Adding bwc version $NEXT_VERSION after $CURRENT_VERSION
           sed -i "s/- \"$CURRENT_VERSION\"/\0\n  - \"$NEXT_VERSION\"/g" .ci/bwcVersions
           echo Adding $NEXT_VERSION_UNDERSCORE after $CURRENT_VERSION_UNDERSCORE
           sed -i "s/public static final Version $CURRENT_VERSION_UNDERSCORE = new Version(\([[:digit:]]\+\)\(.*\));/\0\n    public static final Version $NEXT_VERSION_UNDERSCORE = new Version($NEXT_VERSION_ID\2);/g" server/src/main/java/org/opensearch/Version.java
+
       - name: Create Pull Request
         uses: peter-evans/create-pull-request@v3
         with:
+          token: ${{ steps.github_app_token.outputs.token }}
           base: main
           branch: 'create-pull-request/patch-main'
           commit-message: Added bwc version ${{ env.NEXT_VERSION }}
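The sed expressions in the workflow above splice new version constants into `server/src/main/java/org/opensearch/Version.java`. For orientation, here is a hedged Java sketch of the constant layout those expressions match and extend; the version IDs, the Lucene version argument, and the class skeleton are illustrative assumptions, not content taken from the real file:

```java
// Hypothetical, simplified excerpt of org.opensearch.Version showing the shape the
// workflow's sed commands look for.
public class Version implements Comparable<Version> {
    public static final Version V_1_2_4 = new Version(1020499, org.apache.lucene.util.Version.LUCENE_8_10_1);
    // Line the first sed inserts right after the current version constant:
    public static final Version V_1_2_5 = new Version(1020599, org.apache.lucene.util.Version.LUCENE_8_10_1);
    // Assignment the second sed rewrites on the release branch:
    public static final Version CURRENT = V_1_2_5;

    public final int id;

    Version(int id, org.apache.lucene.util.Version luceneVersion) {
        this.id = id;
    }

    @Override
    public int compareTo(Version other) {
        return Integer.compare(id, other.id);
    }
}
```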
diff --git a/.gitignore b/.gitignore
index cf0c61a6ac6fb..e2cb6d8d37a82 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,6 +10,7 @@ out/
 # include shared intellij config
 !.idea/inspectionProfiles/Project_Default.xml
 !.idea/runConfigurations/Debug_OpenSearch.xml
+!.idea/vcs.xml
 
 # These files are generated in the main tree by IntelliJ
 benchmarks/src/main/generated/*
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
new file mode 100644
index 0000000000000..48557884a8893
--- /dev/null
+++ b/.idea/vcs.xml
@@ -0,0 +1,20 @@
+[20-line IntelliJ VCS configuration; the XML markup was stripped during extraction and is not recoverable]
diff --git a/DEVELOPER_GUIDE.md b/DEVELOPER_GUIDE.md
index 381c7c64077cd..839144d06b6ec 100644
--- a/DEVELOPER_GUIDE.md
+++ b/DEVELOPER_GUIDE.md
@@ -3,7 +3,7 @@
 - [Git Clone OpenSearch Repo](#git-clone-opensearch-repo)
 - [Install Prerequisites](#install-prerequisites)
   - [JDK 11](#jdk-11)
-  - [JDK 8 and 14](#jdk-8-and-14)
+  - [JDK 14](#jdk-14)
   - [Runtime JDK](#runtime-jdk)
   - [Windows](#windows)
   - [Docker](#docker)
@@ -33,6 +33,8 @@
     - [runtimeOnly](#runtimeonly)
     - [compileOnly](#compileonly)
     - [testImplementation](#testimplementation)
+  - [Gradle Plugins](#gradle-plugins)
+    - [Distribution Download Plugin](#distribution-download-plugin)
   - [Misc](#misc)
     - [git-secrets](#git-secrets)
       - [Installation](#installation)
@@ -65,13 +67,13 @@ OpenSearch builds using Java 11 at a minimum. This means you must have a JDK 11
 
 Download Java 11 from [here](https://adoptium.net/releases.html?variant=openjdk11).
 
-#### JDK 8 and 14
+#### JDK 14
 
-To run the full suite of tests, download and install [JDK 8](https://adoptium.net/releases.html?variant=openjdk8) and [JDK 14](https://jdk.java.net/archive/) and set `JAVA8_HOME`, `JAVA11_HOME`, and `JAVA14_HOME`. They are required by the [backwards compatibility test](./TESTING.md#testing-backwards-compatibility).
+To run the full suite of tests, download and install [JDK 14](https://jdk.java.net/archive/) and set `JAVA11_HOME` and `JAVA14_HOME`. They are required by the [backwards compatibility test](./TESTING.md#testing-backwards-compatibility).
 
 #### Runtime JDK
 
-By default, the test tasks use bundled JDK runtime, configured in `buildSrc/version.properties` and set to JDK 17 (LTS). Other kind of test tasks (integration, cluster, ... ) use the same runtime as `JAVA_HOME`. However, since OpenSearch supports JDK 8 as the runtime, the build supports compiling with JDK 11 and testing on a different version of JDK runtime. To do this, set `RUNTIME_JAVA_HOME` pointing to the Java home of another JDK installation, e.g. `RUNTIME_JAVA_HOME=/usr/lib/jvm/jdk-8`. Alternatively, the runtime JDK version could be provided as the command line argument, using combination of `runtime.java=` property and `JAVA_HOME` environment variable, for example `./gradlew -Druntime.java=17 ...` (in this case, the tooling expects `JAVA17_HOME` environment variable to be set).
+By default, the test tasks use the bundled JDK runtime, configured in `buildSrc/version.properties` and set to JDK 17 (LTS). Other kinds of test tasks (integration, cluster, ...) use the same runtime as `JAVA_HOME`. However, the build supports compiling with JDK 11 and testing on a different JDK runtime version. To do this, set `RUNTIME_JAVA_HOME` to the Java home of another JDK installation, e.g. `RUNTIME_JAVA_HOME=/usr/lib/jvm/jdk-14`. Alternatively, the runtime JDK version can be provided as a command line argument, using a combination of the `runtime.java=` property and the `JAVA_HOME` environment variable, for example `./gradlew -Druntime.java=17 ...` (in this case, the tooling expects the `JAVA17_HOME` environment variable to be set).
 
 #### Windows
 
@@ -183,6 +185,15 @@ You can import the OpenSearch project into IntelliJ IDEA as follows.
 2. In the subsequent dialog navigate to the root `build.gradle` file
 3. In the subsequent dialog select **Open as Project**
 
+#### Remote development using JetBrains Gateway
+
+[JetBrains Gateway](https://www.jetbrains.com/remote-development/gateway/) enables development, testing and debugging on remote machines like development servers.
+
+1. On the local development machine, download and install the latest thin client from the [JetBrains Gateway page](https://www.jetbrains.com/remote-development/gateway/).
+2. Create a new connection to the remote server and install the IntelliJ server support using [these instructions](https://www.jetbrains.com/help/idea/remote-development-starting-page.html#connect_to_rd_ij).
+
+After a successful connection, follow the [IntelliJ IDEA instructions](#intellij-idea).
+
 ### Visual Studio Code
 
 Follow links in the [Java Tutorial](https://code.visualstudio.com/docs/java/java-tutorial) to install the coding pack and extensions for Java, Gradle tasks, etc. Open the source code directory.
@@ -331,6 +342,15 @@ somehow. OpenSearch plugins use this configuration to include dependencies that
 
 Code that is on the classpath for compiling tests that are part of this project but not production code. The canonical example of this is `junit`.
 
+### Gradle Plugins
+
+#### Distribution Download Plugin
+
+The Distribution Download plugin downloads the latest version of OpenSearch by default, and supports overriding this behavior by setting `customDistributionUrl`.
+```
+./gradlew integTest -DcustomDistributionUrl="https://ci.opensearch.org/ci/dbc/bundle-build/1.2.0/1127/linux/x64/dist/opensearch-1.2.0-linux-x64.tar.gz"
+```
+
 ## Misc
 
 ### git-secrets
diff --git a/TESTING.md b/TESTING.md
index f6c951762530d..5571b7c7a4aaf 100644
--- a/TESTING.md
+++ b/TESTING.md
@@ -421,9 +421,11 @@ Unit tests are the preferred way to test some functionality: most of the time t
 
 The reason why `OpenSearchSingleNodeTestCase` exists is that all our components used to be very hard to set up in isolation, which had led us to having a number of integration tests but close to no unit tests. `OpenSearchSingleNodeTestCase` is a workaround for this issue which provides an easy way to spin up a node and get access to components that are hard to instantiate like `IndicesService`. Whenever practical, you should prefer unit tests.
 
-Many tests extend `OpenSearchIntegTestCase`, mostly because this is how most tests used to work in the early days of Elasticsearch. However, the complexity of these tests tends to make them hard to debug. Whenever the functionality that is being tested isn’t intimately dependent on how OpenSearch behaves as a cluster, it is recommended to write unit tests or REST tests instead.
+Finally, if the functionality under test needs to be run in a cluster, there are two test classes to consider:
+  * `OpenSearchRestTestCase` will connect to an external cluster. This is a good option if the test cases don't rely on a specific configuration of the test cluster. A test cluster is set up as part of the Gradle task running integration tests, and test cases using this class can connect to it. The configuration of the cluster is provided in the Gradle files.
+  * `OpenSearchIntegTestCase` will create a local cluster as part of each test case. The configuration of the cluster is controlled by the test class. This is a good option if different test cases depend on different cluster configurations, as it would be impractical (and limit parallelization) to keep re-configuring (and re-starting) the external cluster for each test case. A good example of when this class might come in handy is for testing security features, where different cluster configurations are needed to fully test each one.
 
-In short, most new functionality should come with unit tests, and optionally REST tests to test integration.
+In short, most new functionality should come with unit tests, and optionally integration tests using either an external cluster or a local one if there's a need for more specific cluster configurations, as those are more costly and harder to maintain/debug.
 
 ### Refactor code to make it easier to test
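To make the split described in the TESTING.md change above concrete, here is a minimal sketch of a REST-layer test in the `OpenSearchRestTestCase` style. The class name and the health-check assertion are hypothetical, not part of this patch:

```java
import java.io.IOException;

import org.opensearch.client.Request;
import org.opensearch.client.Response;
import org.opensearch.test.rest.OpenSearchRestTestCase;

// Runs against the externally provisioned test cluster that the Gradle integration
// test task stands up; the test only needs a reachable cluster, not a specific layout.
public class SmokeIT extends OpenSearchRestTestCase {

    public void testClusterIsReachable() throws IOException {
        // client() is the low-level REST client wired to the external cluster
        Response response = client().performRequest(new Request("GET", "/_cluster/health"));
        assertEquals(200, response.getStatusLine().getStatusCode());
    }
}
```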
diff --git a/Vagrantfile b/Vagrantfile
index 42eb492c65088..28d5894664c2f 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -484,6 +484,7 @@ def sh_install_deps(config,
     cat \<\<SUDOERS_VARS > /etc/sudoers.d/opensearch_vars
 Defaults   env_keep += "JAVA_HOME"
 Defaults   env_keep += "SYSTEM_JAVA_HOME"
+Defaults   env_keep += "OPENSEARCH_JAVA_HOME"
 SUDOERS_VARS
     chmod 0440 /etc/sudoers.d/opensearch_vars
   SHELL
diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle
index 34dc7a5691e0b..1a6846e9eecb0 100644
--- a/benchmarks/build.gradle
+++ b/benchmarks/build.gradle
@@ -48,7 +48,7 @@ dependencies {
   api "org.openjdk.jmh:jmh-core:$versions.jmh"
   annotationProcessor "org.openjdk.jmh:jmh-generator-annprocess:$versions.jmh"
   // Dependencies of JMH
-  runtimeOnly 'net.sf.jopt-simple:jopt-simple:4.6'
+  runtimeOnly 'net.sf.jopt-simple:jopt-simple:5.0.4'
   runtimeOnly 'org.apache.commons:commons-math3:3.2'
 }
diff --git a/build.gradle b/build.gradle
index 4be3ce4e53cd0..c12f7ece4d39c 100644
--- a/build.gradle
+++ b/build.gradle
@@ -48,7 +48,7 @@ plugins {
   id 'lifecycle-base'
   id 'opensearch.docker-support'
  id 'opensearch.global-build-info'
-  id "com.diffplug.spotless" version "5.6.1" apply false
+  id "com.diffplug.spotless" version "6.3.0" apply false
 }
 
 apply from: 'gradle/build-complete.gradle'
@@ -274,6 +274,14 @@ allprojects {
       javadoc.options.addStringOption('Xdoclint:all,-missing', '-quiet')
     }
 
+    // support for reproducible builds
+    tasks.withType(AbstractArchiveTask).configureEach {
+      // ignore file timestamps
+      // be consistent in archive file order
+      preserveFileTimestamps = false
+      reproducibleFileOrder = true
+    }
+
     project.afterEvaluate {
       // Handle javadoc dependencies across projects. Order matters: the linksOffline for
       // org.opensearch:opensearch must be the last one or all the links for the
diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle
index 05f60f307096e..6565dc1646ac0 100644
--- a/buildSrc/build.gradle
+++ b/buildSrc/build.gradle
@@ -102,7 +102,7 @@ dependencies {
 
   api localGroovy()
 
-  api 'commons-codec:commons-codec:1.13'
+  api 'commons-codec:commons-codec:1.15'
   api 'org.apache.commons:commons-compress:1.21'
   api 'org.apache.ant:ant:1.10.12'
   api 'com.netflix.nebula:gradle-extra-configurations-plugin:3.0.3'
@@ -111,12 +111,12 @@ dependencies {
   api 'org.apache.rat:apache-rat:0.13'
   api 'commons-io:commons-io:2.7'
   api "net.java.dev.jna:jna:5.5.0"
-  api 'com.github.jengelman.gradle.plugins:shadow:6.0.0'
-  api 'de.thetaphi:forbiddenapis:3.0'
+  api 'gradle.plugin.com.github.johnrengelman:shadow:7.1.2'
+  api 'de.thetaphi:forbiddenapis:3.2'
   api 'com.avast.gradle:gradle-docker-compose-plugin:0.14.12'
   api 'org.apache.maven:maven-model:3.6.2'
   api 'com.networknt:json-schema-validator:1.0.36'
-  api 'com.fasterxml.jackson.core:jackson-databind:2.12.5'
+  api "com.fasterxml.jackson.core:jackson-databind:${props.getProperty('jackson')}"
 
   testFixturesApi "junit:junit:${props.getProperty('junit')}"
   testFixturesApi "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${props.getProperty('randomizedrunner')}"
@@ -158,8 +158,8 @@ if (project != rootProject) {
   apply plugin: 'opensearch.publish'
 
   allprojects {
-    targetCompatibility = 10
-    sourceCompatibility = 10
+    targetCompatibility = 11
+    sourceCompatibility = 11
   }
 
   // groovydoc succeeds, but has some weird internal exception...
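The reproducible-builds block added to `build.gradle` above applies the same two settings to every archive task. For readers more familiar with Gradle's Java API than its Groovy DSL, a sketch of the equivalent configuration follows; `ReproducibleArchivesPlugin` is a hypothetical standalone plugin, not code from this patch:

```java
import org.gradle.api.Plugin;
import org.gradle.api.Project;
import org.gradle.api.tasks.bundling.AbstractArchiveTask;

// Equivalent of the Groovy block added to build.gradle: strip timestamps and fix
// entry ordering so repeated builds produce byte-identical archives.
public class ReproducibleArchivesPlugin implements Plugin<Project> {
    @Override
    public void apply(Project project) {
        project.getTasks().withType(AbstractArchiveTask.class).configureEach(task -> {
            task.setPreserveFileTimestamps(false); // ignore file timestamps
            task.setReproducibleFileOrder(true);   // be consistent in archive file order
        });
    }
}
```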
diff --git a/buildSrc/src/integTest/java/org/opensearch/gradle/precommit/ThirdPartyAuditTaskIT.java b/buildSrc/src/integTest/java/org/opensearch/gradle/precommit/ThirdPartyAuditTaskIT.java
index c658f8d18be70..4e5bbfd409f90 100644
--- a/buildSrc/src/integTest/java/org/opensearch/gradle/precommit/ThirdPartyAuditTaskIT.java
+++ b/buildSrc/src/integTest/java/org/opensearch/gradle/precommit/ThirdPartyAuditTaskIT.java
@@ -71,7 +71,7 @@ public void testWithEmptyRules() {
             "-PcompileOnlyVersion=0.0.1",
             "-PcompileGroup=other.gradle:dummy-io",
             "-PcompileVersion=0.0.1"
-        ).buildAndFail();
+        ).build();
     }
 
     public void testViolationFoundAndCompileOnlyIgnored() {
diff --git a/buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/.ci/java-versions.properties b/buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/.ci/java-versions.properties
index ded62500f1d30..97dbc8f7ced11 100644
--- a/buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/.ci/java-versions.properties
+++ b/buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/.ci/java-versions.properties
@@ -28,5 +28,5 @@
 # under the License.
 #
 OPENSEARCH_BUILD_JAVA=openjdk11
-OPENSEARCH_RUNTIME_JAVA=java8
+OPENSEARCH_RUNTIME_JAVA=java11
 GRADLE_TASK=build
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/DistributionDownloadPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/DistributionDownloadPlugin.java
index 86774265c81ad..8b5e81bc2ef07 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/DistributionDownloadPlugin.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/DistributionDownloadPlugin.java
@@ -105,7 +105,6 @@ public void apply(Project project) {
 
         setupResolutionsContainer(project);
         setupDistributionContainer(project, dockerSupport);
-        setupDownloadServiceRepo(project);
 
         project.afterEvaluate(this::setupDistributions);
     }
@@ -153,6 +152,7 @@ void setupDistributions(Project project) {
                 dependencies.add(distribution.getExtracted().getName(), distributionDependency.getExtractedNotation());
             }
         }
+        setupDownloadServiceRepo(project);
     }
 
     private DistributionDependency resolveDependencyNotation(Project p, OpenSearchDistribution distribution) {
@@ -195,16 +195,22 @@ private static void setupDownloadServiceRepo(Project project) {
         if (project.getRepositories().findByName(DOWNLOAD_REPO_NAME) != null) {
             return;
         }
-        addIvyRepo(
-            project,
-            DOWNLOAD_REPO_NAME,
-            "https://artifacts.opensearch.org",
-            FAKE_IVY_GROUP,
-            "/releases" + RELEASE_PATTERN_LAYOUT,
-            "/release-candidates" + RELEASE_PATTERN_LAYOUT
-        );
-
-        addIvyRepo(project, SNAPSHOT_REPO_NAME, "https://artifacts.opensearch.org", FAKE_SNAPSHOT_IVY_GROUP, SNAPSHOT_PATTERN_LAYOUT);
+        Object customDistributionUrl = project.findProperty("customDistributionUrl");
+        // checks if custom Distribution Url has been passed by user from plugins
+        if (customDistributionUrl != null) {
+            addIvyRepo(project, DOWNLOAD_REPO_NAME, customDistributionUrl.toString(), FAKE_IVY_GROUP, "");
+            addIvyRepo(project, SNAPSHOT_REPO_NAME, customDistributionUrl.toString(), FAKE_SNAPSHOT_IVY_GROUP, "");
+        } else {
+            addIvyRepo(
+                project,
+                DOWNLOAD_REPO_NAME,
+                "https://artifacts.opensearch.org",
+                FAKE_IVY_GROUP,
+                "/releases" + RELEASE_PATTERN_LAYOUT,
+                "/release-candidates" + RELEASE_PATTERN_LAYOUT
+            );
+            addIvyRepo(project, SNAPSHOT_REPO_NAME, "https://artifacts.opensearch.org", FAKE_SNAPSHOT_IVY_GROUP, SNAPSHOT_PATTERN_LAYOUT);
+        }
         addIvyRepo2(project, DOWNLOAD_REPO_NAME_ES, "https://artifacts-no-kpi.elastic.co", FAKE_IVY_GROUP_ES);
         addIvyRepo2(project, SNAPSHOT_REPO_NAME_ES, "https://snapshots-no-kpi.elastic.co", FAKE_SNAPSHOT_IVY_GROUP_ES);
"https://artifacts-no-kpi.elastic.co", FAKE_IVY_GROUP_ES); addIvyRepo2(project, SNAPSHOT_REPO_NAME_ES, "https://snapshots-no-kpi.elastic.co", FAKE_SNAPSHOT_IVY_GROUP_ES); diff --git a/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchJavaPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchJavaPlugin.java index 80850e05b8a02..2750503334baf 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchJavaPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchJavaPlugin.java @@ -176,14 +176,10 @@ public static void configureCompile(Project project) { compileOptions.getRelease().set(releaseVersionProviderFromCompileTask(project, compileTask)); }); // also apply release flag to groovy, which is used in build-tools - project.getTasks() - .withType(GroovyCompile.class) - .configureEach( - compileTask -> { - // TODO: this probably shouldn't apply to groovy at all? - compileTask.getOptions().getRelease().set(releaseVersionProviderFromCompileTask(project, compileTask)); - } - ); + project.getTasks().withType(GroovyCompile.class).configureEach(compileTask -> { + // TODO: this probably shouldn't apply to groovy at all? + compileTask.getOptions().getRelease().set(releaseVersionProviderFromCompileTask(project, compileTask)); + }); }); } @@ -205,50 +201,37 @@ public static void configureInputNormalization(Project project) { * Adds additional manifest info to jars */ static void configureJars(Project project) { - project.getTasks() - .withType(Jar.class) - .configureEach( - jarTask -> { - // we put all our distributable files under distributions - jarTask.getDestinationDirectory().set(new File(project.getBuildDir(), "distributions")); - // fixup the jar manifest - // Explicitly using an Action interface as java lambdas - // are not supported by Gradle up-to-date checks - jarTask.doFirst(new Action() { - @Override - public void execute(Task task) { - // this doFirst is added before the info plugin, therefore it will run - // after the doFirst added by the info plugin, and we can override attributes - jarTask.getManifest() - .attributes( - Map.of( - "Build-Date", - BuildParams.getBuildDate(), - "Build-Java-Version", - BuildParams.getGradleJavaVersion() - ) - ); - } - }); + project.getTasks().withType(Jar.class).configureEach(jarTask -> { + // we put all our distributable files under distributions + jarTask.getDestinationDirectory().set(new File(project.getBuildDir(), "distributions")); + // fixup the jar manifest + // Explicitly using an Action interface as java lambdas + // are not supported by Gradle up-to-date checks + jarTask.doFirst(new Action() { + @Override + public void execute(Task task) { + // this doFirst is added before the info plugin, therefore it will run + // after the doFirst added by the info plugin, and we can override attributes + jarTask.getManifest() + .attributes( + Map.of("Build-Date", BuildParams.getBuildDate(), "Build-Java-Version", BuildParams.getGradleJavaVersion()) + ); } - ); + }); + }); project.getPluginManager().withPlugin("com.github.johnrengelman.shadow", p -> { - project.getTasks() - .withType(ShadowJar.class) - .configureEach( - shadowJar -> { - /* - * Replace the default "-all" classifier with null - * which will leave the classifier off of the file name. 
- */ - shadowJar.getArchiveClassifier().set((String) null); - /* - * Not all cases need service files merged but it is - * better to be safe - */ - shadowJar.mergeServiceFiles(); - } - ); + project.getTasks().withType(ShadowJar.class).configureEach(shadowJar -> { + /* + * Replace the default "-all" classifier with null + * which will leave the classifier off of the file name. + */ + shadowJar.getArchiveClassifier().set((String) null); + /* + * Not all cases need service files merged but it is + * better to be safe + */ + shadowJar.mergeServiceFiles(); + }); // Add "original" classifier to the non-shadowed JAR to distinguish it from the shadow JAR project.getTasks().named(JavaPlugin.JAR_TASK_NAME, Jar.class).configure(jar -> jar.getArchiveClassifier().set("original")); // Make sure we assemble the shadow jar diff --git a/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java index 556060d8afe4a..8a972bfa37e78 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java @@ -217,7 +217,7 @@ public void execute(Task t) { .getByName(SourceSet.MAIN_SOURCE_SET_NAME) .getRuntimeClasspath(); // Add any "shadow" dependencies. These are dependencies that are *not* bundled into the shadow JAR - Configuration shadowConfig = project.getConfigurations().getByName(ShadowBasePlugin.getCONFIGURATION_NAME()); + Configuration shadowConfig = project.getConfigurations().getByName(ShadowBasePlugin.CONFIGURATION_NAME); // Add the shadow JAR artifact itself FileCollection shadowJar = project.files(project.getTasks().named("shadowJar")); diff --git a/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java index e2bd3e71f5fc9..d164b54c7506c 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java @@ -107,7 +107,7 @@ public String call() throws Exception { root.appendNode("name", project.getName()); root.appendNode("description", project.getDescription()); Node dependenciesNode = (Node) ((NodeList) root.get("dependencies")).get(0); - project.getConfigurations().getByName(ShadowBasePlugin.getCONFIGURATION_NAME()).getAllDependencies().all(dependency -> { + project.getConfigurations().getByName(ShadowBasePlugin.CONFIGURATION_NAME).getAllDependencies().all(dependency -> { if (dependency instanceof ProjectDependency) { Node dependencyNode = dependenciesNode.appendNode("dependency"); dependencyNode.appendNode("groupId", dependency.getGroup()); diff --git a/buildSrc/src/main/java/org/opensearch/gradle/info/GlobalBuildInfoPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/info/GlobalBuildInfoPlugin.java index 60afe8334f05d..ccd82372bb11b 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/info/GlobalBuildInfoPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/info/GlobalBuildInfoPlugin.java @@ -121,7 +121,7 @@ public void apply(Project project) { params.setGradleJavaVersion(Jvm.current().getJavaVersion()); params.setGitRevision(gitInfo.getRevision()); params.setGitOrigin(gitInfo.getOrigin()); - params.setBuildDate(ZonedDateTime.now(ZoneOffset.UTC)); + params.setBuildDate(Util.getBuildDate(ZonedDateTime.now(ZoneOffset.UTC))); params.setTestSeed(getTestSeed()); params.setIsCi(System.getenv("JENKINS_URL") != null); 
params.setIsInternal(isInternal); diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/PrecommitPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/PrecommitPlugin.java index 183148f3e1bef..0b4cc20f145ad 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/PrecommitPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/PrecommitPlugin.java @@ -53,16 +53,12 @@ public final void apply(Project project) { TaskProvider precommit = project.getTasks().named(PRECOMMIT_TASK_NAME); precommit.configure(t -> t.dependsOn(task)); - project.getPluginManager() - .withPlugin( - "java", - p -> { - // We want to get any compilation error before running the pre-commit checks. - for (SourceSet sourceSet : GradleUtils.getJavaSourceSets(project)) { - task.configure(t -> t.shouldRunAfter(sourceSet.getClassesTaskName())); - } - } - ); + project.getPluginManager().withPlugin("java", p -> { + // We want to get any compilation error before running the pre-commit checks. + for (SourceSet sourceSet : GradleUtils.getJavaSourceSets(project)) { + task.configure(t -> t.shouldRunAfter(sourceSet.getClassesTaskName())); + } + }); } public abstract TaskProvider createTask(Project project); diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/PrecommitTaskPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/PrecommitTaskPlugin.java index 43e7d5bf69581..52cbdbded2f13 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/PrecommitTaskPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/PrecommitTaskPlugin.java @@ -56,18 +56,14 @@ public void apply(Project project) { "lifecycle-base", p -> project.getTasks().named(LifecycleBasePlugin.CHECK_TASK_NAME).configure(t -> t.dependsOn(precommit)) ); - project.getPluginManager() - .withPlugin( - "java", - p -> { - // run compilation as part of precommit - for (SourceSet sourceSet : GradleUtils.getJavaSourceSets(project)) { - precommit.configure(t -> t.dependsOn(sourceSet.getClassesTaskName())); - } + project.getPluginManager().withPlugin("java", p -> { + // run compilation as part of precommit + for (SourceSet sourceSet : GradleUtils.getJavaSourceSets(project)) { + precommit.configure(t -> t.dependsOn(sourceSet.getClassesTaskName())); + } - // make sure tests run after all precommit tasks - project.getTasks().withType(Test.class).configureEach(t -> t.mustRunAfter(precommit)); - } - ); + // make sure tests run after all precommit tasks + project.getTasks().withType(Test.class).configureEach(t -> t.mustRunAfter(precommit)); + }); } } diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditPrecommitPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditPrecommitPlugin.java index 9cf58b25c58d2..5d707ce2b9f28 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditPrecommitPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditPrecommitPlugin.java @@ -51,7 +51,7 @@ public class ThirdPartyAuditPrecommitPlugin extends PrecommitPlugin { public TaskProvider createTask(Project project) { project.getPlugins().apply(CompileOnlyResolvePlugin.class); project.getConfigurations().create("forbiddenApisCliJar"); - project.getDependencies().add("forbiddenApisCliJar", "de.thetaphi:forbiddenapis:2.7"); + project.getDependencies().add("forbiddenApisCliJar", "de.thetaphi:forbiddenapis:3.2"); Configuration jdkJarHellConfig = 
project.getConfigurations().create(JDK_JAR_HELL_CONFIG_NAME); if (BuildParams.isInternal() && project.getPath().equals(":libs:opensearch-core") == false) { diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java index 81b51f7f1f9ff..68fab04c2217f 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java @@ -76,7 +76,7 @@ public class DistroTestPlugin implements Plugin { private static final String SYSTEM_JDK_VERSION = "8u242+b08"; private static final String SYSTEM_JDK_VENDOR = "adoptopenjdk"; - private static final String GRADLE_JDK_VERSION = "17.0.1+12"; + private static final String GRADLE_JDK_VERSION = "17.0.2+8"; private static final String GRADLE_JDK_VENDOR = "adoptium"; // all distributions used by distro tests. this is temporary until tests are per distribution diff --git a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/StandaloneRestIntegTestTask.java b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/StandaloneRestIntegTestTask.java index 63090f9402cdd..bf17daa6e2e6f 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/StandaloneRestIntegTestTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/StandaloneRestIntegTestTask.java @@ -31,6 +31,7 @@ package org.opensearch.gradle.testclusters; +import groovy.lang.Closure; import org.opensearch.gradle.FileSystemOperationsAware; import org.opensearch.gradle.test.Fixture; import org.opensearch.gradle.util.GradleUtils; @@ -60,6 +61,7 @@ public class StandaloneRestIntegTestTask extends Test implements TestClustersAware, FileSystemOperationsAware { private Collection clusters = new HashSet<>(); + private Closure beforeStart; public StandaloneRestIntegTestTask() { this.getOutputs() @@ -86,6 +88,18 @@ public StandaloneRestIntegTestTask() { ); } + // Hook for executing any custom logic before starting the task. 
+ public void setBeforeStart(Closure closure) { + beforeStart = closure; + } + + @Override + public void beforeStart() { + if (beforeStart != null) { + beforeStart.call(this); + } + } + @Override public int getMaxParallelForks() { return 1; diff --git a/buildSrc/src/main/java/org/opensearch/gradle/util/Util.java b/buildSrc/src/main/java/org/opensearch/gradle/util/Util.java index fc79d991211d6..71b1e5040340d 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/util/Util.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/util/Util.java @@ -48,6 +48,9 @@ import java.io.UncheckedIOException; import java.net.URI; import java.net.URISyntaxException; +import java.time.Instant; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; import java.util.Locale; import java.util.Optional; import java.util.function.Supplier; @@ -187,4 +190,17 @@ public String toString() { } }; } + + public static ZonedDateTime getBuildDate(ZonedDateTime defaultValue) { + final String sourceDateEpoch = System.getenv("SOURCE_DATE_EPOCH"); + if (sourceDateEpoch != null) { + try { + return ZonedDateTime.ofInstant(Instant.ofEpochSecond(Long.parseLong(sourceDateEpoch)), ZoneOffset.UTC); + } catch (NumberFormatException e) { + throw new GradleException("Sysprop [SOURCE_DATE_EPOCH] must be of type [long]", e); + } + } else { + return defaultValue; + } + } } diff --git a/buildSrc/src/test/java/org/opensearch/gradle/DistributionDownloadPluginTests.java b/buildSrc/src/test/java/org/opensearch/gradle/DistributionDownloadPluginTests.java index 98feb3ef2ac93..d6299311e1d9b 100644 --- a/buildSrc/src/test/java/org/opensearch/gradle/DistributionDownloadPluginTests.java +++ b/buildSrc/src/test/java/org/opensearch/gradle/DistributionDownloadPluginTests.java @@ -38,6 +38,7 @@ import org.opensearch.gradle.test.GradleUnitTestCase; import org.gradle.api.NamedDomainObjectContainer; import org.gradle.api.Project; +import org.gradle.api.internal.artifacts.repositories.DefaultIvyArtifactRepository; import org.gradle.testfixtures.ProjectBuilder; import java.io.File; @@ -79,6 +80,58 @@ public void testVersionDefault() { assertEquals(distro.getVersion(), VersionProperties.getOpenSearch()); } + public void testCustomDistributionUrlWithUrl() { + Project project = createProject(null, false); + String customUrl = "https://artifacts.opensearch.org/custom"; + project.getExtensions().getExtraProperties().set("customDistributionUrl", customUrl); + DistributionDownloadPlugin plugin = project.getPlugins().getPlugin(DistributionDownloadPlugin.class); + plugin.setupDistributions(project); + assertEquals(4, project.getRepositories().size()); + assertEquals( + ((DefaultIvyArtifactRepository) project.getRepositories().getAt("opensearch-downloads")).getUrl().toString(), + customUrl + ); + assertEquals( + ((DefaultIvyArtifactRepository) project.getRepositories().getAt("opensearch-snapshots")).getUrl().toString(), + customUrl + ); + assertEquals( + ((DefaultIvyArtifactRepository) project.getRepositories().getAt("elasticsearch-downloads")).getUrl().toString(), + "https://artifacts-no-kpi.elastic.co" + ); + assertEquals( + ((DefaultIvyArtifactRepository) project.getRepositories().getAt("elasticsearch-snapshots")).getUrl().toString(), + "https://snapshots-no-kpi.elastic.co" + ); + } + + public void testCustomDistributionUrlWithoutUrl() { + Project project = createProject(null, false); + DistributionDownloadPlugin plugin = project.getPlugins().getPlugin(DistributionDownloadPlugin.class); + plugin.setupDistributions(project); + assertEquals(5, 
project.getRepositories().size()); + assertEquals( + ((DefaultIvyArtifactRepository) project.getRepositories().getAt("opensearch-downloads")).getUrl().toString(), + "https://artifacts.opensearch.org" + ); + assertEquals( + ((DefaultIvyArtifactRepository) project.getRepositories().getAt("opensearch-downloads2")).getUrl().toString(), + "https://artifacts.opensearch.org" + ); + assertEquals( + ((DefaultIvyArtifactRepository) project.getRepositories().getAt("opensearch-snapshots")).getUrl().toString(), + "https://artifacts.opensearch.org" + ); + assertEquals( + ((DefaultIvyArtifactRepository) project.getRepositories().getAt("elasticsearch-downloads")).getUrl().toString(), + "https://artifacts-no-kpi.elastic.co" + ); + assertEquals( + ((DefaultIvyArtifactRepository) project.getRepositories().getAt("elasticsearch-snapshots")).getUrl().toString(), + "https://snapshots-no-kpi.elastic.co" + ); + } + public void testBadVersionFormat() { assertDistroError( createProject(null, false), diff --git a/buildSrc/src/testKit/testingConventions/build.gradle b/buildSrc/src/testKit/testingConventions/build.gradle index 309a9d64d4170..418e833e8cb14 100644 --- a/buildSrc/src/testKit/testingConventions/build.gradle +++ b/buildSrc/src/testKit/testingConventions/build.gradle @@ -21,7 +21,7 @@ allprojects { mavenCentral() } dependencies { - testImplementation "junit:junit:4.13.1" + testImplementation "junit:junit:4.13.2" } ext.licenseFile = file("$buildDir/dummy/license") diff --git a/buildSrc/src/testKit/thirdPartyAudit/build.gradle b/buildSrc/src/testKit/thirdPartyAudit/build.gradle index 21b0fc3e74f57..41e699db94dcf 100644 --- a/buildSrc/src/testKit/thirdPartyAudit/build.gradle +++ b/buildSrc/src/testKit/thirdPartyAudit/build.gradle @@ -40,7 +40,7 @@ repositories { } dependencies { - forbiddenApisCliJar 'de.thetaphi:forbiddenapis:2.7' + forbiddenApisCliJar 'de.thetaphi:forbiddenapis:3.2' jdkJarHell 'org.opensearch:opensearch-core:current' compileOnly "org.${project.properties.compileOnlyGroup}:${project.properties.compileOnlyVersion}" implementation "org.${project.properties.compileGroup}:${project.properties.compileVersion}" diff --git a/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle b/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle index 9d175ed65715d..57010739dfc44 100644 --- a/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle +++ b/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle @@ -16,7 +16,7 @@ repositories { mavenCentral() } dependencies { - implementation 'org.apache.logging.log4j:log4j-core:2.11.1' + implementation 'org.apache.logging.log4j:log4j-core:2.17.1' } ["0.0.1", "0.0.2"].forEach { v -> diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 7b702c5bb873d..bfc939394bdaa 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -2,14 +2,14 @@ opensearch = 2.0.0 lucene = 8.10.1 bundled_jdk_vendor = adoptium -bundled_jdk = 17.0.1+12 +bundled_jdk = 17.0.2+8 # optional dependencies spatial4j = 0.7 jts = 1.15.0 -jackson = 2.12.5 +jackson = 2.12.6 snakeyaml = 1.26 icu4j = 62.1 supercsv = 2.4.0 @@ -19,7 +19,7 @@ slf4j = 1.6.2 # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 5.5.0 -netty = 4.1.72.Final +netty = 4.1.73.Final joda = 2.10.12 # when updating this version, you need to ensure compatibility with: @@ -35,9 +35,9 @@ httpasyncclient = 4.1.4 commonslogging = 1.1.3 commonscodec = 1.13 hamcrest = 2.1 -mockito = 4.2.0 +mockito = 4.3.1 objenesis = 3.2 -bytebuddy = 1.12.6 
+bytebuddy = 1.12.7 # benchmark dependencies jmh = 1.19 diff --git a/client/benchmark/build.gradle b/client/benchmark/build.gradle index 58d932702ba8b..4aa4d7171e366 100644 --- a/client/benchmark/build.gradle +++ b/client/benchmark/build.gradle @@ -43,7 +43,7 @@ mainClassName = 'org.opensearch.client.benchmark.BenchmarkMain' test.enabled = false dependencies { - api 'org.apache.commons:commons-math3:3.2' + api 'org.apache.commons:commons-math3:3.6.1' api project(":client:rest") // bottleneck should be the client, not OpenSearch diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/opensearch/plugin/noop/action/bulk/RestNoopBulkAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/opensearch/plugin/noop/action/bulk/RestNoopBulkAction.java index 8f42744aeb5c9..62123870f0099 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/opensearch/plugin/noop/action/bulk/RestNoopBulkAction.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/opensearch/plugin/noop/action/bulk/RestNoopBulkAction.java @@ -97,7 +97,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC bulkRequest.add( request.requiredContent(), defaultIndex, - defaultType, defaultRouting, null, defaultPipeline, @@ -117,7 +116,7 @@ private static class BulkRestBuilderListener extends RestBuilderListener listener - ) { - return restHighLevelClient.performRequestAsyncAndParseEntity( - syncedFlushRequest, - IndicesRequestConverters::flushSynced, - options, - SyncedFlushResponse::fromXContent, - listener, - emptySet() - ); - } - /** * Retrieve the settings of one or more indices. * @@ -1915,7 +1867,6 @@ public Cancellable simulateIndexTemplateAsync( /** * Validate a potentially expensive query without executing it. - *
<p>
* * @param validateQueryRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized @@ -1934,7 +1885,6 @@ public ValidateQueryResponse validateQuery(ValidateQueryRequest validateQueryReq /** * Asynchronously validate a potentially expensive query without executing it. - *
<p>
* * @param validateQueryRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/IndicesRequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/IndicesRequestConverters.java index 727e91fc210cd..9979d18635d05 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/IndicesRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/IndicesRequestConverters.java @@ -42,7 +42,6 @@ import org.opensearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; import org.opensearch.action.admin.indices.flush.FlushRequest; -import org.opensearch.action.admin.indices.flush.SyncedFlushRequest; import org.opensearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.opensearch.action.admin.indices.open.OpenIndexRequest; import org.opensearch.action.admin.indices.refresh.RefreshRequest; @@ -322,15 +321,6 @@ static Request flush(FlushRequest flushRequest) { return request; } - static Request flushSynced(SyncedFlushRequest syncedFlushRequest) { - String[] indices = syncedFlushRequest.indices() == null ? Strings.EMPTY_ARRAY : syncedFlushRequest.indices(); - Request request = new Request(HttpPost.METHOD_NAME, RequestConverters.endpoint(indices, "_flush/synced")); - RequestConverters.Params parameters = new RequestConverters.Params(); - parameters.withIndicesOptions(syncedFlushRequest.indicesOptions()); - request.addParameters(parameters.asMap()); - return request; - } - static Request forceMerge(ForceMergeRequest forceMergeRequest) { String[] indices = forceMergeRequest.indices() == null ? Strings.EMPTY_ARRAY : forceMergeRequest.indices(); Request request = new Request(HttpPost.METHOD_NAME, RequestConverters.endpoint(indices, "_forcemerge")); diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/IngestClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/IngestClient.java index 93dd3513a4614..cd304019e771c 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/IngestClient.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/IngestClient.java @@ -175,7 +175,6 @@ public Cancellable deletePipelineAsync( /** * Simulate a pipeline on a set of documents provided in the request - *
<p>
* * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized @@ -194,7 +193,6 @@ public SimulatePipelineResponse simulate(SimulatePipelineRequest request, Reques /** * Asynchronously simulate a pipeline on a set of documents provided in the request - *
<p>
* * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java index f0f33ae1e71fe..3e43963db519f 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java @@ -117,7 +117,7 @@ private RequestConverters() { } static Request delete(DeleteRequest deleteRequest) { - String endpoint = endpoint(deleteRequest.index(), deleteRequest.type(), deleteRequest.id()); + String endpoint = endpoint(deleteRequest.index(), deleteRequest.id()); Request request = new Request(HttpDelete.METHOD_NAME, endpoint); Params parameters = new Params(); @@ -185,11 +185,6 @@ static Request bulk(BulkRequest bulkRequest) throws IOException { if (Strings.hasLength(action.index())) { metadata.field("_index", action.index()); } - if (Strings.hasLength(action.type())) { - if (MapperService.SINGLE_MAPPING_NAME.equals(action.type()) == false) { - metadata.field("_type", action.type()); - } - } if (Strings.hasLength(action.id())) { metadata.field("_id", action.id()); } @@ -284,7 +279,7 @@ static Request get(GetRequest getRequest) { } private static Request getStyleRequest(String method, GetRequest getRequest) { - Request request = new Request(method, endpoint(getRequest.index(), getRequest.type(), getRequest.id())); + Request request = new Request(method, endpoint(getRequest.index(), getRequest.id())); Params parameters = new Params(); parameters.withPreference(getRequest.preference()); @@ -315,13 +310,7 @@ private static Request sourceRequest(GetSourceRequest getSourceRequest, String h parameters.withRealtime(getSourceRequest.realtime()); parameters.withFetchSourceContext(getSourceRequest.fetchSourceContext()); - String optionalType = getSourceRequest.type(); - String endpoint; - if (optionalType == null) { - endpoint = endpoint(getSourceRequest.index(), "_source", getSourceRequest.id()); - } else { - endpoint = endpoint(getSourceRequest.index(), optionalType, getSourceRequest.id(), "_source"); - } + String endpoint = endpoint(getSourceRequest.index(), "_source", getSourceRequest.id()); Request request = new Request(httpMethodName, endpoint); request.addParameters(parameters.asMap()); return request; @@ -344,11 +333,9 @@ static Request index(IndexRequest indexRequest) { String endpoint; if (indexRequest.opType() == DocWriteRequest.OpType.CREATE) { - endpoint = indexRequest.type().equals(MapperService.SINGLE_MAPPING_NAME) - ? endpoint(indexRequest.index(), "_create", indexRequest.id()) - : endpoint(indexRequest.index(), indexRequest.type(), indexRequest.id(), "_create"); + endpoint = endpoint(indexRequest.index(), "_create", indexRequest.id()); } else { - endpoint = endpoint(indexRequest.index(), indexRequest.type(), indexRequest.id()); + endpoint = endpoint(indexRequest.index(), indexRequest.id()); } Request request = new Request(method, endpoint); @@ -377,9 +364,7 @@ static Request ping() { } static Request update(UpdateRequest updateRequest) throws IOException { - String endpoint = updateRequest.type().equals(MapperService.SINGLE_MAPPING_NAME) - ? 
endpoint(updateRequest.index(), "_update", updateRequest.id()) - : endpoint(updateRequest.index(), updateRequest.type(), updateRequest.id(), "_update"); + String endpoint = endpoint(updateRequest.index(), "_update", updateRequest.id()); Request request = new Request(HttpPost.METHOD_NAME, endpoint); Params parameters = new Params(); @@ -432,7 +417,7 @@ static Request update(UpdateRequest updateRequest) throws IOException { * for standard searches */ static Request search(SearchRequest searchRequest, String searchEndpoint) throws IOException { - Request request = new Request(HttpPost.METHOD_NAME, endpoint(searchRequest.indices(), searchRequest.types(), searchEndpoint)); + Request request = new Request(HttpPost.METHOD_NAME, endpoint(searchRequest.indices(), searchEndpoint)); Params params = new Params(); addSearchRequestParams(params, searchRequest); @@ -502,7 +487,7 @@ static Request searchTemplate(SearchTemplateRequest searchTemplateRequest) throw request = new Request(HttpGet.METHOD_NAME, "_render/template"); } else { SearchRequest searchRequest = searchTemplateRequest.getRequest(); - String endpoint = endpoint(searchRequest.indices(), searchRequest.types(), "_search/template"); + String endpoint = endpoint(searchRequest.indices(), "_search/template"); request = new Request(HttpGet.METHOD_NAME, endpoint); Params params = new Params(); @@ -548,9 +533,7 @@ static Request count(CountRequest countRequest) throws IOException { } static Request explain(ExplainRequest explainRequest) throws IOException { - String endpoint = explainRequest.type().equals(MapperService.SINGLE_MAPPING_NAME) - ? endpoint(explainRequest.index(), "_explain", explainRequest.id()) - : endpoint(explainRequest.index(), explainRequest.type(), explainRequest.id(), "_explain"); + String endpoint = endpoint(explainRequest.index(), "_explain", explainRequest.id()); Request request = new Request(HttpGet.METHOD_NAME, endpoint); Params params = new Params(); @@ -633,7 +616,7 @@ private static Request prepareReindexRequest(ReindexRequest reindexRequest, bool private static Request prepareDeleteByQueryRequest(DeleteByQueryRequest deleteByQueryRequest, boolean waitForCompletion) throws IOException { - String endpoint = endpoint(deleteByQueryRequest.indices(), deleteByQueryRequest.getDocTypes(), "_delete_by_query"); + String endpoint = endpoint(deleteByQueryRequest.indices(), "_delete_by_query"); Request request = new Request(HttpPost.METHOD_NAME, endpoint); Params params = new Params().withRouting(deleteByQueryRequest.getRouting()) .withRefresh(deleteByQueryRequest.isRefresh()) @@ -661,7 +644,7 @@ private static Request prepareDeleteByQueryRequest(DeleteByQueryRequest deleteBy } static Request prepareUpdateByQueryRequest(UpdateByQueryRequest updateByQueryRequest, boolean waitForCompletion) throws IOException { - String endpoint = endpoint(updateByQueryRequest.indices(), updateByQueryRequest.getDocTypes(), "_update_by_query"); + String endpoint = endpoint(updateByQueryRequest.indices(), "_update_by_query"); Request request = new Request(HttpPost.METHOD_NAME, endpoint); Params params = new Params().withRouting(updateByQueryRequest.getRouting()) .withPipeline(updateByQueryRequest.getPipeline()) @@ -799,10 +782,16 @@ static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType, return new NByteArrayEntity(source.bytes, source.offset, source.length, createContentType(xContentType)); } + static String endpoint(String index, String id) { + return new EndpointBuilder().addPathPart(index, MapperService.SINGLE_MAPPING_NAME, 
id).build(); + } + + @Deprecated static String endpoint(String index, String type, String id) { return new EndpointBuilder().addPathPart(index, type, id).build(); } + @Deprecated static String endpoint(String index, String type, String id, String endpoint) { return new EndpointBuilder().addPathPart(index, type, id).addPathPartAsIs(endpoint).build(); } @@ -815,6 +804,7 @@ static String endpoint(String[] indices, String endpoint) { return new EndpointBuilder().addCommaSeparatedPathParts(indices).addPathPartAsIs(endpoint).build(); } + @Deprecated static String endpoint(String[] indices, String[] types, String endpoint) { return new EndpointBuilder().addCommaSeparatedPathParts(indices) .addCommaSeparatedPathParts(types) @@ -829,6 +819,7 @@ static String endpoint(String[] indices, String endpoint, String[] suffixes) { .build(); } + @Deprecated static String endpoint(String[] indices, String endpoint, String type) { return new EndpointBuilder().addCommaSeparatedPathParts(indices).addPathPartAsIs(endpoint).addPathPart(type).build(); } diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotClient.java index c702fcda89e5d..85a793dec24ce 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotClient.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotClient.java @@ -293,7 +293,6 @@ public CreateSnapshotResponse create(CreateSnapshotRequest createSnapshotRequest /** * Asynchronously creates a snapshot. - *
<p>
* * @return cancellable that may be used to cancel the request */ @@ -327,7 +326,6 @@ public AcknowledgedResponse clone(CloneSnapshotRequest cloneSnapshotRequest, Req /** * Asynchronously clones a snapshot. - *
<p>
* * @return cancellable that may be used to cancel the request */ diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/SyncedFlushResponse.java b/client/rest-high-level/src/main/java/org/opensearch/client/SyncedFlushResponse.java deleted file mode 100644 index a0c94fb75579e..0000000000000 --- a/client/rest-high-level/src/main/java/org/opensearch/client/SyncedFlushResponse.java +++ /dev/null @@ -1,346 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.client; - -import org.opensearch.common.ParseField; -import org.opensearch.common.ParsingException; -import org.opensearch.common.xcontent.ConstructingObjectParser; -import org.opensearch.common.xcontent.ToXContentFragment; -import org.opensearch.common.xcontent.ToXContentObject; -import org.opensearch.common.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentLocation; -import org.opensearch.common.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentParser.Token; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static org.opensearch.common.xcontent.ConstructingObjectParser.constructorArg; -import static org.opensearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; -import static org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken; - -public class SyncedFlushResponse implements ToXContentObject { - - public static final String SHARDS_FIELD = "_shards"; - - private ShardCounts totalCounts; - private Map indexResults; - - SyncedFlushResponse(ShardCounts totalCounts, Map indexResults) { - this.totalCounts = new ShardCounts(totalCounts.total, totalCounts.successful, totalCounts.failed); - this.indexResults = Collections.unmodifiableMap(indexResults); - } - - /** - * @return The total number of shard copies that were processed across all indexes - */ - public int totalShards() { - return totalCounts.total; - } - - /** - * @return The number of successful shard copies that were processed across all indexes - */ - public int successfulShards() { - return totalCounts.successful; - } - - /** - * @return The number of failed shard copies that were processed across all indexes - */ - public int failedShards() { - return totalCounts.failed; - } - - /** - * @return A map of results for each index where the keys of the map are the index names - * and the values 
are the results encapsulated in {@link IndexResult}. - */ - public Map getIndexResults() { - return indexResults; - } - - ShardCounts getShardCounts() { - return totalCounts; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.startObject(SHARDS_FIELD); - totalCounts.toXContent(builder, params); - builder.endObject(); - for (Map.Entry entry : indexResults.entrySet()) { - String indexName = entry.getKey(); - IndexResult indexResult = entry.getValue(); - builder.startObject(indexName); - indexResult.toXContent(builder, params); - builder.endObject(); - } - builder.endObject(); - return builder; - } - - public static SyncedFlushResponse fromXContent(XContentParser parser) throws IOException { - ensureExpectedToken(Token.START_OBJECT, parser.nextToken(), parser); - ShardCounts totalCounts = null; - Map indexResults = new HashMap<>(); - XContentLocation startLoc = parser.getTokenLocation(); - while (parser.nextToken().equals(Token.FIELD_NAME)) { - if (parser.currentName().equals(SHARDS_FIELD)) { - ensureExpectedToken(Token.START_OBJECT, parser.nextToken(), parser); - totalCounts = ShardCounts.fromXContent(parser); - } else { - String indexName = parser.currentName(); - IndexResult indexResult = IndexResult.fromXContent(parser); - indexResults.put(indexName, indexResult); - } - } - if (totalCounts != null) { - return new SyncedFlushResponse(totalCounts, indexResults); - } else { - throw new ParsingException(startLoc, "Unable to reconstruct object. Total counts for shards couldn't be parsed."); - } - } - - /** - * Encapsulates the number of total successful and failed shard copies - */ - public static final class ShardCounts implements ToXContentFragment { - - public static final String TOTAL_FIELD = "total"; - public static final String SUCCESSFUL_FIELD = "successful"; - public static final String FAILED_FIELD = "failed"; - - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "shardcounts", - a -> new ShardCounts((Integer) a[0], (Integer) a[1], (Integer) a[2]) - ); - static { - PARSER.declareInt(constructorArg(), new ParseField(TOTAL_FIELD)); - PARSER.declareInt(constructorArg(), new ParseField(SUCCESSFUL_FIELD)); - PARSER.declareInt(constructorArg(), new ParseField(FAILED_FIELD)); - } - - private int total; - private int successful; - private int failed; - - ShardCounts(int total, int successful, int failed) { - this.total = total; - this.successful = successful; - this.failed = failed; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(TOTAL_FIELD, total); - builder.field(SUCCESSFUL_FIELD, successful); - builder.field(FAILED_FIELD, failed); - return builder; - } - - public static ShardCounts fromXContent(XContentParser parser) throws IOException { - return PARSER.parse(parser, null); - } - - public boolean equals(ShardCounts other) { - if (other != null) { - return other.total == this.total && other.successful == this.successful && other.failed == this.failed; - } else { - return false; - } - } - - } - - /** - * Description for the flush/synced results for a particular index. - * This includes total, successful and failed copies along with failure description for each failed copy. 
- */ - public static final class IndexResult implements ToXContentFragment { - - public static final String TOTAL_FIELD = "total"; - public static final String SUCCESSFUL_FIELD = "successful"; - public static final String FAILED_FIELD = "failed"; - public static final String FAILURES_FIELD = "failures"; - - @SuppressWarnings("unchecked") - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "indexresult", - a -> new IndexResult((Integer) a[0], (Integer) a[1], (Integer) a[2], (List) a[3]) - ); - static { - PARSER.declareInt(constructorArg(), new ParseField(TOTAL_FIELD)); - PARSER.declareInt(constructorArg(), new ParseField(SUCCESSFUL_FIELD)); - PARSER.declareInt(constructorArg(), new ParseField(FAILED_FIELD)); - PARSER.declareObjectArray(optionalConstructorArg(), ShardFailure.PARSER, new ParseField(FAILURES_FIELD)); - } - - private ShardCounts counts; - private List failures; - - IndexResult(int total, int successful, int failed, List failures) { - counts = new ShardCounts(total, successful, failed); - if (failures != null) { - this.failures = Collections.unmodifiableList(failures); - } else { - this.failures = Collections.unmodifiableList(new ArrayList<>()); - } - } - - /** - * @return The total number of shard copies that were processed for this index. - */ - public int totalShards() { - return counts.total; - } - - /** - * @return The number of successful shard copies that were processed for this index. - */ - public int successfulShards() { - return counts.successful; - } - - /** - * @return The number of failed shard copies that were processed for this index. - */ - public int failedShards() { - return counts.failed; - } - - /** - * @return A list of {@link ShardFailure} objects that describe each of the failed shard copies for this index. - */ - public List failures() { - return failures; - } - - ShardCounts getShardCounts() { - return counts; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - counts.toXContent(builder, params); - if (failures.size() > 0) { - builder.startArray(FAILURES_FIELD); - for (ShardFailure failure : failures) { - failure.toXContent(builder, params); - } - builder.endArray(); - } - return builder; - } - - public static IndexResult fromXContent(XContentParser parser) throws IOException { - return PARSER.parse(parser, null); - } - } - - /** - * Description of a failed shard copy for an index. 
- */ - public static final class ShardFailure implements ToXContentFragment { - - public static String SHARD_ID_FIELD = "shard"; - public static String FAILURE_REASON_FIELD = "reason"; - public static String ROUTING_FIELD = "routing"; - - private int shardId; - private String failureReason; - private Map routing; - - @SuppressWarnings("unchecked") - static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "shardfailure", - a -> new ShardFailure((Integer) a[0], (String) a[1], (Map) a[2]) - ); - static { - PARSER.declareInt(constructorArg(), new ParseField(SHARD_ID_FIELD)); - PARSER.declareString(constructorArg(), new ParseField(FAILURE_REASON_FIELD)); - PARSER.declareObject(optionalConstructorArg(), (parser, c) -> parser.map(), new ParseField(ROUTING_FIELD)); - } - - ShardFailure(int shardId, String failureReason, Map routing) { - this.shardId = shardId; - this.failureReason = failureReason; - if (routing != null) { - this.routing = Collections.unmodifiableMap(routing); - } else { - this.routing = Collections.unmodifiableMap(new HashMap<>()); - } - } - - /** - * @return Id of the shard whose copy failed - */ - public int getShardId() { - return shardId; - } - - /** - * @return Reason for failure of the shard copy - */ - public String getFailureReason() { - return failureReason; - } - - /** - * @return Additional information about the failure. - */ - public Map getRouting() { - return routing; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.field(SHARD_ID_FIELD, shardId); - builder.field(FAILURE_REASON_FIELD, failureReason); - if (routing.size() > 0) { - builder.field(ROUTING_FIELD, routing); - } - builder.endObject(); - return builder; - } - - public static ShardFailure fromXContent(XContentParser parser) throws IOException { - return PARSER.parse(parser, null); - } - } -} diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/core/TermVectorsResponse.java b/client/rest-high-level/src/main/java/org/opensearch/client/core/TermVectorsResponse.java index 757e0df6aee77..fa13abf72207e 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/core/TermVectorsResponse.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/core/TermVectorsResponse.java @@ -46,24 +46,14 @@ public class TermVectorsResponse { private final String index; - private final String type; private final String id; private final long docVersion; private final boolean found; private final long tookInMillis; private final List termVectorList; - public TermVectorsResponse( - String index, - String type, - String id, - long version, - boolean found, - long tookInMillis, - List termVectorList - ) { + public TermVectorsResponse(String index, String id, long version, boolean found, long tookInMillis, List termVectorList) { this.index = index; - this.type = type; this.id = id; this.docVersion = version; this.found = found; @@ -75,19 +65,18 @@ public TermVectorsResponse( "term_vectors", true, args -> { - // as the response comes from server, we are sure that args[6] will be a list of TermVector + // as the response comes from server, we are sure that args[5] will be a list of TermVector @SuppressWarnings("unchecked") - List termVectorList = (List) args[6]; + List termVectorList = (List) args[5]; if (termVectorList != null) { Collections.sort(termVectorList, Comparator.comparing(TermVector::getFieldName)); } return new TermVectorsResponse( (String) args[0], (String) 
args[1], - (String) args[2], - (long) args[3], - (boolean) args[4], - (long) args[5], + (long) args[2], + (boolean) args[3], + (long) args[4], termVectorList ); } @@ -95,7 +84,6 @@ public TermVectorsResponse( static { PARSER.declareString(constructorArg(), new ParseField("_index")); - PARSER.declareString(constructorArg(), new ParseField("_type")); PARSER.declareString(optionalConstructorArg(), new ParseField("_id")); PARSER.declareLong(constructorArg(), new ParseField("_version")); PARSER.declareBoolean(constructorArg(), new ParseField("found")); @@ -118,16 +106,6 @@ public String getIndex() { return index; } - /** - * Returns the type for the response - * - * @deprecated Types are in the process of being removed. - */ - @Deprecated - public String getType() { - return type; - } - /** * Returns the id of the request * can be NULL if there is no document ID @@ -171,7 +149,6 @@ public boolean equals(Object obj) { if (!(obj instanceof TermVectorsResponse)) return false; TermVectorsResponse other = (TermVectorsResponse) obj; return index.equals(other.index) - && type.equals(other.type) && Objects.equals(id, other.id) && docVersion == other.docVersion && found == other.found @@ -181,7 +158,7 @@ public boolean equals(Object obj) { @Override public int hashCode() { - return Objects.hash(index, type, id, docVersion, found, tookInMillis, termVectorList); + return Objects.hash(index, id, docVersion, found, tookInMillis, termVectorList); } public static final class TermVector { diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/BulkProcessorIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/BulkProcessorIT.java index cae1298a8793d..cc9abdccf4c9f 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/BulkProcessorIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/BulkProcessorIT.java @@ -49,11 +49,8 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentType; -import org.opensearch.index.mapper.MapperService; -import org.opensearch.rest.action.document.RestBulkAction; import org.opensearch.search.SearchHit; import org.hamcrest.Matcher; -import org.hamcrest.Matchers; import java.io.IOException; import java.util.Arrays; @@ -69,9 +66,7 @@ import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.test.hamcrest.OpenSearchAssertions.fieldFromSource; import static org.opensearch.test.hamcrest.OpenSearchAssertions.hasId; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.hasIndex; import static org.opensearch.test.hamcrest.OpenSearchAssertions.hasProperty; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.hasType; import static org.hamcrest.Matchers.both; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.either; @@ -96,17 +91,6 @@ private static BulkProcessor.Builder initBulkProcessorBuilder(BulkProcessor.List ); } - private static BulkProcessor.Builder initBulkProcessorBuilderUsingTypes(BulkProcessor.Listener listener) { - return BulkProcessor.builder( - (request, bulkListener) -> highLevelClient().bulkAsync( - request, - expectWarningsOnce(RestBulkAction.TYPES_DEPRECATION_MESSAGE), - bulkListener - ), - listener - ); - } - public void testThatBulkProcessorCountIsCorrect() throws Exception { final CountDownLatch latch = new CountDownLatch(1); BulkProcessorTestListener listener = new BulkProcessorTestListener(latch); @@ 
-210,7 +194,6 @@ public void testBulkProcessorConcurrentRequests() throws Exception { for (BulkItemResponse bulkItemResponse : listener.bulkItems) { assertThat(bulkItemResponse.getFailureMessage(), bulkItemResponse.isFailed(), equalTo(false)); assertThat(bulkItemResponse.getIndex(), equalTo("test")); - assertThat(bulkItemResponse.getType(), equalTo("_doc")); // with concurrent requests > 1 we can't rely on the order of the bulk requests assertThat(Integer.valueOf(bulkItemResponse.getId()), both(greaterThan(0)).and(lessThanOrEqualTo(numDocs))); // we do want to check that we don't get duplicate ids back @@ -317,7 +300,6 @@ public void testBulkProcessorConcurrentRequestsReadOnlyIndex() throws Exception Set readOnlyIds = new HashSet<>(); for (BulkItemResponse bulkItemResponse : listener.bulkItems) { assertThat(bulkItemResponse.getIndex(), either(equalTo("test")).or(equalTo("test-ro"))); - assertThat(bulkItemResponse.getType(), equalTo("_doc")); if (bulkItemResponse.getIndex().equals("test")) { assertThat(bulkItemResponse.isFailed(), equalTo(false)); // with concurrent requests > 1 we can't rely on the order of the bulk requests @@ -346,7 +328,6 @@ public void testGlobalParametersAndSingleRequest() throws Exception { // tag::bulk-processor-mix-parameters try (BulkProcessor processor = initBulkProcessorBuilder(listener) .setGlobalIndex("tweets") - .setGlobalType("_doc") .setGlobalRouting("routing") .setGlobalPipeline("pipeline_id") .build()) { @@ -373,85 +354,9 @@ public void testGlobalParametersAndBulkProcessor() throws Exception { createIndexWithMultipleShards("test"); createFieldAddingPipleine("pipeline_id", "fieldNameXYZ", "valueXYZ"); - final String customType = "testType"; - final String ignoredType = "ignoredType"; int numDocs = randomIntBetween(10, 10); { - final CountDownLatch latch = new CountDownLatch(1); - BulkProcessorTestListener listener = new BulkProcessorTestListener(latch); - // Check that untyped document additions inherit the global type - String globalType = customType; - String localType = null; - try ( - BulkProcessor processor = initBulkProcessorBuilderUsingTypes(listener) - // let's make sure that the bulk action limit trips, one single execution will index all the documents - .setConcurrentRequests(randomIntBetween(0, 1)) - .setBulkActions(numDocs) - .setFlushInterval(TimeValue.timeValueHours(24)) - .setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)) - .setGlobalIndex("test") - .setGlobalType(globalType) - .setGlobalRouting("routing") - .setGlobalPipeline("pipeline_id") - .build() - ) { - - indexDocs(processor, numDocs, null, localType, "test", globalType, "pipeline_id"); - latch.await(); - - assertThat(listener.beforeCounts.get(), equalTo(1)); - assertThat(listener.afterCounts.get(), equalTo(1)); - assertThat(listener.bulkFailures.size(), equalTo(0)); - assertResponseItems(listener.bulkItems, numDocs, globalType); - - Iterable hits = searchAll(new SearchRequest("test").routing("routing")); - - assertThat(hits, everyItem(hasProperty(fieldFromSource("fieldNameXYZ"), equalTo("valueXYZ")))); - assertThat(hits, everyItem(Matchers.allOf(hasIndex("test"), hasType(globalType)))); - assertThat(hits, containsInAnyOrder(expectedIds(numDocs))); - } - - } - { - // Check that typed document additions don't inherit the global type - String globalType = ignoredType; - String localType = customType; - final CountDownLatch latch = new CountDownLatch(1); - BulkProcessorTestListener listener = new BulkProcessorTestListener(latch); - try ( - BulkProcessor processor = 
initBulkProcessorBuilderUsingTypes(listener) - // let's make sure that the bulk action limit trips, one single execution will index all the documents - .setConcurrentRequests(randomIntBetween(0, 1)) - .setBulkActions(numDocs) - .setFlushInterval(TimeValue.timeValueHours(24)) - .setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)) - .setGlobalIndex("test") - .setGlobalType(globalType) - .setGlobalRouting("routing") - .setGlobalPipeline("pipeline_id") - .build() - ) { - indexDocs(processor, numDocs, null, localType, "test", globalType, "pipeline_id"); - latch.await(); - - assertThat(listener.beforeCounts.get(), equalTo(1)); - assertThat(listener.afterCounts.get(), equalTo(1)); - assertThat(listener.bulkFailures.size(), equalTo(0)); - assertResponseItems(listener.bulkItems, numDocs, localType); - - Iterable hits = searchAll(new SearchRequest("test").routing("routing")); - - assertThat(hits, everyItem(hasProperty(fieldFromSource("fieldNameXYZ"), equalTo("valueXYZ")))); - assertThat(hits, everyItem(Matchers.allOf(hasIndex("test"), hasType(localType)))); - assertThat(hits, containsInAnyOrder(expectedIds(numDocs))); - } - } - { - // Check that untyped document additions and untyped global inherit the established custom type - // (the custom document type introduced to the mapping by the earlier code in this test) - String globalType = null; - String localType = null; final CountDownLatch latch = new CountDownLatch(1); BulkProcessorTestListener listener = new BulkProcessorTestListener(latch); try ( @@ -462,23 +367,22 @@ public void testGlobalParametersAndBulkProcessor() throws Exception { .setFlushInterval(TimeValue.timeValueHours(24)) .setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)) .setGlobalIndex("test") - .setGlobalType(globalType) .setGlobalRouting("routing") .setGlobalPipeline("pipeline_id") .build() ) { - indexDocs(processor, numDocs, null, localType, "test", globalType, "pipeline_id"); + + indexDocs(processor, numDocs, null, "test", "pipeline_id"); latch.await(); assertThat(listener.beforeCounts.get(), equalTo(1)); assertThat(listener.afterCounts.get(), equalTo(1)); assertThat(listener.bulkFailures.size(), equalTo(0)); - assertResponseItems(listener.bulkItems, numDocs, MapperService.SINGLE_MAPPING_NAME); + assertResponseItems(listener.bulkItems, numDocs); Iterable hits = searchAll(new SearchRequest("test").routing("routing")); assertThat(hits, everyItem(hasProperty(fieldFromSource("fieldNameXYZ"), equalTo("valueXYZ")))); - assertThat(hits, everyItem(Matchers.allOf(hasIndex("test"), hasType(customType)))); assertThat(hits, containsInAnyOrder(expectedIds(numDocs))); } } @@ -489,45 +393,31 @@ private Matcher[] expectedIds(int numDocs) { return IntStream.rangeClosed(1, numDocs).boxed().map(n -> hasId(n.toString())).>toArray(Matcher[]::new); } - private MultiGetRequest indexDocs( - BulkProcessor processor, - int numDocs, - String localIndex, - String localType, - String globalIndex, - String globalType, - String globalPipeline - ) throws Exception { + private MultiGetRequest indexDocs(BulkProcessor processor, int numDocs, String localIndex, String globalIndex, String globalPipeline) + throws Exception { MultiGetRequest multiGetRequest = new MultiGetRequest(); for (int i = 1; i <= numDocs; i++) { if (randomBoolean()) { processor.add( - new IndexRequest(localIndex, localType, Integer.toString(i)).source( - XContentType.JSON, - "field", - randomRealisticUnicodeOfLengthBetween(1, 30) - ) + new IndexRequest(localIndex).id(Integer.toString(i)) + .source(XContentType.JSON, "field", 
randomRealisticUnicodeOfLengthBetween(1, 30)) ); } else { - BytesArray data = bytesBulkRequest(localIndex, localType, i); - processor.add(data, globalIndex, globalType, globalPipeline, XContentType.JSON); + BytesArray data = bytesBulkRequest(localIndex, i); + processor.add(data, globalIndex, globalPipeline, XContentType.JSON); } multiGetRequest.add(localIndex, Integer.toString(i)); } return multiGetRequest; } - private static BytesArray bytesBulkRequest(String localIndex, String localType, int id) throws IOException { + private static BytesArray bytesBulkRequest(String localIndex, int id) throws IOException { XContentBuilder action = jsonBuilder().startObject().startObject("index"); if (localIndex != null) { action.field("_index", localIndex); } - if (localType != null) { - action.field("_type", localType); - } - action.field("_id", Integer.toString(id)); action.endObject().endObject(); @@ -538,19 +428,14 @@ private static BytesArray bytesBulkRequest(String localIndex, String localType, } private MultiGetRequest indexDocs(BulkProcessor processor, int numDocs) throws Exception { - return indexDocs(processor, numDocs, "test", null, null, null, null); + return indexDocs(processor, numDocs, "test", null, null); } private static void assertResponseItems(List bulkItemResponses, int numDocs) { - assertResponseItems(bulkItemResponses, numDocs, MapperService.SINGLE_MAPPING_NAME); - } - - private static void assertResponseItems(List bulkItemResponses, int numDocs, String expectedType) { assertThat(bulkItemResponses.size(), is(numDocs)); int i = 1; for (BulkItemResponse bulkItemResponse : bulkItemResponses) { assertThat(bulkItemResponse.getIndex(), equalTo("test")); - assertThat(bulkItemResponse.getType(), equalTo(expectedType)); assertThat(bulkItemResponse.getId(), equalTo(Integer.toString(i++))); assertThat( "item " + i + " failed with cause: " + bulkItemResponse.getFailureMessage(), @@ -565,7 +450,6 @@ private static void assertMultiGetResponse(MultiGetResponse multiGetResponse, in int i = 1; for (MultiGetItemResponse multiGetItemResponse : multiGetResponse) { assertThat(multiGetItemResponse.getIndex(), equalTo("test")); - assertThat(multiGetItemResponse.getType(), equalTo("_doc")); assertThat(multiGetItemResponse.getId(), equalTo(Integer.toString(i++))); } } diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/BulkRequestWithGlobalParametersIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/BulkRequestWithGlobalParametersIT.java index d42cb7abe2c4c..35fc9d88e316c 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/BulkRequestWithGlobalParametersIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/BulkRequestWithGlobalParametersIT.java @@ -37,7 +37,6 @@ import org.opensearch.action.index.IndexRequest; import org.opensearch.action.search.SearchRequest; import org.opensearch.common.xcontent.XContentType; -import org.opensearch.rest.action.document.RestBulkAction; import org.opensearch.search.SearchHit; import java.io.IOException; @@ -46,7 +45,6 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.hasId; import static org.opensearch.test.hamcrest.OpenSearchAssertions.hasIndex; import static org.opensearch.test.hamcrest.OpenSearchAssertions.hasProperty; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.hasType; import static org.hamcrest.Matchers.both; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.emptyIterable; @@ -117,7 +115,7 @@ public void 
@@ -117,7 +115,7 @@ public void testMixPipelineOnRequestAndGlobal() throws IOException { } public void testGlobalIndex() throws IOException { - BulkRequest request = new BulkRequest("global_index", null); + BulkRequest request = new BulkRequest("global_index"); request.add(new IndexRequest().id("1").source(XContentType.JSON, "field", "bulk1")); request.add(new IndexRequest().id("2").source(XContentType.JSON, "field", "bulk2")); @@ -129,7 +127,7 @@ public void testGlobalIndex() throws IOException { @SuppressWarnings("unchecked") public void testIndexGlobalAndPerRequest() throws IOException { - BulkRequest request = new BulkRequest("global_index", null); + BulkRequest request = new BulkRequest("global_index"); request.add(new IndexRequest("local_index").id("1").source(XContentType.JSON, "field", "bulk1")); request.add( new IndexRequest().id("2") // will take global index .source(XContentType.JSON, "field", "bulk2") @@ -142,31 +140,6 @@ public void testIndexGlobalAndPerRequest() throws IOException { assertThat(hits, containsInAnyOrder(both(hasId("1")).and(hasIndex("local_index")), both(hasId("2")).and(hasIndex("global_index")))); } - public void testGlobalType() throws IOException { - BulkRequest request = new BulkRequest(null, "global_type"); - request.add(new IndexRequest("index").id("1").source(XContentType.JSON, "field", "bulk1")); - request.add(new IndexRequest("index").id("2").source(XContentType.JSON, "field", "bulk2")); - - bulkWithTypes(request); - - Iterable<SearchHit> hits = searchAll("index"); - assertThat(hits, everyItem(hasType("global_type"))); - } - - public void testTypeGlobalAndPerRequest() throws IOException { - BulkRequest request = new BulkRequest(null, "global_type"); - request.add(new IndexRequest("index1", "local_type", "1").source(XContentType.JSON, "field", "bulk1")); - request.add( - new IndexRequest("index2").id("2") // will take global type - .source(XContentType.JSON, "field", "bulk2") - ); - - bulkWithTypes(request); - - Iterable<SearchHit> hits = searchAll("index1", "index2"); - assertThat(hits, containsInAnyOrder(both(hasId("1")).and(hasType("local_type")), both(hasId("2")).and(hasType("global_type")))); - } - public void testGlobalRouting() throws IOException { createIndexWithMultipleShards("index"); BulkRequest request = new BulkRequest((String) null); @@ -194,28 +167,6 @@ public void testMixLocalAndGlobalRouting() throws IOException { assertThat(hits, containsInAnyOrder(hasId("1"), hasId("2"))); } - public void testGlobalIndexNoTypes() throws IOException { - BulkRequest request = new BulkRequest("global_index"); - request.add(new IndexRequest().id("1").source(XContentType.JSON, "field", "bulk1")); - request.add(new IndexRequest().id("2").source(XContentType.JSON, "field", "bulk2")); - - bulk(request); - - Iterable<SearchHit> hits = searchAll("global_index"); - assertThat(hits, everyItem(hasIndex("global_index"))); - } - - private BulkResponse bulkWithTypes(BulkRequest request) throws IOException { - BulkResponse bulkResponse = execute( - request, - highLevelClient()::bulk, - highLevelClient()::bulkAsync, - expectWarningsOnce(RestBulkAction.TYPES_DEPRECATION_MESSAGE) - ); - assertFalse(bulkResponse.hasFailures()); - return bulkResponse; - } - private BulkResponse bulk(BulkRequest request) throws IOException { BulkResponse bulkResponse = execute(request, highLevelClient()::bulk, highLevelClient()::bulkAsync, RequestOptions.DEFAULT); assertFalse(bulkResponse.hasFailures()); diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/CrudIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/CrudIT.java index 048e2060bb826..999c2a0e7643b 
100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/CrudIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/CrudIT.java @@ -69,12 +69,6 @@ import org.opensearch.index.VersionType; import org.opensearch.index.get.GetResult; import org.opensearch.rest.RestStatus; -import org.opensearch.rest.action.document.RestBulkAction; -import org.opensearch.rest.action.document.RestDeleteAction; -import org.opensearch.rest.action.document.RestGetAction; -import org.opensearch.rest.action.document.RestIndexAction; -import org.opensearch.rest.action.document.RestMultiGetAction; -import org.opensearch.rest.action.document.RestUpdateAction; import org.opensearch.script.Script; import org.opensearch.script.ScriptType; import org.opensearch.search.fetch.subphase.FetchSourceContext; @@ -114,7 +108,6 @@ public void testDelete() throws IOException { } DeleteResponse deleteResponse = execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync); assertEquals("index", deleteResponse.getIndex()); - assertEquals("_doc", deleteResponse.getType()); assertEquals(docId, deleteResponse.getId()); assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult()); } @@ -124,7 +117,6 @@ public void testDelete() throws IOException { DeleteRequest deleteRequest = new DeleteRequest("index", docId); DeleteResponse deleteResponse = execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync); assertEquals("index", deleteResponse.getIndex()); - assertEquals("_doc", deleteResponse.getType()); assertEquals(docId, deleteResponse.getId()); assertEquals(DocWriteResponse.Result.NOT_FOUND, deleteResponse.getResult()); } @@ -163,7 +155,6 @@ public void testDelete() throws IOException { DeleteRequest deleteRequest = new DeleteRequest("index", docId).versionType(VersionType.EXTERNAL).version(13); DeleteResponse deleteResponse = execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync); assertEquals("index", deleteResponse.getIndex()); - assertEquals("_doc", deleteResponse.getType()); assertEquals(docId, deleteResponse.getId()); assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult()); } @@ -200,37 +191,11 @@ public void testDelete() throws IOException { DeleteRequest deleteRequest = new DeleteRequest("index", docId).routing("foo"); DeleteResponse deleteResponse = execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync); assertEquals("index", deleteResponse.getIndex()); - assertEquals("_doc", deleteResponse.getType()); assertEquals(docId, deleteResponse.getId()); assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult()); } } - public void testDeleteWithTypes() throws IOException { - String docId = "id"; - IndexRequest indexRequest = new IndexRequest("index", "type", docId); - indexRequest.source(Collections.singletonMap("foo", "bar")); - execute( - indexRequest, - highLevelClient()::index, - highLevelClient()::indexAsync, - expectWarningsOnce(RestIndexAction.TYPES_DEPRECATION_MESSAGE) - ); - - DeleteRequest deleteRequest = new DeleteRequest("index", "type", docId); - DeleteResponse deleteResponse = execute( - deleteRequest, - highLevelClient()::delete, - highLevelClient()::deleteAsync, - expectWarningsOnce(RestDeleteAction.TYPES_DEPRECATION_MESSAGE) - ); - - assertEquals("index", deleteResponse.getIndex()); - assertEquals("type", deleteResponse.getType()); - assertEquals(docId, deleteResponse.getId()); - assertEquals(DocWriteResponse.Result.DELETED, 
deleteResponse.getResult()); - } - public void testExists() throws IOException { { GetRequest getRequest = new GetRequest("index", "id"); @@ -366,7 +331,6 @@ public void testGet() throws IOException { } GetResponse getResponse = execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync); assertEquals("index", getResponse.getIndex()); - assertEquals("_doc", getResponse.getType()); assertEquals("id", getResponse.getId()); assertTrue(getResponse.isExists()); assertFalse(getResponse.isSourceEmpty()); @@ -377,7 +341,6 @@ public void testGet() throws IOException { GetRequest getRequest = new GetRequest("index", "does_not_exist"); GetResponse getResponse = execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync); assertEquals("index", getResponse.getIndex()); - assertEquals("_doc", getResponse.getType()); assertEquals("does_not_exist", getResponse.getId()); assertFalse(getResponse.isExists()); assertEquals(-1, getResponse.getVersion()); @@ -389,7 +352,6 @@ public void testGet() throws IOException { getRequest.fetchSourceContext(new FetchSourceContext(false, Strings.EMPTY_ARRAY, Strings.EMPTY_ARRAY)); GetResponse getResponse = execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync); assertEquals("index", getResponse.getIndex()); - assertEquals("_doc", getResponse.getType()); assertEquals("id", getResponse.getId()); assertTrue(getResponse.isExists()); assertTrue(getResponse.isSourceEmpty()); @@ -405,7 +367,6 @@ public void testGet() throws IOException { } GetResponse getResponse = execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync); assertEquals("index", getResponse.getIndex()); - assertEquals("_doc", getResponse.getType()); assertEquals("id", getResponse.getId()); assertTrue(getResponse.isExists()); assertFalse(getResponse.isSourceEmpty()); @@ -416,36 +377,6 @@ public void testGet() throws IOException { } } - public void testGetWithTypes() throws IOException { - String document = "{\"field\":\"value\"}"; - IndexRequest indexRequest = new IndexRequest("index", "type", "id"); - indexRequest.source(document, XContentType.JSON); - indexRequest.setRefreshPolicy(RefreshPolicy.IMMEDIATE); - execute( - indexRequest, - highLevelClient()::index, - highLevelClient()::indexAsync, - expectWarningsOnce(RestIndexAction.TYPES_DEPRECATION_MESSAGE) - ); - - GetRequest getRequest = new GetRequest("index", "type", "id"); - GetResponse getResponse = execute( - getRequest, - highLevelClient()::get, - highLevelClient()::getAsync, - expectWarningsOnce(RestGetAction.TYPES_DEPRECATION_MESSAGE) - ); - - assertEquals("index", getResponse.getIndex()); - assertEquals("type", getResponse.getType()); - assertEquals("id", getResponse.getId()); - - assertTrue(getResponse.isExists()); - assertFalse(getResponse.isSourceEmpty()); - assertEquals(1L, getResponse.getVersion()); - assertEquals(document, getResponse.getSourceAsString()); - } - public void testMultiGet() throws IOException { { MultiGetRequest multiGetRequest = new MultiGetRequest(); @@ -457,7 +388,6 @@ public void testMultiGet() throws IOException { assertTrue(response.getResponses()[0].isFailed()); assertNull(response.getResponses()[0].getResponse()); assertEquals("id1", response.getResponses()[0].getFailure().getId()); - assertNull(response.getResponses()[0].getFailure().getType()); assertEquals("index", response.getResponses()[0].getFailure().getIndex()); assertEquals( "OpenSearch exception [type=index_not_found_exception, reason=no such index [index]]", @@ -467,7 +397,6 @@ public void testMultiGet() 
throws IOException { assertTrue(response.getResponses()[1].isFailed()); assertNull(response.getResponses()[1].getResponse()); assertEquals("id2", response.getResponses()[1].getId()); - assertNull(response.getResponses()[1].getType()); assertEquals("index", response.getResponses()[1].getIndex()); assertEquals( "OpenSearch exception [type=index_not_found_exception, reason=no such index [index]]", @@ -493,47 +422,26 @@ public void testMultiGet() throws IOException { assertFalse(response.getResponses()[0].isFailed()); assertNull(response.getResponses()[0].getFailure()); assertEquals("id1", response.getResponses()[0].getId()); - assertEquals("_doc", response.getResponses()[0].getType()); assertEquals("index", response.getResponses()[0].getIndex()); assertEquals(Collections.singletonMap("field", "value1"), response.getResponses()[0].getResponse().getSource()); assertFalse(response.getResponses()[1].isFailed()); assertNull(response.getResponses()[1].getFailure()); assertEquals("id2", response.getResponses()[1].getId()); - assertEquals("_doc", response.getResponses()[1].getType()); assertEquals("index", response.getResponses()[1].getIndex()); assertEquals(Collections.singletonMap("field", "value2"), response.getResponses()[1].getResponse().getSource()); } } - public void testMultiGetWithTypes() throws IOException { + public void testMultiGetWithIds() throws IOException { BulkRequest bulk = new BulkRequest(); bulk.setRefreshPolicy(RefreshPolicy.IMMEDIATE); - bulk.add(new IndexRequest("index", "type", "id1").source("{\"field\":\"value1\"}", XContentType.JSON)); - bulk.add(new IndexRequest("index", "type", "id2").source("{\"field\":\"value2\"}", XContentType.JSON)); + bulk.add(new IndexRequest("index").id("id1").source("{\"field\":\"value1\"}", XContentType.JSON)); + bulk.add(new IndexRequest("index").id("id2").source("{\"field\":\"value2\"}", XContentType.JSON)); - highLevelClient().bulk(bulk, expectWarningsOnce(RestBulkAction.TYPES_DEPRECATION_MESSAGE)); MultiGetRequest multiGetRequest = new MultiGetRequest(); multiGetRequest.add("index", "id1"); - multiGetRequest.add("index", "type", "id2"); - - MultiGetResponse response = execute( - multiGetRequest, - highLevelClient()::mget, - highLevelClient()::mgetAsync, - expectWarningsOnce(RestMultiGetAction.TYPES_DEPRECATION_MESSAGE) - ); - assertEquals(2, response.getResponses().length); - - GetResponse firstResponse = response.getResponses()[0].getResponse(); - assertEquals("index", firstResponse.getIndex()); - assertEquals("type", firstResponse.getType()); - assertEquals("id1", firstResponse.getId()); - - GetResponse secondResponse = response.getResponses()[1].getResponse(); - assertEquals("index", secondResponse.getIndex()); - assertEquals("type", secondResponse.getType()); - assertEquals("id2", secondResponse.getId()); + multiGetRequest.add("index", "id2"); } public void testGetSource() throws IOException { @@ -568,7 +476,7 @@ public void testGetSource() throws IOException { ); assertEquals(RestStatus.NOT_FOUND, exception.status()); assertEquals( - "OpenSearch exception [type=resource_not_found_exception, " + "reason=Document not found [index]/[_doc]/[does_not_exist]]", + "OpenSearch exception [type=resource_not_found_exception, " + "reason=Document not found [index]/[does_not_exist]]", exception.getMessage() ); } @@ -622,7 +530,6 @@ public void testIndex() throws IOException { assertEquals(RestStatus.CREATED, indexResponse.status()); assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); assertEquals("index", 
indexResponse.getIndex()); - assertEquals("_doc", indexResponse.getType()); assertTrue(Strings.hasLength(indexResponse.getId())); assertEquals(1L, indexResponse.getVersion()); assertNotNull(indexResponse.getShardId()); @@ -642,7 +549,6 @@ public void testIndex() throws IOException { IndexResponse indexResponse = execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync); assertEquals(RestStatus.CREATED, indexResponse.status()); assertEquals("index", indexResponse.getIndex()); - assertEquals("_doc", indexResponse.getType()); assertEquals("id", indexResponse.getId()); assertEquals(1L, indexResponse.getVersion()); @@ -652,7 +558,6 @@ public void testIndex() throws IOException { indexResponse = execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync); assertEquals(RestStatus.OK, indexResponse.status()); assertEquals("index", indexResponse.getIndex()); - assertEquals("_doc", indexResponse.getType()); assertEquals("id", indexResponse.getId()); assertEquals(2L, indexResponse.getVersion()); @@ -710,7 +615,6 @@ public void testIndex() throws IOException { IndexResponse indexResponse = execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync); assertEquals(RestStatus.CREATED, indexResponse.status()); assertEquals("index", indexResponse.getIndex()); - assertEquals("_doc", indexResponse.getType()); assertEquals("external_version_type", indexResponse.getId()); assertEquals(12L, indexResponse.getVersion()); } @@ -722,7 +626,6 @@ public void testIndex() throws IOException { IndexResponse indexResponse = execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync); assertEquals(RestStatus.CREATED, indexResponse.status()); assertEquals("index", indexResponse.getIndex()); - assertEquals("_doc", indexResponse.getType()); assertEquals("with_create_op_type", indexResponse.getId()); OpenSearchStatusException exception = expectThrows( @@ -739,22 +642,6 @@ public void testIndex() throws IOException { } } - public void testIndexWithTypes() throws IOException { - final XContentType xContentType = randomFrom(XContentType.values()); - IndexRequest indexRequest = new IndexRequest("index", "some_type", "some_id"); - indexRequest.source(XContentBuilder.builder(xContentType.xContent()).startObject().field("test", "test").endObject()); - IndexResponse indexResponse = execute( - indexRequest, - highLevelClient()::index, - highLevelClient()::indexAsync, - expectWarningsOnce(RestIndexAction.TYPES_DEPRECATION_MESSAGE) - ); - assertEquals(RestStatus.CREATED, indexResponse.status()); - assertEquals("index", indexResponse.getIndex()); - assertEquals("some_type", indexResponse.getType()); - assertEquals("some_id", indexResponse.getId()); - } - public void testUpdate() throws IOException { { UpdateRequest updateRequest = new UpdateRequest("index", "does_not_exist"); @@ -766,7 +653,7 @@ public void testUpdate() throws IOException { ); assertEquals(RestStatus.NOT_FOUND, exception.status()); assertEquals( - "OpenSearch exception [type=document_missing_exception, reason=[_doc][does_not_exist]: document missing]", + "OpenSearch exception [type=document_missing_exception, reason=[does_not_exist]: document missing]", exception.getMessage() ); } @@ -891,7 +778,6 @@ public void testUpdate() throws IOException { UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); assertEquals(RestStatus.CREATED, updateResponse.status()); assertEquals("index", updateResponse.getIndex()); - assertEquals("_doc", 
updateResponse.getType()); assertEquals("with_upsert", updateResponse.getId()); GetResult getResult = updateResponse.getGetResult(); assertEquals(1L, updateResponse.getVersion()); @@ -906,7 +792,6 @@ public void testUpdate() throws IOException { UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); assertEquals(RestStatus.CREATED, updateResponse.status()); assertEquals("index", updateResponse.getIndex()); - assertEquals("_doc", updateResponse.getType()); assertEquals("with_doc_as_upsert", updateResponse.getId()); GetResult getResult = updateResponse.getGetResult(); assertEquals(1L, updateResponse.getVersion()); @@ -922,7 +807,6 @@ public void testUpdate() throws IOException { UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); assertEquals(RestStatus.CREATED, updateResponse.status()); assertEquals("index", updateResponse.getIndex()); - assertEquals("_doc", updateResponse.getType()); assertEquals("with_scripted_upsert", updateResponse.getId()); GetResult getResult = updateResponse.getGetResult(); @@ -955,29 +839,6 @@ public void testUpdate() throws IOException { } } - public void testUpdateWithTypes() throws IOException { - IndexRequest indexRequest = new IndexRequest("index", "type", "id"); - indexRequest.source(singletonMap("field", "value")); - IndexResponse indexResponse = execute( - indexRequest, - highLevelClient()::index, - highLevelClient()::indexAsync, - expectWarningsOnce(RestIndexAction.TYPES_DEPRECATION_MESSAGE) - ); - - UpdateRequest updateRequest = new UpdateRequest("index", "type", "id"); - updateRequest.doc(singletonMap("field", "updated"), randomFrom(XContentType.values())); - UpdateResponse updateResponse = execute( - updateRequest, - highLevelClient()::update, - highLevelClient()::updateAsync, - expectWarningsOnce(RestUpdateAction.TYPES_DEPRECATION_MESSAGE) - ); - - assertEquals(RestStatus.OK, updateResponse.status()); - assertEquals(indexResponse.getVersion() + 1, updateResponse.getVersion()); - } - public void testBulk() throws IOException { int nbItems = randomIntBetween(10, 100); boolean[] errors = new boolean[nbItems]; @@ -1141,7 +1002,6 @@ private void validateBulkResponses(int nbItems, boolean[] errors, BulkResponse b assertEquals(i, bulkItemResponse.getItemId()); assertEquals("index", bulkItemResponse.getIndex()); - assertEquals("_doc", bulkItemResponse.getType()); assertEquals(String.valueOf(i), bulkItemResponse.getId()); DocWriteRequest.OpType requestOpType = bulkRequest.requests().get(i).opType(); @@ -1167,7 +1027,6 @@ public void testUrlEncode() throws IOException { indexRequest.source("field", "value"); IndexResponse indexResponse = highLevelClient().index(indexRequest, RequestOptions.DEFAULT); assertEquals(expectedIndex, indexResponse.getIndex()); - assertEquals("_doc", indexResponse.getType()); assertEquals("id#1", indexResponse.getId()); } { @@ -1175,7 +1034,6 @@ public void testUrlEncode() throws IOException { GetResponse getResponse = highLevelClient().get(getRequest, RequestOptions.DEFAULT); assertTrue(getResponse.isExists()); assertEquals(expectedIndex, getResponse.getIndex()); - assertEquals("_doc", getResponse.getType()); assertEquals("id#1", getResponse.getId()); } @@ -1185,7 +1043,6 @@ public void testUrlEncode() throws IOException { indexRequest.source("field", "value"); IndexResponse indexResponse = highLevelClient().index(indexRequest, RequestOptions.DEFAULT); assertEquals("index", indexResponse.getIndex()); - 
assertEquals("_doc", indexResponse.getType()); assertEquals(docId, indexResponse.getId()); } { @@ -1193,7 +1050,6 @@ public void testUrlEncode() throws IOException { GetResponse getResponse = highLevelClient().get(getRequest, RequestOptions.DEFAULT); assertTrue(getResponse.isExists()); assertEquals("index", getResponse.getIndex()); - assertEquals("_doc", getResponse.getType()); assertEquals(docId, getResponse.getId()); } @@ -1209,7 +1065,6 @@ public void testParamsEncode() throws IOException { indexRequest.routing(routing); IndexResponse indexResponse = highLevelClient().index(indexRequest, RequestOptions.DEFAULT); assertEquals("index", indexResponse.getIndex()); - assertEquals("_doc", indexResponse.getType()); assertEquals("id", indexResponse.getId()); } { @@ -1217,7 +1072,6 @@ public void testParamsEncode() throws IOException { GetResponse getResponse = highLevelClient().get(getRequest, RequestOptions.DEFAULT); assertTrue(getResponse.isExists()); assertEquals("index", getResponse.getIndex()); - assertEquals("_doc", getResponse.getType()); assertEquals("id", getResponse.getId()); assertEquals(routing, getResponse.getField("_routing").getValue()); } diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java index d33abb0552776..69ce518173042 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java @@ -46,7 +46,6 @@ import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.action.admin.indices.flush.FlushResponse; -import org.opensearch.action.admin.indices.flush.SyncedFlushRequest; import org.opensearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.opensearch.action.admin.indices.open.OpenIndexRequest; @@ -126,15 +125,11 @@ import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryBuilders; -import org.opensearch.indices.flush.SyncedFlushService; import org.opensearch.rest.RestStatus; import org.opensearch.rest.action.admin.indices.RestCreateIndexAction; -import org.opensearch.rest.action.admin.indices.RestGetFieldMappingAction; import org.opensearch.rest.action.admin.indices.RestGetIndexTemplateAction; import org.opensearch.rest.action.admin.indices.RestGetIndicesAction; -import org.opensearch.rest.action.admin.indices.RestGetMappingAction; import org.opensearch.rest.action.admin.indices.RestPutIndexTemplateAction; -import org.opensearch.rest.action.admin.indices.RestPutMappingAction; import org.opensearch.rest.action.admin.indices.RestRolloverIndexAction; import java.io.IOException; @@ -583,32 +578,6 @@ public void testPutMapping() throws IOException { assertEquals("text", XContentMapValues.extractValue(indexName + ".mappings.properties.field.type", getIndexResponse)); } - public void testPutMappingWithTypes() throws IOException { - String indexName = "mapping_index"; - createIndex(indexName, Settings.EMPTY); - - org.opensearch.action.admin.indices.mapping.put.PutMappingRequest putMappingRequest = - new org.opensearch.action.admin.indices.mapping.put.PutMappingRequest(indexName); - putMappingRequest.type("some_type"); - - XContentBuilder mappingBuilder = JsonXContent.contentBuilder(); - 
mappingBuilder.startObject().startObject("properties").startObject("field"); - mappingBuilder.field("type", "text"); - mappingBuilder.endObject().endObject().endObject(); - putMappingRequest.source(mappingBuilder); - - AcknowledgedResponse putMappingResponse = execute( - putMappingRequest, - highLevelClient().indices()::putMapping, - highLevelClient().indices()::putMappingAsync, - expectWarningsOnce(RestPutMappingAction.TYPES_DEPRECATION_MESSAGE) - ); - assertTrue(putMappingResponse.isAcknowledged()); - - Map<String, Object> getIndexResponse = getAsMap(indexName); - assertEquals("text", XContentMapValues.extractValue(indexName + ".mappings.properties.field.type", getIndexResponse)); - } - public void testGetMapping() throws IOException { String indexName = "test"; createIndex(indexName, Settings.EMPTY); @@ -648,47 +617,6 @@ public void testGetMapping() throws IOException { assertThat(mappings, equalTo(expected)); } - public void testGetMappingWithTypes() throws IOException { - String indexName = "test"; - createIndex(indexName, Settings.EMPTY); - - PutMappingRequest putMappingRequest = new PutMappingRequest(indexName); - XContentBuilder mappingBuilder = JsonXContent.contentBuilder(); - mappingBuilder.startObject().startObject("properties").startObject("field"); - mappingBuilder.field("type", "text"); - mappingBuilder.endObject().endObject().endObject(); - putMappingRequest.source(mappingBuilder); - - AcknowledgedResponse putMappingResponse = execute( - putMappingRequest, - highLevelClient().indices()::putMapping, - highLevelClient().indices()::putMappingAsync - ); - assertTrue(putMappingResponse.isAcknowledged()); - - Map<String, Object> getIndexResponse = getAsMap(indexName); - assertEquals("text", XContentMapValues.extractValue(indexName + ".mappings.properties.field.type", getIndexResponse)); - - org.opensearch.action.admin.indices.mapping.get.GetMappingsRequest request = - new org.opensearch.action.admin.indices.mapping.get.GetMappingsRequest().indices(indexName); - - org.opensearch.action.admin.indices.mapping.get.GetMappingsResponse getMappingsResponse = execute( - request, - highLevelClient().indices()::getMapping, - highLevelClient().indices()::getMappingAsync, - expectWarningsOnce(RestGetMappingAction.TYPES_DEPRECATION_MESSAGE) - ); - - Map<String, Object> mappings = getMappingsResponse.getMappings().get(indexName).get("_doc").sourceAsMap(); - Map<String, String> type = new HashMap<>(); - type.put("type", "text"); - Map<String, Object> field = new HashMap<>(); - field.put("field", type); - Map<String, Object> expected = new HashMap<>(); - expected.put("properties", field); - assertThat(mappings, equalTo(expected)); - } - public void testGetFieldMapping() throws IOException { String indexName = "test"; createIndex(indexName, Settings.EMPTY); @@ -725,45 +653,6 @@ public void testGetFieldMapping() throws IOException { assertThat(fieldMappingMap, equalTo(Collections.singletonMap("field", metadata)));  }
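// For reference: a minimal sketch of the typeless mappings API that replaces the removed
// *WithTypes variants below. Illustrative only, not part of the patch; `client` is an assumed
// RestHighLevelClient, and the request/response classes are the org.opensearch.client.indices
// variants already used by testGetMapping above.
GetMappingsRequest request = new GetMappingsRequest().indices("test");
GetMappingsResponse response = client.indices().getMapping(request, RequestOptions.DEFAULT);
// The response maps index name straight to MappingMetadata; there is no "_doc" level to unwrap.
Map<String, Object> mappings = response.mappings().get("test").sourceAsMap();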
- public void testGetFieldMappingWithTypes() throws IOException { - String indexName = "test"; - createIndex(indexName, Settings.EMPTY); - - PutMappingRequest putMappingRequest = new PutMappingRequest(indexName); - XContentBuilder mappingBuilder = JsonXContent.contentBuilder(); - mappingBuilder.startObject().startObject("properties").startObject("field"); - mappingBuilder.field("type", "text"); - mappingBuilder.endObject().endObject().endObject(); - putMappingRequest.source(mappingBuilder); - - AcknowledgedResponse putMappingResponse = execute( - putMappingRequest, - highLevelClient().indices()::putMapping, - highLevelClient().indices()::putMappingAsync - ); - assertTrue(putMappingResponse.isAcknowledged()); - - org.opensearch.action.admin.indices.mapping.get.GetFieldMappingsRequest getFieldMappingsRequest = - new org.opensearch.action.admin.indices.mapping.get.GetFieldMappingsRequest().indices(indexName).types("_doc").fields("field"); - - org.opensearch.action.admin.indices.mapping.get.GetFieldMappingsResponse getFieldMappingsResponse = execute( - getFieldMappingsRequest, - highLevelClient().indices()::getFieldMapping, - highLevelClient().indices()::getFieldMappingAsync, - expectWarningsOnce(RestGetFieldMappingAction.TYPES_DEPRECATION_MESSAGE) - ); - - final Map<String, org.opensearch.action.admin.indices.mapping.get.GetFieldMappingsResponse.FieldMappingMetadata> fieldMappingMap = - getFieldMappingsResponse.mappings().get(indexName).get("_doc"); - - final org.opensearch.action.admin.indices.mapping.get.GetFieldMappingsResponse.FieldMappingMetadata metadata = - new org.opensearch.action.admin.indices.mapping.get.GetFieldMappingsResponse.FieldMappingMetadata( - "field", - new BytesArray("{\"field\":{\"type\":\"text\"}}") - ); - assertThat(fieldMappingMap, equalTo(Collections.singletonMap("field", metadata))); - } - public void testDeleteIndex() throws IOException { { // Delete index if exists @@ -1080,39 +969,6 @@ public void testFlush() throws IOException { } } - public void testSyncedFlush() throws IOException { - { - String index = "index"; - Settings settings = Settings.builder().put("number_of_shards", 1).put("number_of_replicas", 0).build(); - createIndex(index, settings); - SyncedFlushRequest syncedFlushRequest = new SyncedFlushRequest(index); - SyncedFlushResponse flushResponse = execute( - syncedFlushRequest, - highLevelClient().indices()::flushSynced, - highLevelClient().indices()::flushSyncedAsync, - expectWarningsOnce(SyncedFlushService.SYNCED_FLUSH_DEPRECATION_MESSAGE) - ); - assertThat(flushResponse.totalShards(), equalTo(1)); - assertThat(flushResponse.successfulShards(), equalTo(1)); - assertThat(flushResponse.failedShards(), equalTo(0)); - } - { - String nonExistentIndex = "non_existent_index"; - assertFalse(indexExists(nonExistentIndex)); - SyncedFlushRequest syncedFlushRequest = new SyncedFlushRequest(nonExistentIndex); - OpenSearchException exception = expectThrows( - OpenSearchException.class, - () -> execute( - syncedFlushRequest, - highLevelClient().indices()::flushSynced, - highLevelClient().indices()::flushSyncedAsync, - expectWarningsOnce(SyncedFlushService.SYNCED_FLUSH_DEPRECATION_MESSAGE) - ) - ); - assertEquals(RestStatus.NOT_FOUND, exception.status()); - } - } - public void testClearCache() throws IOException { { String index = "index"; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java index 28728a95ae976..0ea2280b386eb 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java @@ -45,7 +45,6 @@ import org.opensearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; import org.opensearch.action.admin.indices.flush.FlushRequest; -import org.opensearch.action.admin.indices.flush.SyncedFlushRequest; import org.opensearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.opensearch.action.admin.indices.open.OpenIndexRequest; import org.opensearch.action.admin.indices.refresh.RefreshRequest; @@ -750,33 +749,6 @@ public void testFlush() { 
Assert.assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME)); } - public void testSyncedFlush() { - String[] indices = OpenSearchTestCase.randomBoolean() ? null : RequestConvertersTests.randomIndicesNames(0, 5); - SyncedFlushRequest syncedFlushRequest; - if (OpenSearchTestCase.randomBoolean()) { - syncedFlushRequest = new SyncedFlushRequest(indices); - } else { - syncedFlushRequest = new SyncedFlushRequest(); - syncedFlushRequest.indices(indices); - } - Map expectedParams = new HashMap<>(); - RequestConvertersTests.setRandomIndicesOptions( - syncedFlushRequest::indicesOptions, - syncedFlushRequest::indicesOptions, - expectedParams - ); - Request request = IndicesRequestConverters.flushSynced(syncedFlushRequest); - StringJoiner endpoint = new StringJoiner("/", "/", ""); - if (indices != null && indices.length > 0) { - endpoint.add(String.join(",", indices)); - } - endpoint.add("_flush/synced"); - Assert.assertThat(request.getEndpoint(), equalTo(endpoint.toString())); - Assert.assertThat(request.getParameters(), equalTo(expectedParams)); - Assert.assertThat(request.getEntity(), nullValue()); - Assert.assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME)); - } - public void testForceMerge() { String[] indices = OpenSearchTestCase.randomBoolean() ? null : RequestConvertersTests.randomIndicesNames(0, 5); ForceMergeRequest forceMergeRequest; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java index 51b0ce00a14cd..32c6cde0725b4 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java @@ -172,10 +172,6 @@ public void testGet() { getAndExistsTest(RequestConverters::get, HttpGet.METHOD_NAME); } - public void testGetWithType() { - getAndExistsWithTypeTest(RequestConverters::get, HttpGet.METHOD_NAME); - } - public void testSourceExists() throws IOException { doTestSourceExists((index, id) -> new GetSourceRequest(index, id)); } @@ -221,13 +217,7 @@ private static void doTestSourceExists(BiFunction requestConverter, String method) { String index = randomAlphaOfLengthBetween(3, 10); String id = randomAlphaOfLengthBetween(3, 10); @@ -435,18 +409,6 @@ private static void getAndExistsTest(Function requestConver assertEquals(method, request.getMethod()); } - private static void getAndExistsWithTypeTest(Function requestConverter, String method) { - String index = randomAlphaOfLengthBetween(3, 10); - String type = randomAlphaOfLengthBetween(3, 10); - String id = randomAlphaOfLengthBetween(3, 10); - GetRequest getRequest = new GetRequest(index, type, id); - - Request request = requestConverter.apply(getRequest); - assertEquals("/" + index + "/" + type + "/" + id, request.getEndpoint()); - assertNull(request.getEntity()); - assertEquals(method, request.getMethod()); - } - public void testReindex() throws IOException { ReindexRequest reindexRequest = new ReindexRequest(); reindexRequest.setSourceIndices("source_idx"); @@ -468,15 +430,9 @@ public void testReindex() throws IOException { ); reindexRequest.setRemoteInfo(remoteInfo); } - if (randomBoolean()) { - reindexRequest.setSourceDocTypes("doc", "tweet"); - } if (randomBoolean()) { reindexRequest.setSourceBatchSize(randomInt(100)); } - if (randomBoolean()) { - reindexRequest.setDestDocType("tweet_and_doc"); - } if (randomBoolean()) { reindexRequest.setDestOpType("create"); } @@ 
-536,9 +492,6 @@ public void testUpdateByQuery() throws IOException { UpdateByQueryRequest updateByQueryRequest = new UpdateByQueryRequest(); updateByQueryRequest.indices(randomIndicesNames(1, 5)); Map expectedParams = new HashMap<>(); - if (randomBoolean()) { - updateByQueryRequest.setDocTypes(generateRandomStringArray(5, 5, false, false)); - } if (randomBoolean()) { int batchSize = randomInt(100); updateByQueryRequest.setBatchSize(batchSize); @@ -600,9 +553,6 @@ public void testUpdateByQuery() throws IOException { Request request = RequestConverters.updateByQuery(updateByQueryRequest); StringJoiner joiner = new StringJoiner("/", "/", ""); joiner.add(String.join(",", updateByQueryRequest.indices())); - if (updateByQueryRequest.getDocTypes().length > 0) { - joiner.add(String.join(",", updateByQueryRequest.getDocTypes())); - } joiner.add("_update_by_query"); assertEquals(joiner.toString(), request.getEndpoint()); assertEquals(HttpPost.METHOD_NAME, request.getMethod()); @@ -614,9 +564,6 @@ public void testDeleteByQuery() throws IOException { DeleteByQueryRequest deleteByQueryRequest = new DeleteByQueryRequest(); deleteByQueryRequest.indices(randomIndicesNames(1, 5)); Map expectedParams = new HashMap<>(); - if (randomBoolean()) { - deleteByQueryRequest.setDocTypes(generateRandomStringArray(5, 5, false, false)); - } if (randomBoolean()) { int batchSize = randomInt(100); deleteByQueryRequest.setBatchSize(batchSize); @@ -671,9 +618,6 @@ public void testDeleteByQuery() throws IOException { Request request = RequestConverters.deleteByQuery(deleteByQueryRequest); StringJoiner joiner = new StringJoiner("/", "/", ""); joiner.add(String.join(",", deleteByQueryRequest.indices())); - if (deleteByQueryRequest.getDocTypes().length > 0) { - joiner.add(String.join(",", deleteByQueryRequest.getDocTypes())); - } joiner.add("_delete_by_query"); assertEquals(joiner.toString(), request.getEndpoint()); assertEquals(HttpPost.METHOD_NAME, request.getMethod()); @@ -793,49 +737,6 @@ public void testIndex() throws IOException { } } - public void testIndexWithType() throws IOException { - String index = randomAlphaOfLengthBetween(3, 10); - String type = randomAlphaOfLengthBetween(3, 10); - IndexRequest indexRequest = new IndexRequest(index, type); - String id = randomBoolean() ? 
randomAlphaOfLengthBetween(3, 10) : null; - indexRequest.id(id); - - String method = HttpPost.METHOD_NAME; - if (id != null) { - method = HttpPut.METHOD_NAME; - if (randomBoolean()) { - indexRequest.opType(DocWriteRequest.OpType.CREATE); - } - } - XContentType xContentType = randomFrom(XContentType.values()); - int nbFields = randomIntBetween(0, 10); - try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) { - builder.startObject(); - for (int i = 0; i < nbFields; i++) { - builder.field("field_" + i, i); - } - builder.endObject(); - indexRequest.source(builder); - } - - Request request = RequestConverters.index(indexRequest); - if (indexRequest.opType() == DocWriteRequest.OpType.CREATE) { - assertEquals("/" + index + "/" + type + "/" + id + "/_create", request.getEndpoint()); - } else if (id != null) { - assertEquals("/" + index + "/" + type + "/" + id, request.getEndpoint()); - } else { - assertEquals("/" + index + "/" + type, request.getEndpoint()); - } - assertEquals(method, request.getMethod()); - - HttpEntity entity = request.getEntity(); - assertTrue(entity instanceof NByteArrayEntity); - assertEquals(indexRequest.getContentType().mediaTypeWithoutParameters(), entity.getContentType().getValue()); - try (XContentParser parser = createParser(xContentType.xContent(), entity.getContent())) { - assertEquals(nbFields, parser.map().size()); - } - } - public void testUpdate() throws IOException { XContentType xContentType = randomFrom(XContentType.values()); @@ -944,23 +845,6 @@ private static void setRandomIfSeqNoAndTerm(DocWriteRequest request, Map { UpdateRequest updateRequest = new UpdateRequest(); @@ -1055,7 +939,6 @@ public void testBulk() throws IOException { assertEquals(originalRequest.opType(), parsedRequest.opType()); assertEquals(originalRequest.index(), parsedRequest.index()); - assertEquals(originalRequest.type(), parsedRequest.type()); assertEquals(originalRequest.id(), parsedRequest.id()); assertEquals(originalRequest.routing(), parsedRequest.routing()); assertEquals(originalRequest.version(), parsedRequest.version()); @@ -1191,10 +1074,6 @@ public void testSearch() throws Exception { if (Strings.hasLength(index)) { endpoint.add(index); } - String type = String.join(",", searchRequest.types()); - if (Strings.hasLength(type)) { - endpoint.add(type); - } endpoint.add(searchEndpoint); assertEquals(HttpPost.METHOD_NAME, request.getMethod()); assertEquals(endpoint.toString(), request.getEndpoint()); @@ -1204,14 +1083,6 @@ public void testSearch() throws Exception { public static SearchRequest createTestSearchRequest(String[] indices, Map expectedParams) { SearchRequest searchRequest = new SearchRequest(indices); - - int numTypes = randomIntBetween(0, 5); - String[] types = new String[numTypes]; - for (int i = 0; i < numTypes; i++) { - types[i] = "type-" + randomAlphaOfLengthBetween(2, 5); - } - searchRequest.types(types); - setRandomSearchParams(searchRequest, expectedParams); setRandomIndicesOptions(searchRequest::indicesOptions, searchRequest::indicesOptions, expectedParams); @@ -1278,7 +1149,6 @@ public static SearchSourceBuilder createTestSearchSourceBuilder() { public void testSearchNullIndicesAndTypes() { expectThrows(NullPointerException.class, () -> new SearchRequest((String[]) null)); expectThrows(NullPointerException.class, () -> new SearchRequest().indices((String[]) null)); - expectThrows(NullPointerException.class, () -> new SearchRequest().types((String[]) null)); } public void testCountNotNullSource() throws IOException { @@ -1293,14 
+1163,6 @@ public void testCountNotNullSource() throws IOException { public void testCount() throws Exception { String[] indices = randomIndicesNames(0, 5); CountRequest countRequest = new CountRequest(indices); - - int numTypes = randomIntBetween(0, 5); - String[] types = new String[numTypes]; - for (int i = 0; i < numTypes; i++) { - types[i] = "type-" + randomAlphaOfLengthBetween(2, 5); - } - countRequest.types(types); - Map<String, String> expectedParams = new HashMap<>(); setRandomCountParams(countRequest, expectedParams); setRandomIndicesOptions(countRequest::indicesOptions, countRequest::indicesOptions, expectedParams); @@ -1317,10 +1179,6 @@ public void testCount() throws Exception { if (Strings.hasLength(index)) { endpoint.add(index); } - String type = String.join(",", types); - if (Strings.hasLength(type)) { - endpoint.add(type); - } endpoint.add("_count"); assertEquals(HttpPost.METHOD_NAME, request.getMethod()); assertEquals(endpoint.toString(), request.getEndpoint()); @@ -1328,12 +1186,6 @@ public void testCount() throws Exception { assertToXContentBody(countRequest, request.getEntity()); } - public void testCountNullIndicesAndTypes() { - expectThrows(NullPointerException.class, () -> new CountRequest((String[]) null)); - expectThrows(NullPointerException.class, () -> new CountRequest().indices((String[]) null)); - expectThrows(NullPointerException.class, () -> new CountRequest().types((String[]) null)); - } - private static void setRandomCountParams(CountRequest countRequest, Map<String, String> expectedParams) { if (randomBoolean()) { countRequest.routing(randomAlphaOfLengthBetween(3, 10)); @@ -1416,7 +1268,6 @@ public void testMultiSearch() throws IOException { null, null, null, - null, xContentRegistry(), true, deprecationLogger @@ -1602,21 +1453,6 @@ public void testExplain() throws IOException { assertToXContentBody(explainRequest, request.getEntity()); } - public void testExplainWithType() throws IOException { - String index = randomAlphaOfLengthBetween(3, 10); - String type = randomAlphaOfLengthBetween(3, 10); - String id = randomAlphaOfLengthBetween(3, 10); - - ExplainRequest explainRequest = new ExplainRequest(index, type, id); - explainRequest.query(QueryBuilders.termQuery(randomAlphaOfLengthBetween(3, 10), randomAlphaOfLengthBetween(3, 10))); - - Request request = RequestConverters.explain(explainRequest); - assertEquals(HttpGet.METHOD_NAME, request.getMethod()); - assertEquals("/" + index + "/" + type + "/" + id + "/_explain", request.getEndpoint()); - - assertToXContentBody(explainRequest, request.getEntity()); - } - public void testTermVectors() throws IOException { String index = randomAlphaOfLengthBetween(3, 10); String id = randomAlphaOfLengthBetween(3, 10); diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java index 22ff3aebae9ac..19e287fb91be5 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java @@ -64,7 +64,6 @@ import org.opensearch.join.aggregations.Children; import org.opensearch.join.aggregations.ChildrenAggregationBuilder; import org.opensearch.rest.RestStatus; -import org.opensearch.rest.action.document.RestIndexAction; import org.opensearch.script.Script; import org.opensearch.script.ScriptType; import org.opensearch.script.mustache.MultiSearchTemplateRequest;
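// For reference: a minimal sketch of seeding documents through the typeless /{index}/_doc/{id}
// endpoint, as the rewritten indexDocuments() below does. Illustrative only, not part of the
// patch; `restClient` is an assumed low-level RestClient.
Request doc = new Request(HttpPut.METHOD_NAME, "/index/_doc/1"); // was /index/type/1, which needed a deprecation-warning handler
doc.setJsonEntity("{\"type\":\"type1\", \"id\":1, \"num\":10, \"num2\":50}");
restClient.performRequest(doc);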
@@ -125,24 +124,19 @@ public class SearchIT extends OpenSearchRestHighLevelClientTestCase { @Before public void indexDocuments() throws IOException { { - Request doc1 = new Request(HttpPut.METHOD_NAME, "/index/type/1"); - doc1.setOptions(expectWarningsOnce(RestIndexAction.TYPES_DEPRECATION_MESSAGE)); + Request doc1 = new Request(HttpPut.METHOD_NAME, "/index/_doc/1"); doc1.setJsonEntity("{\"type\":\"type1\", \"id\":1, \"num\":10, \"num2\":50}"); client().performRequest(doc1); - Request doc2 = new Request(HttpPut.METHOD_NAME, "/index/type/2"); - doc2.setOptions(expectWarningsOnce(RestIndexAction.TYPES_DEPRECATION_MESSAGE)); + Request doc2 = new Request(HttpPut.METHOD_NAME, "/index/_doc/2"); doc2.setJsonEntity("{\"type\":\"type1\", \"id\":2, \"num\":20, \"num2\":40}"); client().performRequest(doc2); - Request doc3 = new Request(HttpPut.METHOD_NAME, "/index/type/3"); - doc3.setOptions(expectWarningsOnce(RestIndexAction.TYPES_DEPRECATION_MESSAGE)); + Request doc3 = new Request(HttpPut.METHOD_NAME, "/index/_doc/3"); doc3.setJsonEntity("{\"type\":\"type1\", \"id\":3, \"num\":50, \"num2\":35}"); client().performRequest(doc3); - Request doc4 = new Request(HttpPut.METHOD_NAME, "/index/type/4"); - doc4.setOptions(expectWarningsOnce(RestIndexAction.TYPES_DEPRECATION_MESSAGE)); + Request doc4 = new Request(HttpPut.METHOD_NAME, "/index/_doc/4"); doc4.setJsonEntity("{\"type\":\"type2\", \"id\":4, \"num\":100, \"num2\":10}"); client().performRequest(doc4); - Request doc5 = new Request(HttpPut.METHOD_NAME, "/index/type/5"); - doc5.setOptions(expectWarningsOnce(RestIndexAction.TYPES_DEPRECATION_MESSAGE)); + Request doc5 = new Request(HttpPut.METHOD_NAME, "/index/_doc/5"); doc5.setJsonEntity("{\"type\":\"type2\", \"id\":5, \"num\":100, \"num2\":10}"); client().performRequest(doc5); } @@ -241,13 +235,11 @@ public void testSearchNoQuery() throws IOException { assertEquals(5, searchResponse.getHits().getHits().length); for (SearchHit searchHit : searchResponse.getHits().getHits()) { assertEquals("index", searchHit.getIndex()); - assertEquals("type", searchHit.getType()); assertThat(Integer.valueOf(searchHit.getId()), both(greaterThan(0)).and(lessThan(6))); assertEquals(1.0f, searchHit.getScore(), 0); assertEquals(-1L, searchHit.getVersion()); assertNotNull(searchHit.getSourceAsMap()); assertEquals(4, searchHit.getSourceAsMap().size()); - assertTrue(searchHit.getSourceAsMap().containsKey("type")); assertTrue(searchHit.getSourceAsMap().containsKey("num")); assertTrue(searchHit.getSourceAsMap().containsKey("num2")); } @@ -266,7 +258,6 @@ public void testSearchMatchQuery() throws IOException { assertThat(searchResponse.getHits().getMaxScore(), greaterThan(0f)); SearchHit searchHit = searchResponse.getHits().getHits()[0]; assertEquals("index", searchHit.getIndex()); - assertEquals("type", searchHit.getType()); assertEquals("1", searchHit.getId()); assertThat(searchHit.getScore(), greaterThan(0f)); assertEquals(-1L, searchHit.getVersion()); @@ -1143,7 +1134,6 @@ public void testExplain() throws IOException { ExplainResponse explainResponse = execute(explainRequest, highLevelClient()::explain, highLevelClient()::explainAsync); assertThat(explainResponse.getIndex(), equalTo("index1")); - assertThat(explainResponse.getType(), equalTo("_doc")); assertThat(Integer.valueOf(explainResponse.getId()), equalTo(1)); assertTrue(explainResponse.isExists()); assertTrue(explainResponse.isMatch()); @@ -1158,7 +1148,6 @@ public void testExplain() throws IOException { ExplainResponse explainResponse = execute(explainRequest, highLevelClient()::explain, highLevelClient()::explainAsync); assertThat(explainResponse.getIndex(), 
equalTo("index1")); - assertThat(explainResponse.getType(), equalTo("_doc")); assertThat(Integer.valueOf(explainResponse.getId()), equalTo(1)); assertTrue(explainResponse.isExists()); assertTrue(explainResponse.isMatch()); @@ -1173,7 +1162,6 @@ public void testExplain() throws IOException { ExplainResponse explainResponse = execute(explainRequest, highLevelClient()::explain, highLevelClient()::explainAsync); assertThat(explainResponse.getIndex(), equalTo("index1")); - assertThat(explainResponse.getType(), equalTo("_doc")); assertThat(Integer.valueOf(explainResponse.getId()), equalTo(1)); assertTrue(explainResponse.isExists()); assertFalse(explainResponse.isMatch()); @@ -1189,7 +1177,6 @@ public void testExplain() throws IOException { ExplainResponse explainResponse = execute(explainRequest, highLevelClient()::explain, highLevelClient()::explainAsync); assertThat(explainResponse.getIndex(), equalTo("index1")); - assertThat(explainResponse.getType(), equalTo("_doc")); assertThat(Integer.valueOf(explainResponse.getId()), equalTo(1)); assertTrue(explainResponse.isExists()); assertFalse(explainResponse.isMatch()); @@ -1221,7 +1208,6 @@ public void testExplainNonExistent() throws IOException { ExplainResponse explainResponse = execute(explainRequest, highLevelClient()::explain, highLevelClient()::explainAsync); assertThat(explainResponse.getIndex(), equalTo("index1")); - assertThat(explainResponse.getType(), equalTo("_doc")); assertThat(explainResponse.getId(), equalTo("999")); assertFalse(explainResponse.isExists()); assertFalse(explainResponse.isMatch()); diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/SyncedFlushResponseTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/SyncedFlushResponseTests.java deleted file mode 100644 index e56e78d5d9caf..0000000000000 --- a/client/rest-high-level/src/test/java/org/opensearch/client/SyncedFlushResponseTests.java +++ /dev/null @@ -1,258 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.client; - -import com.carrotsearch.hppc.ObjectIntHashMap; -import com.carrotsearch.hppc.ObjectIntMap; -import org.opensearch.cluster.routing.ShardRouting; -import org.opensearch.cluster.routing.ShardRoutingState; -import org.opensearch.cluster.routing.TestShardRouting; -import org.opensearch.common.bytes.BytesReference; -import org.opensearch.common.xcontent.DeprecationHandler; -import org.opensearch.common.xcontent.ToXContent; -import org.opensearch.common.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.index.shard.ShardId; -import org.opensearch.indices.flush.ShardsSyncedFlushResult; -import org.opensearch.indices.flush.SyncedFlushService; -import org.opensearch.test.OpenSearchTestCase; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -public class SyncedFlushResponseTests extends OpenSearchTestCase { - - public void testXContentSerialization() throws IOException { - final XContentType xContentType = randomFrom(XContentType.values()); - TestPlan plan = createTestPlan(); - - XContentBuilder serverResponsebuilder = XContentBuilder.builder(xContentType.xContent()); - assertNotNull(plan.result); - serverResponsebuilder.startObject(); - plan.result.toXContent(serverResponsebuilder, ToXContent.EMPTY_PARAMS); - serverResponsebuilder.endObject(); - XContentBuilder clientResponsebuilder = XContentBuilder.builder(xContentType.xContent()); - assertNotNull(plan.result); - plan.clientResult.toXContent(clientResponsebuilder, ToXContent.EMPTY_PARAMS); - Map serverContentMap = convertFailureListToSet( - serverResponsebuilder.generator() - .contentType() - .xContent() - .createParser( - xContentRegistry(), - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - BytesReference.bytes(serverResponsebuilder).streamInput() - ) - .map() - ); - Map clientContentMap = convertFailureListToSet( - clientResponsebuilder.generator() - .contentType() - .xContent() - .createParser( - xContentRegistry(), - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - BytesReference.bytes(clientResponsebuilder).streamInput() - ) - .map() - ); - assertEquals(serverContentMap, clientContentMap); - } - - public void testXContentDeserialization() throws IOException { - final XContentType xContentType = randomFrom(XContentType.values()); - TestPlan plan = createTestPlan(); - XContentBuilder builder = XContentBuilder.builder(xContentType.xContent()); - builder.startObject(); - plan.result.toXContent(builder, ToXContent.EMPTY_PARAMS); - builder.endObject(); - XContentParser parser = builder.generator() - .contentType() - .xContent() - .createParser(xContentRegistry(), DeprecationHandler.THROW_UNSUPPORTED_OPERATION, BytesReference.bytes(builder).streamInput()); - SyncedFlushResponse originalResponse = plan.clientResult; - SyncedFlushResponse parsedResponse = SyncedFlushResponse.fromXContent(parser); - assertNotNull(parsedResponse); - assertShardCounts(originalResponse.getShardCounts(), parsedResponse.getShardCounts()); - for (Map.Entry entry : originalResponse.getIndexResults().entrySet()) { - String index = entry.getKey(); - SyncedFlushResponse.IndexResult responseResult = entry.getValue(); - SyncedFlushResponse.IndexResult parsedResult = parsedResponse.getIndexResults().get(index); - assertNotNull(responseResult); - assertNotNull(parsedResult); - 
assertShardCounts(responseResult.getShardCounts(), parsedResult.getShardCounts()); - assertEquals(responseResult.failures().size(), parsedResult.failures().size()); - for (SyncedFlushResponse.ShardFailure responseShardFailure : responseResult.failures()) { - assertTrue(containsFailure(parsedResult.failures(), responseShardFailure)); - } - } - } - - static class TestPlan { - SyncedFlushResponse.ShardCounts totalCounts; - Map countsPerIndex = new HashMap<>(); - ObjectIntMap expectedFailuresPerIndex = new ObjectIntHashMap<>(); - org.opensearch.action.admin.indices.flush.SyncedFlushResponse result; - SyncedFlushResponse clientResult; - } - - TestPlan createTestPlan() throws IOException { - final TestPlan testPlan = new TestPlan(); - final Map> indicesResults = new HashMap<>(); - Map indexResults = new HashMap<>(); - final XContentType xContentType = randomFrom(XContentType.values()); - final int indexCount = randomIntBetween(1, 10); - int totalShards = 0; - int totalSuccessful = 0; - int totalFailed = 0; - for (int i = 0; i < indexCount; i++) { - final String index = "index_" + i; - int shards = randomIntBetween(1, 4); - int replicas = randomIntBetween(0, 2); - int successful = 0; - int failed = 0; - int failures = 0; - List shardsResults = new ArrayList<>(); - List shardFailures = new ArrayList<>(); - for (int shard = 0; shard < shards; shard++) { - final ShardId shardId = new ShardId(index, "_na_", shard); - if (randomInt(5) < 2) { - // total shard failure - failed += replicas + 1; - failures++; - shardsResults.add(new ShardsSyncedFlushResult(shardId, replicas + 1, "simulated total failure")); - shardFailures.add(new SyncedFlushResponse.ShardFailure(shardId.id(), "simulated total failure", new HashMap<>())); - } else { - Map shardResponses = new HashMap<>(); - for (int copy = 0; copy < replicas + 1; copy++) { - final ShardRouting shardRouting = TestShardRouting.newShardRouting( - index, - shard, - "node_" + shardId + "_" + copy, - null, - copy == 0, - ShardRoutingState.STARTED - ); - if (randomInt(5) < 2) { - // shard copy failure - failed++; - failures++; - shardResponses.put(shardRouting, new SyncedFlushService.ShardSyncedFlushResponse("copy failure " + shardId)); - // Building the shardRouting map here. 
- XContentBuilder builder = XContentBuilder.builder(xContentType.xContent()); - Map routing = shardRouting.toXContent(builder, ToXContent.EMPTY_PARAMS) - .generator() - .contentType() - .xContent() - .createParser( - xContentRegistry(), - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - BytesReference.bytes(builder).streamInput() - ) - .map(); - shardFailures.add(new SyncedFlushResponse.ShardFailure(shardId.id(), "copy failure " + shardId, routing)); - } else { - successful++; - shardResponses.put(shardRouting, new SyncedFlushService.ShardSyncedFlushResponse((String) null)); - } - } - shardsResults.add(new ShardsSyncedFlushResult(shardId, "_sync_id_" + shard, replicas + 1, shardResponses)); - } - } - indicesResults.put(index, shardsResults); - indexResults.put(index, new SyncedFlushResponse.IndexResult(shards * (replicas + 1), successful, failed, shardFailures)); - testPlan.countsPerIndex.put(index, new SyncedFlushResponse.ShardCounts(shards * (replicas + 1), successful, failed)); - testPlan.expectedFailuresPerIndex.put(index, failures); - totalFailed += failed; - totalShards += shards * (replicas + 1); - totalSuccessful += successful; - } - testPlan.result = new org.opensearch.action.admin.indices.flush.SyncedFlushResponse(indicesResults); - testPlan.totalCounts = new SyncedFlushResponse.ShardCounts(totalShards, totalSuccessful, totalFailed); - testPlan.clientResult = new SyncedFlushResponse( - new SyncedFlushResponse.ShardCounts(totalShards, totalSuccessful, totalFailed), - indexResults - ); - return testPlan; - } - - public boolean containsFailure(List failures, SyncedFlushResponse.ShardFailure origFailure) { - for (SyncedFlushResponse.ShardFailure failure : failures) { - if (failure.getShardId() == origFailure.getShardId() - && failure.getFailureReason().equals(origFailure.getFailureReason()) - && failure.getRouting().equals(origFailure.getRouting())) { - return true; - } - } - return false; - } - - public void assertShardCounts(SyncedFlushResponse.ShardCounts first, SyncedFlushResponse.ShardCounts second) { - if (first == null) { - assertNull(second); - } else { - assertTrue(first.equals(second)); - } - } - - public Map convertFailureListToSet(Map input) { - Map retMap = new HashMap<>(); - for (Map.Entry entry : input.entrySet()) { - if (entry.getKey().equals(SyncedFlushResponse.SHARDS_FIELD)) { - retMap.put(entry.getKey(), entry.getValue()); - } else { - // This was an index entry. 
- @SuppressWarnings("unchecked") - Map indexResult = (Map) entry.getValue(); - Map retResult = new HashMap<>(); - for (Map.Entry entry2 : indexResult.entrySet()) { - if (entry2.getKey().equals(SyncedFlushResponse.IndexResult.FAILURES_FIELD)) { - @SuppressWarnings("unchecked") - List failures = (List) entry2.getValue(); - Set retSet = new HashSet<>(failures); - retResult.put(entry.getKey(), retSet); - } else { - retResult.put(entry2.getKey(), entry2.getValue()); - } - } - retMap.put(entry.getKey(), retResult); - } - } - return retMap; - } -} diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/TasksIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/TasksIT.java index 0db8ee4406c8c..d987e786fff76 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/TasksIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/TasksIT.java @@ -117,7 +117,7 @@ public void testGetValidTask() throws Exception { } org.opensearch.tasks.TaskInfo info = taskResponse.getTaskInfo(); assertTrue(info.isCancellable()); - assertEquals("reindex from [source1] to [dest][_doc]", info.getDescription()); + assertEquals("reindex from [source1] to [dest]", info.getDescription()); assertEquals("indices:data/write/reindex", info.getAction()); if (taskResponse.isCompleted() == false) { assertBusy(checkTaskCompletionStatus(client(), taskId)); diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/core/TermVectorsResponseTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/core/TermVectorsResponseTests.java index 11a6aeb6dbe47..33b82c10d8873 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/core/TermVectorsResponseTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/core/TermVectorsResponseTests.java @@ -59,7 +59,6 @@ public void testFromXContent() throws IOException { static void toXContent(TermVectorsResponse response, XContentBuilder builder) throws IOException { builder.startObject(); builder.field("_index", response.getIndex()); - builder.field("_type", response.getType()); if (response.getId() != null) { builder.field("_id", response.getId()); } @@ -130,7 +129,6 @@ private static void toXContent(TermVectorsResponse.TermVector tv, XContentBuilde static TermVectorsResponse createTestInstance() { String index = randomAlphaOfLength(5); - String type = randomAlphaOfLength(5); String id = String.valueOf(randomIntBetween(1, 100)); long version = randomNonNegativeLong(); long tookInMillis = randomNonNegativeLong(); @@ -154,7 +152,7 @@ static TermVectorsResponse createTestInstance() { ); } } - TermVectorsResponse tvresponse = new TermVectorsResponse(index, type, id, version, found, tookInMillis, tvList); + TermVectorsResponse tvresponse = new TermVectorsResponse(index, id, version, found, tookInMillis, tvList); return tvresponse; } diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/CRUDDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/CRUDDocumentationIT.java index 67df99d9d7c08..959c5a827f143 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/CRUDDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/CRUDDocumentationIT.java @@ -1719,9 +1719,8 @@ public void testTermVectors() throws Exception { // tag::term-vectors-response String index = response.getIndex(); // <1> - String type = response.getType(); // <2> - String id = 
response.getId(); // <3> - boolean found = response.getFound(); // <4> + String id = response.getId(); // <2> + boolean found = response.getFound(); // <3> // end::term-vectors-response if (response.getTermVectorsList() != null) { @@ -2051,7 +2050,6 @@ private MultiGetItemResponse unwrapAndAssertExample(MultiGetResponse response) { assertThat(response.getResponses(), arrayWithSize(1)); MultiGetItemResponse item = response.getResponses()[0]; assertEquals("index", item.getIndex()); - assertEquals("_doc", item.getType()); assertEquals("example_id", item.getId()); return item; } diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IndicesClientDocumentationIT.java index ad2b0d1e603bb..3fbe7f63b09a2 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IndicesClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IndicesClientDocumentationIT.java @@ -44,7 +44,6 @@ import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.action.admin.indices.flush.FlushResponse; -import org.opensearch.action.admin.indices.flush.SyncedFlushRequest; import org.opensearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.opensearch.action.admin.indices.open.OpenIndexRequest; @@ -69,7 +68,6 @@ import org.opensearch.client.GetAliasesResponse; import org.opensearch.client.RequestOptions; import org.opensearch.client.RestHighLevelClient; -import org.opensearch.client.SyncedFlushResponse; import org.opensearch.client.indices.AnalyzeRequest; import org.opensearch.client.indices.AnalyzeResponse; import org.opensearch.client.indices.CloseIndexRequest; @@ -1012,94 +1010,6 @@ public void onFailure(Exception e) { } } - @SuppressWarnings("unused") - public void testSyncedFlushIndex() throws Exception { - RestHighLevelClient client = highLevelClient(); - - { - createIndex("index1", Settings.EMPTY); - } - - { - // tag::flush-synced-request - SyncedFlushRequest request = new SyncedFlushRequest("index1"); // <1> - SyncedFlushRequest requestMultiple = new SyncedFlushRequest("index1", "index2"); // <2> - SyncedFlushRequest requestAll = new SyncedFlushRequest(); // <3> - // end::flush-synced-request - - // tag::flush-synced-request-indicesOptions - request.indicesOptions(IndicesOptions.lenientExpandOpen()); // <1> - // end::flush-synced-request-indicesOptions - - // tag::flush-synced-execute - SyncedFlushResponse flushSyncedResponse = client.indices().flushSynced(request, expectWarnings( - "Synced flush is deprecated and will be removed in 8.0. Use flush at _/flush or /{index}/_flush instead." 
- )); - // end::flush-synced-execute - - // tag::flush-synced-response - int totalShards = flushSyncedResponse.totalShards(); // <1> - int successfulShards = flushSyncedResponse.successfulShards(); // <2> - int failedShards = flushSyncedResponse.failedShards(); // <3> - - for (Map.Entry responsePerIndexEntry: - flushSyncedResponse.getIndexResults().entrySet()) { - String indexName = responsePerIndexEntry.getKey(); // <4> - SyncedFlushResponse.IndexResult indexResult = responsePerIndexEntry.getValue(); - int totalShardsForIndex = indexResult.totalShards(); // <5> - int successfulShardsForIndex = indexResult.successfulShards(); // <6> - int failedShardsForIndex = indexResult.failedShards(); // <7> - if (failedShardsForIndex > 0) { - for (SyncedFlushResponse.ShardFailure failureEntry: indexResult.failures()) { - int shardId = failureEntry.getShardId(); // <8> - String failureReason = failureEntry.getFailureReason(); // <9> - Map routing = failureEntry.getRouting(); // <10> - } - } - } - // end::flush-synced-response - - // tag::flush-synced-execute-listener - ActionListener listener = new ActionListener() { - @Override - public void onResponse(SyncedFlushResponse refreshResponse) { - // <1> - } - - @Override - public void onFailure(Exception e) { - // <2> - } - }; - // end::flush-synced-execute-listener - - // Replace the empty listener by a blocking listener in test - final CountDownLatch latch = new CountDownLatch(1); - listener = new LatchedActionListener<>(listener, latch); - - // tag::flush-synced-execute-async - client.indices().flushSyncedAsync(request, expectWarnings( - "Synced flush is deprecated and will be removed in 8.0. Use flush at _/flush or /{index}/_flush instead." - ), listener); // <1> - // end::flush-synced-execute-async - - assertTrue(latch.await(30L, TimeUnit.SECONDS)); - } - - { - // tag::flush-synced-notfound - try { - SyncedFlushRequest request = new SyncedFlushRequest("does_not_exist"); - client.indices().flushSynced(request, RequestOptions.DEFAULT); - } catch (OpenSearchException exception) { - if (exception.status() == RestStatus.NOT_FOUND) { - // <1> - } - } - // end::flush-synced-notfound - } - } - public void testGetSettings() throws Exception { RestHighLevelClient client = highLevelClient(); diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/QueryDSLDocumentationTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/QueryDSLDocumentationTests.java index 47a116458cb96..9f5c2e51a7960 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/QueryDSLDocumentationTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/QueryDSLDocumentationTests.java @@ -88,7 +88,6 @@ import static org.opensearch.index.query.QueryBuilders.spanWithinQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; import static org.opensearch.index.query.QueryBuilders.termsQuery; -import static org.opensearch.index.query.QueryBuilders.typeQuery; import static org.opensearch.index.query.QueryBuilders.wildcardQuery; import static org.opensearch.index.query.QueryBuilders.wrapperQuery; import static org.opensearch.index.query.functionscore.ScoreFunctionBuilders.exponentialDecayFunction; @@ -447,12 +446,6 @@ public void testTerms() { // end::terms } - public void testType() { - // tag::type - typeQuery("my_type"); // <1> - // end::type - } - public void testWildcard() { // tag::wildcard wildcardQuery( diff --git 
a/client/sniffer/licenses/jackson-core-2.12.5.jar.sha1 b/client/sniffer/licenses/jackson-core-2.12.5.jar.sha1 deleted file mode 100644 index ed27d8a96bf20..0000000000000 --- a/client/sniffer/licenses/jackson-core-2.12.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -725e364cc71b80e60fa450bd06d75cdea7fb2d59 \ No newline at end of file diff --git a/client/sniffer/licenses/jackson-core-2.12.6.jar.sha1 b/client/sniffer/licenses/jackson-core-2.12.6.jar.sha1 new file mode 100644 index 0000000000000..d62c70d6b0f11 --- /dev/null +++ b/client/sniffer/licenses/jackson-core-2.12.6.jar.sha1 @@ -0,0 +1 @@ +5bf206c0b5982cfcd868b3d9349dc5190db8bab5 \ No newline at end of file diff --git a/client/sniffer/src/main/java/org/opensearch/client/sniff/Sniffer.java b/client/sniffer/src/main/java/org/opensearch/client/sniff/Sniffer.java index 2a6362c611329..adddb3bda725c 100644 --- a/client/sniffer/src/main/java/org/opensearch/client/sniff/Sniffer.java +++ b/client/sniffer/src/main/java/org/opensearch/client/sniff/Sniffer.java @@ -160,9 +160,8 @@ public void run() { // tasks are run by a single threaded executor, so swapping is safe with a simple volatile variable ScheduledTask previousTask = nextScheduledTask; nextScheduledTask = new ScheduledTask(task, future); - assert initialized.get() == false - || previousTask.task.isSkipped() - || previousTask.task.hasStarted() : "task that we are replacing is neither " + "cancelled nor has it ever started"; + assert initialized.get() == false || previousTask.task.isSkipped() || previousTask.task.hasStarted() + : "task that we are replacing is neither " + "cancelled nor has it ever started"; } } diff --git a/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/RequestsWithoutContentIT.java b/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/RequestsWithoutContentIT.java index 474f7052b8450..92c35ccf316c7 100644 --- a/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/RequestsWithoutContentIT.java +++ b/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/RequestsWithoutContentIT.java @@ -43,7 +43,7 @@ public class RequestsWithoutContentIT extends OpenSearchRestTestCase { public void testIndexMissingBody() throws IOException { ResponseException responseException = expectThrows(ResponseException.class, () -> - client().performRequest(new Request(randomBoolean() ? "POST" : "PUT", "/idx/type/123"))); + client().performRequest(new Request(randomBoolean() ? 
"POST" : "PUT", "/idx/_doc/123"))); assertResponseException(responseException, "request body is required"); } diff --git a/distribution/docker/src/docker/Dockerfile b/distribution/docker/src/docker/Dockerfile index 94f3f03e5fa4a..c9be5c632cb59 100644 --- a/distribution/docker/src/docker/Dockerfile +++ b/distribution/docker/src/docker/Dockerfile @@ -63,7 +63,9 @@ FROM ${base_image} ENV OPENSEARCH_CONTAINER true -RUN for iter in {1..10}; do \\ +RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-* && \\ + sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-* && \\ + for iter in {1..10}; do \\ ${package_manager} update --setopt=tsflags=nodocs -y && \\ ${package_manager} install --setopt=tsflags=nodocs -y \\ nc shadow-utils zip unzip && \\ diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index e5c75af5188e5..93a82ff324835 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -63,7 +63,7 @@ import java.util.regex.Pattern */ plugins { - id "nebula.ospackage-base" version "9.0.0" + id "nebula.ospackage-base" version "9.1.1" } void addProcessFilesTask(String type, boolean jdk) { diff --git a/distribution/packages/src/common/env/opensearch b/distribution/packages/src/common/env/opensearch index 52a2639abbc46..198bcfde90c4c 100644 --- a/distribution/packages/src/common/env/opensearch +++ b/distribution/packages/src/common/env/opensearch @@ -6,7 +6,7 @@ #OPENSEARCH_HOME=/usr/share/opensearch # OpenSearch Java path -#JAVA_HOME= +#OPENSEARCH_JAVA_HOME= # OpenSearch configuration directory # Note: this setting will be shared with command-line tools diff --git a/distribution/packages/src/deb/init.d/opensearch b/distribution/packages/src/deb/init.d/opensearch index cc95b465c88d6..e5195d2d54dba 100755 --- a/distribution/packages/src/deb/init.d/opensearch +++ b/distribution/packages/src/deb/init.d/opensearch @@ -66,8 +66,9 @@ DAEMON=$OPENSEARCH_HOME/bin/opensearch DAEMON_OPTS="-d -p $PID_FILE" export OPENSEARCH_JAVA_OPTS -export JAVA_HOME export OPENSEARCH_PATH_CONF +export JAVA_HOME +export OPENSEARCH_JAVA_HOME if [ ! -x "$DAEMON" ]; then echo "The opensearch startup script does not exists or it is not executable, tried: $DAEMON" diff --git a/distribution/packages/src/rpm/init.d/opensearch b/distribution/packages/src/rpm/init.d/opensearch index d31fca9d2d1a7..12a1470e75acb 100644 --- a/distribution/packages/src/rpm/init.d/opensearch +++ b/distribution/packages/src/rpm/init.d/opensearch @@ -53,6 +53,7 @@ export OPENSEARCH_JAVA_OPTS export JAVA_HOME export OPENSEARCH_PATH_CONF export OPENSEARCH_STARTUP_SLEEP_TIME +export OPENSEARCH_JAVA_HOME lockfile=/var/lock/subsys/$prog diff --git a/distribution/src/bin/opensearch-env b/distribution/src/bin/opensearch-env index 99bded2ad0e52..6fe703a73b2de 100644 --- a/distribution/src/bin/opensearch-env +++ b/distribution/src/bin/opensearch-env @@ -44,8 +44,11 @@ OPENSEARCH_HOME=`dirname "$OPENSEARCH_HOME"` # now set the classpath OPENSEARCH_CLASSPATH="$OPENSEARCH_HOME/lib/*" -# now set the path to java -if [ ! -z "$JAVA_HOME" ]; then +# now set the path to java: OPENSEARCH_JAVA_HOME -> JAVA_HOME -> bundled JDK +if [ ! -z "$OPENSEARCH_JAVA_HOME" ]; then + JAVA="$OPENSEARCH_JAVA_HOME/bin/java" + JAVA_TYPE="OPENSEARCH_JAVA_HOME" +elif [ ! 
-z "$JAVA_HOME" ]; then JAVA="$JAVA_HOME/bin/java" JAVA_TYPE="JAVA_HOME" else diff --git a/distribution/src/bin/opensearch-env.bat b/distribution/src/bin/opensearch-env.bat index 244aa4452d581..bc8a6ce53a5f5 100644 --- a/distribution/src/bin/opensearch-env.bat +++ b/distribution/src/bin/opensearch-env.bat @@ -39,16 +39,19 @@ if "%1" == "nojava" ( exit /b ) -rem compariing to empty string makes this equivalent to bash -v check on env var +rem comparing to empty string makes this equivalent to bash -v check on env var rem and allows to effectively force use of the bundled jdk when launching OpenSearch -rem by setting JAVA_HOME= -if "%JAVA_HOME%" == "" ( +rem by setting OPENSEARCH_JAVA_HOME= and JAVA_HOME= +if not "%OPENSEARCH_JAVA_HOME%" == "" ( + set JAVA="%OPENSEARCH_JAVA_HOME%\bin\java.exe" + set JAVA_TYPE=OPENSEARCH_JAVA_HOME +) else if not "%JAVA_HOME%" == "" ( + set JAVA="%JAVA_HOME%\bin\java.exe" + set JAVA_TYPE=JAVA_HOME +) else ( set JAVA="%OPENSEARCH_HOME%\jdk\bin\java.exe" set JAVA_HOME="%OPENSEARCH_HOME%\jdk" set JAVA_TYPE=bundled jdk -) else ( - set JAVA="%JAVA_HOME%\bin\java.exe" - set JAVA_TYPE=JAVA_HOME ) if not exist !JAVA! ( diff --git a/distribution/tools/keystore-cli/build.gradle b/distribution/tools/keystore-cli/build.gradle index 670c898019d28..05dddbed501af 100644 --- a/distribution/tools/keystore-cli/build.gradle +++ b/distribution/tools/keystore-cli/build.gradle @@ -35,5 +35,5 @@ dependencies { compileOnly project(":libs:opensearch-cli") testImplementation project(":test:framework") testImplementation 'com.google.jimfs:jimfs:1.1' - testRuntimeOnly 'com.google.guava:guava:30.1.1-jre' + testRuntimeOnly 'com.google.guava:guava:31.0.1-jre' } diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle index d96fced1ec293..2f3ede7194a6d 100644 --- a/distribution/tools/plugin-cli/build.gradle +++ b/distribution/tools/plugin-cli/build.gradle @@ -38,8 +38,8 @@ dependencies { api "org.bouncycastle:bcpg-fips:1.0.5.1" api "org.bouncycastle:bc-fips:1.0.2.1" testImplementation project(":test:framework") - testImplementation 'com.google.jimfs:jimfs:1.1' - testRuntimeOnly 'com.google.guava:guava:30.1.1-jre' + testImplementation 'com.google.jimfs:jimfs:1.2' + testRuntimeOnly 'com.google.guava:guava:31.0.1-jre' } tasks.named("dependencyLicenses").configure { diff --git a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/InstallPluginCommand.java b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/InstallPluginCommand.java index b404614ca435b..8acf137043a92 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/InstallPluginCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/InstallPluginCommand.java @@ -218,11 +218,23 @@ class InstallPluginCommand extends EnvironmentAwareCommand { Arrays.asList("b", "batch"), "Enable batch mode explicitly, automatic confirmation of security permission" ); - this.arguments = parser.nonOptions("plugin id"); + this.arguments = parser.nonOptions("plugin "); } @Override protected void printAdditionalHelp(Terminal terminal) { + terminal.println("Plugins are packaged as zip files. 
diff --git a/distribution/tools/keystore-cli/build.gradle b/distribution/tools/keystore-cli/build.gradle
index 670c898019d28..05dddbed501af 100644
--- a/distribution/tools/keystore-cli/build.gradle
+++ b/distribution/tools/keystore-cli/build.gradle
@@ -35,5 +35,5 @@ dependencies {
   compileOnly project(":libs:opensearch-cli")
   testImplementation project(":test:framework")
   testImplementation 'com.google.jimfs:jimfs:1.1'
-  testRuntimeOnly 'com.google.guava:guava:30.1.1-jre'
+  testRuntimeOnly 'com.google.guava:guava:31.0.1-jre'
 }
diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle
index d96fced1ec293..2f3ede7194a6d 100644
--- a/distribution/tools/plugin-cli/build.gradle
+++ b/distribution/tools/plugin-cli/build.gradle
@@ -38,8 +38,8 @@ dependencies {
   api "org.bouncycastle:bcpg-fips:1.0.5.1"
   api "org.bouncycastle:bc-fips:1.0.2.1"
   testImplementation project(":test:framework")
-  testImplementation 'com.google.jimfs:jimfs:1.1'
-  testRuntimeOnly 'com.google.guava:guava:30.1.1-jre'
+  testImplementation 'com.google.jimfs:jimfs:1.2'
+  testRuntimeOnly 'com.google.guava:guava:31.0.1-jre'
 }

 tasks.named("dependencyLicenses").configure {
diff --git a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/InstallPluginCommand.java b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/InstallPluginCommand.java
index b404614ca435b..8acf137043a92 100644
--- a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/InstallPluginCommand.java
+++ b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/InstallPluginCommand.java
@@ -218,11 +218,23 @@ class InstallPluginCommand extends EnvironmentAwareCommand {
             Arrays.asList("b", "batch"),
             "Enable batch mode explicitly, automatic confirmation of security permission"
         );
-        this.arguments = parser.nonOptions("plugin id");
+        this.arguments = parser.nonOptions("plugin <name|Zip File|URL>");
     }

     @Override
     protected void printAdditionalHelp(Terminal terminal) {
+        terminal.println("Plugins are packaged as zip files. Each packaged plugin must contain a plugin properties file.");
+        terminal.println("");
+
+        // List possible plugin id inputs
+        terminal.println("The install command takes a plugin id, which may be any of the following:");
+        terminal.println("  An official opensearch plugin name");
+        terminal.println("  Maven coordinates to a plugin zip");
+        terminal.println("  A URL to a plugin zip");
+        terminal.println("  A local zip file");
+        terminal.println("");
+
+        // List official opensearch plugin names
         terminal.println("The following official plugins may be installed by name:");
         for (String plugin : OFFICIAL_PLUGINS) {
             terminal.println("  " + plugin);
@@ -401,7 +413,7 @@ private String getMavenUrl(Terminal terminal, String[] coordinates, String platf
     boolean urlExists(Terminal terminal, String urlString) throws IOException {
         terminal.println(VERBOSE, "Checking if url exists: " + urlString);
         URL url = new URL(urlString);
-        assert "https".equals(url.getProtocol()) : "Only http urls can be checked";
+        assert "https".equals(url.getProtocol()) : "Use of https protocol is required";
         HttpURLConnection urlConnection = (HttpURLConnection) url.openConnection();
         urlConnection.addRequestProperty("User-Agent", "opensearch-plugin-installer");
         urlConnection.setRequestMethod("HEAD");
diff --git a/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/InstallPluginCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/InstallPluginCommandTests.java
index a57050540a216..e0e5cbc54276e 100644
--- a/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/InstallPluginCommandTests.java
+++ b/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/InstallPluginCommandTests.java
@@ -828,6 +828,31 @@ protected boolean addShutdownHook() {
         }
     }

+    public void testPluginsHelpNonOptionArgumentsOutput() throws Exception {
+        MockTerminal terminal = new MockTerminal();
+        new InstallPluginCommand() {
+            @Override
+            protected boolean addShutdownHook() {
+                return false;
+            }
+        }.main(new String[] { "--help" }, terminal);
+        try (BufferedReader reader = new BufferedReader(new StringReader(terminal.getOutput()))) {
+
+            // grab first line of --help output
+            String line = reader.readLine();
+
+            // find the beginning of the Non-option arguments list
+            while (line.contains("Non-option arguments:") == false) {
+                line = reader.readLine();
+            }
+
+            // check that the non-option argument list contains the correct string
+            line = reader.readLine();
+            assertThat(line, containsString("<name|Zip File|URL>"));
+
+        }
+    }
+
     public void testInstallMisspelledOfficialPlugins() throws Exception {
         Tuple<Path, Environment> env = createEnv(fs, temp);
diff --git a/distribution/tools/upgrade-cli/build.gradle b/distribution/tools/upgrade-cli/build.gradle
index 5018a4bb8702e..29d06b89395c6 100644
--- a/distribution/tools/upgrade-cli/build.gradle
+++ b/distribution/tools/upgrade-cli/build.gradle
@@ -19,7 +19,7 @@ dependencies {
   implementation "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}"
   testImplementation project(":test:framework")
   testImplementation 'com.google.jimfs:jimfs:1.2'
-  testRuntimeOnly 'com.google.guava:guava:30.1.1-jre'
+  testRuntimeOnly 'com.google.guava:guava:31.0.1-jre'
 }

 tasks.named("dependencyLicenses").configure {
diff --git a/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.12.5.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.12.5.jar.sha1
deleted file mode 100644
index 797bcf2b161d4..0000000000000
--- 
a/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.12.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -52d929d5bb21d0186fe24c09624cc3ee4bafc3b3 \ No newline at end of file diff --git a/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.12.6.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.12.6.jar.sha1 new file mode 100644 index 0000000000000..48ee3bf53c630 --- /dev/null +++ b/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.12.6.jar.sha1 @@ -0,0 +1 @@ +9487231edd6b0b1f14692c9cba9e0462809215d1 \ No newline at end of file diff --git a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.12.5.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.12.5.jar.sha1 deleted file mode 100644 index ca1bd46bc3cd3..0000000000000 --- a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.12.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b064cf057f23d3d35390328c5030847efeffedde \ No newline at end of file diff --git a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.12.6.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.12.6.jar.sha1 new file mode 100644 index 0000000000000..f74842887d31b --- /dev/null +++ b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.12.6.jar.sha1 @@ -0,0 +1 @@ +fac216b606c1086e36acea6e572ee61572ad1670 \ No newline at end of file diff --git a/gradle.properties b/gradle.properties index 0974eb6d6b7da..53b593923ce26 100644 --- a/gradle.properties +++ b/gradle.properties @@ -11,7 +11,12 @@ org.gradle.warning.mode=none org.gradle.parallel=true -org.gradle.jvmargs=-Xmx3g -XX:+HeapDumpOnOutOfMemoryError -Xss2m +org.gradle.jvmargs=-Xmx3g -XX:+HeapDumpOnOutOfMemoryError -Xss2m \ + --add-exports jdk.compiler/com.sun.tools.javac.api=ALL-UNNAMED \ + --add-exports jdk.compiler/com.sun.tools.javac.file=ALL-UNNAMED \ + --add-exports jdk.compiler/com.sun.tools.javac.parser=ALL-UNNAMED \ + --add-exports jdk.compiler/com.sun.tools.javac.tree=ALL-UNNAMED \ + --add-exports jdk.compiler/com.sun.tools.javac.util=ALL-UNNAMED options.forkOptions.memoryMaximumSize=2g # Disable duplicate project id detection diff --git a/libs/cli/build.gradle b/libs/cli/build.gradle index 9abaf35e589be..7f1e9cb8d04b3 100644 --- a/libs/cli/build.gradle +++ b/libs/cli/build.gradle @@ -32,7 +32,7 @@ apply plugin: 'nebula.optional-base' apply plugin: 'opensearch.publish' dependencies { - api 'net.sf.jopt-simple:jopt-simple:5.0.2' + api 'net.sf.jopt-simple:jopt-simple:5.0.4' api project(':libs:opensearch-core') } diff --git a/libs/cli/licenses/jopt-simple-5.0.2.jar.sha1 b/libs/cli/licenses/jopt-simple-5.0.2.jar.sha1 deleted file mode 100644 index b50ed4fea3bd1..0000000000000 --- a/libs/cli/licenses/jopt-simple-5.0.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -98cafc6081d5632b61be2c9e60650b64ddbc637c \ No newline at end of file diff --git a/libs/cli/licenses/jopt-simple-5.0.4.jar.sha1 b/libs/cli/licenses/jopt-simple-5.0.4.jar.sha1 new file mode 100644 index 0000000000000..7ade81efe4d0d --- /dev/null +++ b/libs/cli/licenses/jopt-simple-5.0.4.jar.sha1 @@ -0,0 +1 @@ +4fdac2fbe92dfad86aa6e9301736f6b4342a3f5c \ No newline at end of file diff --git a/libs/grok/build.gradle b/libs/grok/build.gradle index b324bba381a26..ce23406721fe6 100644 --- a/libs/grok/build.gradle +++ b/libs/grok/build.gradle @@ -29,7 +29,7 @@ */ dependencies { - api 'org.jruby.joni:joni:2.1.29' + api 'org.jruby.joni:joni:2.1.41' // joni dependencies: api 'org.jruby.jcodings:jcodings:1.0.44' @@ -41,3 +41,7 @@ dependencies { 
tasks.named('forbiddenApisMain').configure { replaceSignatureFiles 'jdk-signatures' } + +thirdPartyAudit.ignoreMissingClasses( + 'org.jcodings.unicode.UnicodeCodeRange' +) \ No newline at end of file diff --git a/libs/grok/licenses/joni-2.1.29.jar.sha1 b/libs/grok/licenses/joni-2.1.29.jar.sha1 deleted file mode 100644 index 251ff2ec05a19..0000000000000 --- a/libs/grok/licenses/joni-2.1.29.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3cb751702e1194ff24259155db4d37e9383d4320 \ No newline at end of file diff --git a/libs/grok/licenses/joni-2.1.41.jar.sha1 b/libs/grok/licenses/joni-2.1.41.jar.sha1 new file mode 100644 index 0000000000000..4f0a0a8393dd0 --- /dev/null +++ b/libs/grok/licenses/joni-2.1.41.jar.sha1 @@ -0,0 +1 @@ +4a35f4eaef792073bc081b756b1f4949879cd41e \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-core-2.12.5.jar.sha1 b/libs/x-content/licenses/jackson-core-2.12.5.jar.sha1 deleted file mode 100644 index ed27d8a96bf20..0000000000000 --- a/libs/x-content/licenses/jackson-core-2.12.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -725e364cc71b80e60fa450bd06d75cdea7fb2d59 \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-core-2.12.6.jar.sha1 b/libs/x-content/licenses/jackson-core-2.12.6.jar.sha1 new file mode 100644 index 0000000000000..d62c70d6b0f11 --- /dev/null +++ b/libs/x-content/licenses/jackson-core-2.12.6.jar.sha1 @@ -0,0 +1 @@ +5bf206c0b5982cfcd868b3d9349dc5190db8bab5 \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-cbor-2.12.5.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-cbor-2.12.5.jar.sha1 deleted file mode 100644 index f992f732a7f91..0000000000000 --- a/libs/x-content/licenses/jackson-dataformat-cbor-2.12.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2b6f24ee5ac7cde7f5a4e574bd0af4a72ecb55f6 \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-cbor-2.12.6.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-cbor-2.12.6.jar.sha1 new file mode 100644 index 0000000000000..9fdb5a5012b8d --- /dev/null +++ b/libs/x-content/licenses/jackson-dataformat-cbor-2.12.6.jar.sha1 @@ -0,0 +1 @@ +3cd2e6a538f73483c6c59c354ce2276bcdc5ba7b \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-smile-2.12.5.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-smile-2.12.5.jar.sha1 deleted file mode 100644 index 9e2c3222014c2..0000000000000 --- a/libs/x-content/licenses/jackson-dataformat-smile-2.12.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4233326c74c6601fbbeea11c103c011859cc687d \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-smile-2.12.6.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-smile-2.12.6.jar.sha1 new file mode 100644 index 0000000000000..a8eb043684fac --- /dev/null +++ b/libs/x-content/licenses/jackson-dataformat-smile-2.12.6.jar.sha1 @@ -0,0 +1 @@ +bc9b6bcf12a14382424324ee067456ee354a0dfb \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-yaml-2.12.5.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-yaml-2.12.5.jar.sha1 deleted file mode 100644 index 6eba2dad4947f..0000000000000 --- a/libs/x-content/licenses/jackson-dataformat-yaml-2.12.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -89ec27e5f422b0749b1133137c1b36debbb5f3bc \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-yaml-2.12.6.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-yaml-2.12.6.jar.sha1 new file mode 100644 index 0000000000000..e2ed10942b3b6 --- /dev/null +++ 
b/libs/x-content/licenses/jackson-dataformat-yaml-2.12.6.jar.sha1 @@ -0,0 +1 @@ +cfba448bc4e92b8656968756a9c4af1ad8e502e4 \ No newline at end of file diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/ArabicAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/ArabicAnalyzerProvider.java index 397c1b2c922b0..40239cbf38567 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/ArabicAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/ArabicAnalyzerProvider.java @@ -50,7 +50,6 @@ public class ArabicAnalyzerProvider extends AbstractIndexAnalyzerProvider> getTokenFilters() { filters.put("classic", ClassicFilterFactory::new); filters.put("czech_stem", CzechStemTokenFilterFactory::new); filters.put("common_grams", requiresAnalysisSettings(CommonGramsTokenFilterFactory::new)); + filters.put("concatenate_graph", ConcatenateGraphTokenFilterFactory::new); filters.put( "condition", requiresAnalysisSettings((i, e, n, s) -> new ScriptedConditionTokenFilterFactory(i, n, s, scriptService.get())) diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/ConcatenateGraphTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/ConcatenateGraphTokenFilterFactory.java new file mode 100644 index 0000000000000..0d1a2b185d1d3 --- /dev/null +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/ConcatenateGraphTokenFilterFactory.java @@ -0,0 +1,81 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.analysis.common; + +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.miscellaneous.ConcatenateGraphFilter; +import org.apache.lucene.util.automaton.TooComplexToDeterminizeException; +import org.opensearch.LegacyESVersion; +import org.opensearch.common.settings.Settings; +import org.opensearch.env.Environment; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.analysis.AbstractTokenFilterFactory; + +/** + * Factory for {@link ConcatenateGraphFilter}. + * Adopted from {@link org.apache.lucene.analysis.miscellaneous.ConcatenateGraphFilterFactory}, with some changes to + * default values: token_separator is a "space", preserve_position_increments is false to avoid duplicated separators, + * max_graph_expansions is 100 as the default value of 10_000 seems to be unnecessarily large and preserve_separator is false. + * + *
+ * <ul>
+ *     <li>
+ *         preserve_separator:
+ *         For LegacyESVersion lesser than {@link LegacyESVersion#V_7_6_0} i.e. lucene versions lesser
+ *         than {@link org.apache.lucene.util.Version#LUCENE_8_4_0}
+ *         Whether {@link ConcatenateGraphFilter#SEP_LABEL} should separate the input tokens in the concatenated token.
+ *     </li>
+ *     <li>
+ *         token_separator:
+ *         Separator to use for concatenation. Must be a String with a single character or empty.
+ *         If not present, {@link ConcatenateGraphTokenFilterFactory#DEFAULT_TOKEN_SEPARATOR} will be used.
+ *         If empty i.e. "", tokens will be concatenated without any separators.
+ *     </li>
+ *     <li>
+ *         preserve_position_increments:
+ *         Whether to add an empty token for missing positions.
+ *         If not present, {@link ConcatenateGraphTokenFilterFactory#DEFAULT_PRESERVE_POSITION_INCREMENTS} will be used.
+ *     </li>
+ *     <li>
+ *         max_graph_expansions:
+ *         If the tokenStream graph has more than this many possible paths through, then we'll throw
+ *         {@link TooComplexToDeterminizeException} to preserve the stability and memory of the machine.
+ *         If not present, {@link ConcatenateGraphTokenFilterFactory#DEFAULT_MAX_GRAPH_EXPANSIONS} will be used.
+ *     </li>
+ * </ul>
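+ *
+ * <p>
+ * A minimal configuration sketch (illustrative only; the "my_concatenate_graph" and "my_analyzer"
+ * names are hypothetical, mirroring the style of this module's tests):
+ * <pre>
+ * Settings.builder()
+ *     .put("index.analysis.filter.my_concatenate_graph.type", "concatenate_graph")
+ *     .put("index.analysis.filter.my_concatenate_graph.token_separator", "-")
+ *     .put("index.analysis.analyzer.my_analyzer.type", "custom")
+ *     .put("index.analysis.analyzer.my_analyzer.tokenizer", "whitespace")
+ *     .put("index.analysis.analyzer.my_analyzer.filter", "my_concatenate_graph")
+ *     .build();
+ * </pre>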
+ * @see ConcatenateGraphFilter + */ +public class ConcatenateGraphTokenFilterFactory extends AbstractTokenFilterFactory { + public static final String DEFAULT_TOKEN_SEPARATOR = " "; + public static final int DEFAULT_MAX_GRAPH_EXPANSIONS = 100; + public static final boolean DEFAULT_PRESERVE_POSITION_INCREMENTS = false; + + private final Character tokenSeparator; + private final int maxGraphExpansions; + private final boolean preservePositionIncrements; + + ConcatenateGraphTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { + super(indexSettings, name, settings); + + if (indexSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_6_0)) { // i.e. Lucene 8.4.0 + String separator = settings.get("token_separator", DEFAULT_TOKEN_SEPARATOR); + if (separator.length() > 1) { + throw new IllegalArgumentException("token_separator must be either empty or a single character"); + } + tokenSeparator = separator.length() == 0 ? null : separator.charAt(0); // null means no separator while concatenating + } else { + boolean preserveSep = settings.getAsBoolean("preserve_separator", ConcatenateGraphFilter.DEFAULT_PRESERVE_SEP); + tokenSeparator = preserveSep ? ConcatenateGraphFilter.DEFAULT_TOKEN_SEPARATOR : null; + } + + maxGraphExpansions = settings.getAsInt("max_graph_expansions", DEFAULT_MAX_GRAPH_EXPANSIONS); + preservePositionIncrements = settings.getAsBoolean("preserve_position_increments", DEFAULT_PRESERVE_POSITION_INCREMENTS); + } + + @Override + public TokenStream create(TokenStream tokenStream) { + return new ConcatenateGraphFilter(tokenStream, tokenSeparator, preservePositionIncrements, maxGraphExpansions); + } +} diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CzechAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CzechAnalyzerProvider.java index b4d3820767c7b..f6bc6b3f7ea02 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CzechAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CzechAnalyzerProvider.java @@ -50,7 +50,6 @@ public class CzechAnalyzerProvider extends AbstractIndexAnalyzerProvider "Is" --> "Awe" --> "Some" + // Expected output from word_delimiter_graph is a graph: + // ---> "Power" --> "Shot" ---> "Is" ---> "Awe" ---> "Some" --- + // | | | | + // --> "PowerShot" -------- --> "AweSome" --------- + // and this filter will traverse through all possible paths to produce concatenated tokens + String[] expected = new String[] { + "Power Shot Is Awe Some", + "Power Shot Is AweSome", + "PowerShot Is Awe Some", + "PowerShot Is AweSome" }; + + // all tokens will be in the same position + int[] expectedPosIncrements = new int[] { 1, 0, 0, 0 }; + int[] expectedPosLengths = new int[] { 1, 1, 1, 1 }; + + NamedAnalyzer analyzer = analysis.indexAnalyzers.get("my_analyzer"); + assertAnalyzesToPositions(analyzer, source, expected, expectedPosIncrements, expectedPosLengths); + } + + public void testInvalidSeparator() { + expectThrows( + IllegalArgumentException.class, + () -> AnalysisTestsHelper.createTestAnalysisFromSettings( + Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .put("index.analysis.filter.my_concatenate_graph.type", "concatenate_graph") + .put("index.analysis.filter.my_concatenate_graph.token_separator", "11") + .build(), + new CommonAnalysisPlugin() + ) + ); + } + + /** + * Similar to the {@link #testGraph()} case, there 
will be 4 paths generated by word_delimiter_graph. + * By setting max_graph_expansions to 3, we expect an exception. + */ + public void testMaxGraphExpansion() throws IOException { + OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings( + Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .put("index.analysis.filter.my_word_delimiter.type", "word_delimiter_graph") + .put("index.analysis.filter.my_word_delimiter.catenate_words", "true") + .put("index.analysis.filter.my_concatenate_graph.type", "concatenate_graph") + .put("index.analysis.filter.my_concatenate_graph.max_graph_expansions", "3") + .put("index.analysis.analyzer.my_analyzer.type", "custom") + .put("index.analysis.analyzer.my_analyzer.tokenizer", "whitespace") + .put("index.analysis.analyzer.my_analyzer.filter", "my_word_delimiter, my_concatenate_graph") + .build(), + new CommonAnalysisPlugin() + ); + + String source = "PowerShot Is AweSome"; + + TokenStream tokenStream = analysis.indexAnalyzers.get("my_analyzer").tokenStream("dummy", source); + + tokenStream.reset(); + + expectThrows(TooComplexToDeterminizeException.class, tokenStream::incrementToken); + } +} diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/HighlighterWithAnalyzersTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/HighlighterWithAnalyzersTests.java index bc7dd3b110287..08e6aa4aa4c1d 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/HighlighterWithAnalyzersTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/HighlighterWithAnalyzersTests.java @@ -140,7 +140,6 @@ public void testNgramHighlightingWithBrokenPositions() throws IOException { client().prepareIndex("test", "test", "1").setSource("name", "ARCOTEL Hotels Deutschland").get(); refresh(); SearchResponse search = client().prepareSearch("test") - .setTypes("test") .setQuery(matchQuery("name.autocomplete", "deut tel").operator(Operator.OR)) .highlighter(new HighlightBuilder().field("name.autocomplete")) .get(); diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml index 3bca0e1b950bb..56ed2175df60a 100644 --- a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml @@ -24,9 +24,6 @@ --- "ngram_exception": - - skip: - version: " - 6.99.99" - reason: only starting from version 7.x this throws an error - do: catch: /The difference between max_gram and min_gram in NGram Tokenizer must be less than or equal to[:] \[1\] but was \[2\]\. 
This limit can be set by changing the \[index.max_ngram_diff\] index level setting\./ indices.analyze: diff --git a/modules/ingest-common/src/internalClusterTest/java/org/opensearch/ingest/common/IngestRestartIT.java b/modules/ingest-common/src/internalClusterTest/java/org/opensearch/ingest/common/IngestRestartIT.java index babf024da019b..6efd7cbcd9c41 100644 --- a/modules/ingest-common/src/internalClusterTest/java/org/opensearch/ingest/common/IngestRestartIT.java +++ b/modules/ingest-common/src/internalClusterTest/java/org/opensearch/ingest/common/IngestRestartIT.java @@ -209,7 +209,7 @@ public Settings onNodeStopped(String nodeName) { ) ); - Map source = client().prepareGet("index", "doc", "1").get().getSource(); + Map source = client().prepareGet("index", "1").get().getSource(); assertThat(source.get("x"), equalTo(0)); assertThat(source.get("y"), equalTo(0)); } @@ -242,7 +242,7 @@ public void testPipelineWithScriptProcessorThatHasStoredScript() throws Exceptio .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); - Map source = client().prepareGet("index", "doc", "1").get().getSource(); + Map source = client().prepareGet("index", "1").get().getSource(); assertThat(source.get("x"), equalTo(0)); assertThat(source.get("y"), equalTo(0)); assertThat(source.get("z"), equalTo(0)); @@ -260,7 +260,7 @@ public void testPipelineWithScriptProcessorThatHasStoredScript() throws Exceptio .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); - source = client().prepareGet("index", "doc", "2").get().getSource(); + source = client().prepareGet("index", "2").get().getSource(); assertThat(source.get("x"), equalTo(0)); assertThat(source.get("y"), equalTo(0)); assertThat(source.get("z"), equalTo(0)); @@ -281,7 +281,7 @@ public void testWithDedicatedIngestNode() throws Exception { .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); - Map source = client().prepareGet("index", "doc", "1").get().getSource(); + Map source = client().prepareGet("index", "1").get().getSource(); assertThat(source.get("x"), equalTo(0)); assertThat(source.get("y"), equalTo(0)); @@ -294,7 +294,7 @@ public void testWithDedicatedIngestNode() throws Exception { .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); - source = client(ingestNode).prepareGet("index", "doc", "2").get().getSource(); + source = client(ingestNode).prepareGet("index", "2").get().getSource(); assertThat(source.get("x"), equalTo(0)); assertThat(source.get("y"), equalTo(0)); } diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/DateIndexNameProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/DateIndexNameProcessorTests.java index 820ef3a8ee9c2..1ff2aa7fdd629 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/DateIndexNameProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/DateIndexNameProcessorTests.java @@ -60,7 +60,6 @@ public void testJavaPattern() throws Exception { ); IngestDocument document = new IngestDocument( "_index", - "_type", "_id", null, null, @@ -83,7 +82,6 @@ public void testTAI64N() throws Exception { ); IngestDocument document = new IngestDocument( "_index", - "_type", "_id", null, null, @@ -104,19 +102,11 @@ public void testUnixMs() throws Exception { "m", "yyyyMMdd" ); - IngestDocument document = new IngestDocument( - "_index", - "_type", - "_id", - null, - null, - null, - Collections.singletonMap("_field", "1000500") - ); + IngestDocument document = new IngestDocument("_index", 
"_id", null, null, null, Collections.singletonMap("_field", "1000500")); dateProcessor.execute(document); assertThat(document.getSourceAndMetadata().get("_index"), equalTo("")); - document = new IngestDocument("_index", "_type", "_id", null, null, null, Collections.singletonMap("_field", 1000500L)); + document = new IngestDocument("_index", "_id", null, null, null, Collections.singletonMap("_field", 1000500L)); dateProcessor.execute(document); assertThat(document.getSourceAndMetadata().get("_index"), equalTo("")); } @@ -131,15 +121,7 @@ public void testUnix() throws Exception { "m", "yyyyMMdd" ); - IngestDocument document = new IngestDocument( - "_index", - "_type", - "_id", - null, - null, - null, - Collections.singletonMap("_field", "1000.5") - ); + IngestDocument document = new IngestDocument("_index", "_id", null, null, null, Collections.singletonMap("_field", "1000.5")); dateProcessor.execute(document); assertThat(document.getSourceAndMetadata().get("_index"), equalTo("")); } @@ -160,7 +142,7 @@ public void testTemplatedFields() throws Exception { indexNameFormat ); - IngestDocument document = new IngestDocument("_index", "_type", "_id", null, null, null, Collections.singletonMap("_field", date)); + IngestDocument document = new IngestDocument("_index", "_id", null, null, null, Collections.singletonMap("_field", date)); dateProcessor.execute(document); assertThat( diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/DissectProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/DissectProcessorTests.java index 6f44b81e7b43b..ca0c0df40f009 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/DissectProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/DissectProcessorTests.java @@ -55,7 +55,6 @@ public class DissectProcessorTests extends OpenSearchTestCase { public void testMatch() { IngestDocument ingestDocument = new IngestDocument( "_index", - "_type", "_id", null, null, @@ -72,7 +71,6 @@ public void testMatch() { public void testMatchOverwrite() { IngestDocument ingestDocument = new IngestDocument( "_index", - "_type", "_id", null, null, @@ -90,7 +88,6 @@ public void testMatchOverwrite() { public void testAdvancedMatch() { IngestDocument ingestDocument = new IngestDocument( "_index", - "_type", "_id", null, null, @@ -116,7 +113,6 @@ public void testAdvancedMatch() { public void testMiss() { IngestDocument ingestDocument = new IngestDocument( "_index", - "_type", "_id", null, null, diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ForEachProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ForEachProcessorTests.java index f0c61700f4db0..8db3cefc3a6fd 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ForEachProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ForEachProcessorTests.java @@ -61,15 +61,7 @@ public void testExecuteWithAsyncProcessor() throws Exception { values.add("foo"); values.add("bar"); values.add("baz"); - IngestDocument ingestDocument = new IngestDocument( - "_index", - "_type", - "_id", - null, - null, - null, - Collections.singletonMap("values", values) - ); + IngestDocument ingestDocument = new IngestDocument("_index", "_id", null, null, null, Collections.singletonMap("values", values)); ForEachProcessor processor = new ForEachProcessor("_tag", null, "values", new AsyncUpperCaseProcessor("_ingest._value"), false); 
processor.execute(ingestDocument, (result, e) -> {}); @@ -87,7 +79,6 @@ public void testExecuteWithAsyncProcessor() throws Exception { public void testExecuteWithFailure() throws Exception { IngestDocument ingestDocument = new IngestDocument( "_index", - "_type", "_id", null, null, @@ -132,15 +123,7 @@ public void testMetadataAvailable() throws Exception { List> values = new ArrayList<>(); values.add(new HashMap<>()); values.add(new HashMap<>()); - IngestDocument ingestDocument = new IngestDocument( - "_index", - "_type", - "_id", - null, - null, - null, - Collections.singletonMap("values", values) - ); + IngestDocument ingestDocument = new IngestDocument("_index", "_id", null, null, null, Collections.singletonMap("values", values)); TestProcessor innerProcessor = new TestProcessor(id -> { id.setFieldValue("_ingest._value.index", id.getSourceAndMetadata().get("_index")); @@ -152,10 +135,8 @@ public void testMetadataAvailable() throws Exception { assertThat(innerProcessor.getInvokedCounter(), equalTo(2)); assertThat(ingestDocument.getFieldValue("values.0.index", String.class), equalTo("_index")); - assertThat(ingestDocument.getFieldValue("values.0.type", String.class), equalTo("_type")); assertThat(ingestDocument.getFieldValue("values.0.id", String.class), equalTo("_id")); assertThat(ingestDocument.getFieldValue("values.1.index", String.class), equalTo("_index")); - assertThat(ingestDocument.getFieldValue("values.1.type", String.class), equalTo("_type")); assertThat(ingestDocument.getFieldValue("values.1.id", String.class), equalTo("_id")); } @@ -170,7 +151,7 @@ public void testRestOfTheDocumentIsAvailable() throws Exception { document.put("values", values); document.put("flat_values", new ArrayList<>()); document.put("other", "value"); - IngestDocument ingestDocument = new IngestDocument("_index", "_type", "_id", null, null, null, document); + IngestDocument ingestDocument = new IngestDocument("_index", "_id", null, null, null, document); ForEachProcessor processor = new ForEachProcessor( "_tag", @@ -220,15 +201,7 @@ public String getDescription() { int numValues = randomIntBetween(1, 10000); List values = IntStream.range(0, numValues).mapToObj(i -> "").collect(Collectors.toList()); - IngestDocument ingestDocument = new IngestDocument( - "_index", - "_type", - "_id", - null, - null, - null, - Collections.singletonMap("values", values) - ); + IngestDocument ingestDocument = new IngestDocument("_index", "_id", null, null, null, Collections.singletonMap("values", values)); ForEachProcessor processor = new ForEachProcessor("_tag", null, "values", innerProcessor, false); processor.execute(ingestDocument, (result, e) -> {}); @@ -244,15 +217,7 @@ public void testModifyFieldsOutsideArray() throws Exception { values.add("string"); values.add(1); values.add(null); - IngestDocument ingestDocument = new IngestDocument( - "_index", - "_type", - "_id", - null, - null, - null, - Collections.singletonMap("values", values) - ); + IngestDocument ingestDocument = new IngestDocument("_index", "_id", null, null, null, Collections.singletonMap("values", values)); TemplateScript.Factory template = new TestTemplateService.MockTemplateScript.Factory("errors"); @@ -290,7 +255,7 @@ public void testScalarValueAllowsUnderscoreValueFieldToRemainAccessible() throws Map source = new HashMap<>(); source.put("_value", "new_value"); source.put("values", values); - IngestDocument ingestDocument = new IngestDocument("_index", "_type", "_id", null, null, null, source); + IngestDocument ingestDocument = new 
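        // ForEachProcessor applies the inner processor to each element of the "values" list in turn,
        // exposing the element currently being processed to the inner processor as "_ingest._value".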
IngestDocument("_index", "_id", null, null, null, source); TestProcessor processor = new TestProcessor( doc -> doc.setFieldValue("_ingest._value", doc.getFieldValue("_source._value", String.class)) @@ -320,15 +285,7 @@ public void testNestedForEach() throws Exception { value.put("values2", innerValues); values.add(value); - IngestDocument ingestDocument = new IngestDocument( - "_index", - "_type", - "_id", - null, - null, - null, - Collections.singletonMap("values1", values) - ); + IngestDocument ingestDocument = new IngestDocument("_index", "_id", null, null, null, Collections.singletonMap("values1", values)); TestProcessor testProcessor = new TestProcessor( doc -> doc.setFieldValue("_ingest._value", doc.getFieldValue("_ingest._value", String.class).toUpperCase(Locale.ENGLISH)) @@ -352,7 +309,7 @@ public void testNestedForEach() throws Exception { } public void testIgnoreMissing() throws Exception { - IngestDocument originalIngestDocument = new IngestDocument("_index", "_type", "_id", null, null, null, Collections.emptyMap()); + IngestDocument originalIngestDocument = new IngestDocument("_index", "_id", null, null, null, Collections.emptyMap()); IngestDocument ingestDocument = new IngestDocument(originalIngestDocument); TestProcessor testProcessor = new TestProcessor(doc -> {}); ForEachProcessor processor = new ForEachProcessor("_tag", null, "_ingest._value", testProcessor, true); @@ -363,7 +320,7 @@ public void testIgnoreMissing() throws Exception { public void testAppendingToTheSameField() { Map source = Collections.singletonMap("field", Arrays.asList("a", "b")); - IngestDocument originalIngestDocument = new IngestDocument("_index", "_type", "_id", null, null, null, source); + IngestDocument originalIngestDocument = new IngestDocument("_index", "_id", null, null, null, source); IngestDocument ingestDocument = new IngestDocument(originalIngestDocument); TestProcessor testProcessor = new TestProcessor(id -> id.appendFieldValue("field", "a")); ForEachProcessor processor = new ForEachProcessor("_tag", null, "field", testProcessor, true); @@ -375,7 +332,7 @@ public void testAppendingToTheSameField() { public void testRemovingFromTheSameField() { Map source = Collections.singletonMap("field", Arrays.asList("a", "b")); - IngestDocument originalIngestDocument = new IngestDocument("_index", "_id", "_type", null, null, null, source); + IngestDocument originalIngestDocument = new IngestDocument("_index", "_id", null, null, null, source); IngestDocument ingestDocument = new IngestDocument(originalIngestDocument); TestProcessor testProcessor = new TestProcessor(id -> id.removeField("field.0")); ForEachProcessor processor = new ForEachProcessor("_tag", null, "field", testProcessor, true); diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/200_dissect_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/200_dissect_processor.yml index 709d4b9e62d43..916a7fe656cc2 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/200_dissect_processor.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/200_dissect_processor.yml @@ -62,7 +62,6 @@ teardown: catch: '/Unable to find match for dissect pattern: \%\{a\},\%\{b\},\%\{c\} against source: foo bar baz/' index: index: test - type: test id: 2 pipeline: "my_pipeline" body: {message: "foo bar baz"} diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_foreach.yml 
b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_foreach.yml index 7fbf182eac05f..9142317ce1507 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_foreach.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_foreach.yml @@ -31,7 +31,6 @@ teardown: - do: index: index: test - type: test id: 1 pipeline: "my_pipeline" body: > @@ -42,7 +41,6 @@ teardown: - do: get: index: test - type: test id: 1 - match: { _source.values: ["FOO", "BAR", "BAZ"] } diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/90_simulate.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/90_simulate.yml index 2224d56165fd3..e012a82b15927 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/90_simulate.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/90_simulate.yml @@ -32,7 +32,6 @@ teardown: "docs": [ { "_index": "index", - "_type": "type", "_id": "id", "_source": { "foo": "bar" @@ -66,7 +65,6 @@ teardown: "docs": [ { "_index": "index", - "_type": "type", "_id": "id", "_source": { "foo": "bar" @@ -97,7 +95,6 @@ teardown: "docs": [ { "_index": "index", - "_type": "type", "_id": "id", "_source": { "foo": "bar" @@ -112,7 +109,7 @@ teardown: - match: { error.root_cause.0.property_name: "field" } --- -"Test simulate without index type and id": +"Test simulate without id": - do: ingest.simulate: body: > @@ -166,7 +163,6 @@ teardown: "docs": [ { "_index": "index", - "_type": "type", "_id": "id", "_source": { "foo": "bar" @@ -190,7 +186,6 @@ teardown: "docs": [ { "_index": "index", - "_type": "type", "_id": "id", "_source": { "foo": "bar" @@ -223,7 +218,6 @@ teardown: "docs": [ { "_index": "index", - "_type": "type", "_id": "id", "_source": { "foo": "bar" @@ -275,7 +269,6 @@ teardown: "docs": [ { "_index": "index", - "_type": "type", "_id": "id", "_source": { "foo": { @@ -335,7 +328,6 @@ teardown: "docs": [ { "_index": "index", - "_type": "type", "_id": "id", "_source": { "not_foo": "bar" @@ -343,7 +335,6 @@ teardown: }, { "_index": "index", - "_type": "type", "_id": "id2", "_source": { "foo": "bar" @@ -383,7 +374,6 @@ teardown: "docs": [ { "_index": "index", - "_type": "type", "_id": "id", "_source": { "foo": "bar", @@ -392,7 +382,6 @@ teardown: }, { "_index": "index", - "_type": "type", "_id": "id2", "_source": { "foo": "5", @@ -525,7 +514,6 @@ teardown: "docs": [ { "_index": "index", - "_type": "type", "_id": "id", "_source": { "field1": "123.42 400 " @@ -602,7 +590,6 @@ teardown: "docs": [ { "_index": "index", - "_type": "type", "_id": "id", "_source": { "field1": "123.42 400 " @@ -655,7 +642,6 @@ teardown: "docs": [ { "_index": "index", - "_type": "type", "_id": "id", "_source": { "field1": "123.42 400 " @@ -729,7 +715,6 @@ teardown: "docs": [ { "_index": "index", - "_type": "type", "_id": "id", "_source": { "field1": "123.42 400 " @@ -804,7 +789,6 @@ teardown: "docs": [ { "_index": "index", - "_type": "type", "_id": "id", "_source": { "field1": "123.42 400 " diff --git a/modules/ingest-geoip/build.gradle b/modules/ingest-geoip/build.gradle index 4eedf598c3f87..f78dc49e9fb8a 100644 --- a/modules/ingest-geoip/build.gradle +++ b/modules/ingest-geoip/build.gradle @@ -39,11 +39,11 @@ opensearchplugin { } dependencies { - api('com.maxmind.geoip2:geoip2:2.13.1') + api('com.maxmind.geoip2:geoip2:2.16.1') // geoip2 dependencies: 
api("com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}") api("com.fasterxml.jackson.core:jackson-databind:${versions.jackson}") - api('com.maxmind.db:maxmind-db:1.3.1') + api('com.maxmind.db:maxmind-db:2.0.0') testImplementation 'org.elasticsearch:geolite2-databases:20191119' } @@ -71,10 +71,8 @@ tasks.named("thirdPartyAudit").configure { ignoreMissingClasses( // geoip WebServiceClient needs apache http client, but we're not using WebServiceClient: 'org.apache.http.HttpEntity', - 'org.apache.http.HttpHost', 'org.apache.http.HttpResponse', 'org.apache.http.StatusLine', - 'org.apache.http.auth.UsernamePasswordCredentials', 'org.apache.http.client.config.RequestConfig$Builder', 'org.apache.http.client.config.RequestConfig', 'org.apache.http.client.methods.CloseableHttpResponse', diff --git a/modules/ingest-geoip/licenses/geoip2-2.13.1.jar.sha1 b/modules/ingest-geoip/licenses/geoip2-2.13.1.jar.sha1 deleted file mode 100644 index 253d9f12e7a3a..0000000000000 --- a/modules/ingest-geoip/licenses/geoip2-2.13.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f27d1a49d5a29dd4a7ac5006ce2eb16b8b9bb888 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/geoip2-2.16.1.jar.sha1 b/modules/ingest-geoip/licenses/geoip2-2.16.1.jar.sha1 new file mode 100644 index 0000000000000..0221476794d3a --- /dev/null +++ b/modules/ingest-geoip/licenses/geoip2-2.16.1.jar.sha1 @@ -0,0 +1 @@ +c92040bd6ef2cb59be71c6749d08c141ca546caf \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-annotations-2.12.5.jar.sha1 b/modules/ingest-geoip/licenses/jackson-annotations-2.12.5.jar.sha1 deleted file mode 100644 index 797bcf2b161d4..0000000000000 --- a/modules/ingest-geoip/licenses/jackson-annotations-2.12.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -52d929d5bb21d0186fe24c09624cc3ee4bafc3b3 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-annotations-2.12.6.jar.sha1 b/modules/ingest-geoip/licenses/jackson-annotations-2.12.6.jar.sha1 new file mode 100644 index 0000000000000..48ee3bf53c630 --- /dev/null +++ b/modules/ingest-geoip/licenses/jackson-annotations-2.12.6.jar.sha1 @@ -0,0 +1 @@ +9487231edd6b0b1f14692c9cba9e0462809215d1 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-databind-2.12.5.jar.sha1 b/modules/ingest-geoip/licenses/jackson-databind-2.12.5.jar.sha1 deleted file mode 100644 index ca1bd46bc3cd3..0000000000000 --- a/modules/ingest-geoip/licenses/jackson-databind-2.12.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b064cf057f23d3d35390328c5030847efeffedde \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-databind-2.12.6.jar.sha1 b/modules/ingest-geoip/licenses/jackson-databind-2.12.6.jar.sha1 new file mode 100644 index 0000000000000..f74842887d31b --- /dev/null +++ b/modules/ingest-geoip/licenses/jackson-databind-2.12.6.jar.sha1 @@ -0,0 +1 @@ +fac216b606c1086e36acea6e572ee61572ad1670 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/maxmind-db-1.3.1.jar.sha1 b/modules/ingest-geoip/licenses/maxmind-db-1.3.1.jar.sha1 deleted file mode 100644 index aebff2c3a849c..0000000000000 --- a/modules/ingest-geoip/licenses/maxmind-db-1.3.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -211bca628225bc0f719051b16deb03a747d7a14f \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/maxmind-db-2.0.0.jar.sha1 b/modules/ingest-geoip/licenses/maxmind-db-2.0.0.jar.sha1 new file mode 100644 index 0000000000000..32c18f89c6a29 --- /dev/null +++ 
b/modules/ingest-geoip/licenses/maxmind-db-2.0.0.jar.sha1 @@ -0,0 +1 @@ +e7e0fd82da0a160b7928ba214e699a7e6a74fff4 \ No newline at end of file diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/opensearch/ingest/geoip/GeoIpProcessorNonIngestNodeIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/opensearch/ingest/geoip/GeoIpProcessorNonIngestNodeIT.java index 2ef5d8da000b1..e88c77b8e33f4 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/opensearch/ingest/geoip/GeoIpProcessorNonIngestNodeIT.java +++ b/modules/ingest-geoip/src/internalClusterTest/java/org/opensearch/ingest/geoip/GeoIpProcessorNonIngestNodeIT.java @@ -167,7 +167,7 @@ public void testLazyLoading() throws IOException { internalCluster().getInstance(IngestService.class, ingestNode); // the geo-IP database should not be loaded yet as we have not indexed any documents using a pipeline that has a geo-IP processor assertDatabaseLoadStatus(ingestNode, false); - final IndexRequest indexRequest = new IndexRequest("index", "_doc"); + final IndexRequest indexRequest = new IndexRequest("index"); indexRequest.setPipeline("geoip"); indexRequest.source(Collections.singletonMap("ip", "1.1.1.1")); final IndexResponse indexResponse = client().index(indexRequest).actionGet(); diff --git a/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/GeoIpProcessorFactoryTests.java b/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/GeoIpProcessorFactoryTests.java index 15ca93e0fbae4..cda2f5692b0db 100644 --- a/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/GeoIpProcessorFactoryTests.java +++ b/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/GeoIpProcessorFactoryTests.java @@ -286,7 +286,7 @@ public void testLazyLoading() throws Exception { } final Map field = Collections.singletonMap("_field", "1.1.1.1"); - final IngestDocument document = new IngestDocument("index", "type", "id", "routing", 1L, VersionType.EXTERNAL, field); + final IngestDocument document = new IngestDocument("index", "id", "routing", 1L, VersionType.EXTERNAL, field); Map config = new HashMap<>(); config.put("field", "_field"); @@ -343,7 +343,7 @@ public void testLoadingCustomDatabase() throws IOException { } final Map field = Collections.singletonMap("_field", "1.1.1.1"); - final IngestDocument document = new IngestDocument("index", "type", "id", "routing", 1L, VersionType.EXTERNAL, field); + final IngestDocument document = new IngestDocument("index", "id", "routing", 1L, VersionType.EXTERNAL, field); Map config = new HashMap<>(); config.put("field", "_field"); @@ diff --git a/modules/lang-expression/build.gradle b/modules/lang-expression/build.gradle index f7d5b7d039afc..dabbfde754f92 100644 --- a/modules/lang-expression/build.gradle +++ b/modules/lang-expression/build.gradle @@ -37,8 +37,8 @@ opensearchplugin { dependencies { api "org.apache.lucene:lucene-expressions:${versions.lucene}" - api 'org.antlr:antlr4-runtime:4.5.1-1' - api 'org.ow2.asm:asm:5.0.4' + api 'org.antlr:antlr4-runtime:4.9.3' + api 'org.ow2.asm:asm:9.2' api 'org.ow2.asm:asm-commons:5.0.4' api 'org.ow2.asm:asm-tree:5.0.4' } diff --git a/modules/lang-expression/licenses/antlr4-runtime-4.5.1-1.jar.sha1 b/modules/lang-expression/licenses/antlr4-runtime-4.5.1-1.jar.sha1 deleted file mode 100644 index f15e50069ba63..0000000000000 --- a/modules/lang-expression/licenses/antlr4-runtime-4.5.1-1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -66144204f9d6d7d3f3f775622c2dd7e9bd511d97 diff --git
a/modules/lang-expression/licenses/antlr4-runtime-4.9.3.jar.sha1 b/modules/lang-expression/licenses/antlr4-runtime-4.9.3.jar.sha1 new file mode 100644 index 0000000000000..13a2367439ede --- /dev/null +++ b/modules/lang-expression/licenses/antlr4-runtime-4.9.3.jar.sha1 @@ -0,0 +1 @@ +81befc16ebedb8b8aea3e4c0835dd5ca7e8523a8 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-5.0.4.jar.sha1 b/modules/lang-expression/licenses/asm-5.0.4.jar.sha1 deleted file mode 100644 index 9223dba380f8c..0000000000000 --- a/modules/lang-expression/licenses/asm-5.0.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0da08b8cce7bbf903602a25a3a163ae252435795 diff --git a/modules/lang-expression/licenses/asm-9.2.jar.sha1 b/modules/lang-expression/licenses/asm-9.2.jar.sha1 new file mode 100644 index 0000000000000..28f456d3cbcb2 --- /dev/null +++ b/modules/lang-expression/licenses/asm-9.2.jar.sha1 @@ -0,0 +1 @@ +81a03f76019c67362299c40e0ba13405f5467bff \ No newline at end of file diff --git a/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/StoredExpressionIT.java b/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/StoredExpressionIT.java index 2a8236d5e0e4b..05064f66fef80 100644 --- a/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/StoredExpressionIT.java +++ b/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/StoredExpressionIT.java @@ -85,7 +85,6 @@ public void testAllOpsDisabledIndexedScripts() throws IOException { new SearchSourceBuilder().scriptField("test1", new Script(ScriptType.STORED, null, "script1", Collections.emptyMap())) ) .setIndices("test") - .setTypes("scriptTest") .get(); fail("search script should have been rejected"); } catch (Exception e) { diff --git a/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionFieldScriptTests.java b/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionFieldScriptTests.java index 0e0f21405818b..d0941cbc9452f 100644 --- a/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionFieldScriptTests.java +++ b/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionFieldScriptTests.java @@ -77,7 +77,7 @@ public void setUp() throws Exception { when(fieldData.load(any())).thenReturn(atomicFieldData); service = new ExpressionScriptEngine(); - lookup = new SearchLookup(mapperService, (ignored, lookup) -> fieldData, null); + lookup = new SearchLookup(mapperService, (ignored, lookup) -> fieldData); } private FieldScript.LeafFactory compile(String expression) { diff --git a/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionNumberSortScriptTests.java b/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionNumberSortScriptTests.java index 83b5c0930d1d0..f3559da59f992 100644 --- a/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionNumberSortScriptTests.java +++ b/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionNumberSortScriptTests.java @@ -76,7 +76,7 @@ public void setUp() throws Exception { when(fieldData.load(any())).thenReturn(atomicFieldData); service = new ExpressionScriptEngine(); - lookup = new SearchLookup(mapperService, (ignored, lookup) -> fieldData, null); + lookup = new SearchLookup(mapperService, (ignored, lookup) -> fieldData); } private NumberSortScript.LeafFactory compile(String 
expression) { diff --git a/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionTermsSetQueryTests.java b/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionTermsSetQueryTests.java index a71932ded1a7a..af7fc580f8a65 100644 --- a/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionTermsSetQueryTests.java +++ b/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionTermsSetQueryTests.java @@ -76,7 +76,7 @@ public void setUp() throws Exception { when(fieldData.load(any())).thenReturn(atomicFieldData); service = new ExpressionScriptEngine(); - lookup = new SearchLookup(mapperService, (ignored, lookup) -> fieldData, null); + lookup = new SearchLookup(mapperService, (ignored, lookup) -> fieldData); } private TermsSetQueryScript.LeafFactory compile(String expression) { diff --git a/modules/lang-mustache/build.gradle b/modules/lang-mustache/build.gradle index a26798bf90b91..511a6b144c21a 100644 --- a/modules/lang-mustache/build.gradle +++ b/modules/lang-mustache/build.gradle @@ -38,7 +38,7 @@ opensearchplugin { } dependencies { - api "com.github.spullara.mustache.java:compiler:0.9.6" + api "com.github.spullara.mustache.java:compiler:0.9.10" } restResources { diff --git a/modules/lang-mustache/licenses/compiler-0.9.10.jar.sha1 b/modules/lang-mustache/licenses/compiler-0.9.10.jar.sha1 new file mode 100644 index 0000000000000..6336318c2ce1a --- /dev/null +++ b/modules/lang-mustache/licenses/compiler-0.9.10.jar.sha1 @@ -0,0 +1 @@ +6111ae24e3be9ecbd75f5fe908583fc14b4f0174 \ No newline at end of file diff --git a/modules/lang-mustache/licenses/compiler-0.9.6.jar.sha1 b/modules/lang-mustache/licenses/compiler-0.9.6.jar.sha1 deleted file mode 100644 index 9c0e54641475b..0000000000000 --- a/modules/lang-mustache/licenses/compiler-0.9.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1b8707299c34406ed0ba40bbf8513352ac4765c9 \ No newline at end of file diff --git a/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/SearchTemplateIT.java b/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/SearchTemplateIT.java index df53fcc0c3b6f..7622eb55b7b49 100644 --- a/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/SearchTemplateIT.java +++ b/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/SearchTemplateIT.java @@ -196,9 +196,11 @@ public void testIndexedTemplateClient() throws Exception { Map templateParams = new HashMap<>(); templateParams.put("fieldParam", "foo"); - SearchTemplateResponse searchResponse = new SearchTemplateRequestBuilder(client()).setRequest( - new SearchRequest("test").types("type") - ).setScript("testTemplate").setScriptType(ScriptType.STORED).setScriptParams(templateParams).get(); + SearchTemplateResponse searchResponse = new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest("test")) + .setScript("testTemplate") + .setScriptType(ScriptType.STORED) + .setScriptParams(templateParams) + .get(); assertHitCount(searchResponse.getResponse(), 4); assertAcked(client().admin().cluster().prepareDeleteStoredScript("testTemplate")); @@ -238,14 +240,16 @@ public void testIndexedTemplate() throws Exception { Map templateParams = new HashMap<>(); templateParams.put("fieldParam", "foo"); - SearchTemplateResponse searchResponse = new SearchTemplateRequestBuilder(client()).setRequest( - new SearchRequest().indices("test").types("type") - 
).setScript("1a").setScriptType(ScriptType.STORED).setScriptParams(templateParams).get(); + SearchTemplateResponse searchResponse = new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest().indices("test")) + .setScript("1a") + .setScriptType(ScriptType.STORED) + .setScriptParams(templateParams) + .get(); assertHitCount(searchResponse.getResponse(), 4); expectThrows( ResourceNotFoundException.class, - () -> new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest().indices("test").types("type")) + () -> new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest().indices("test")) .setScript("1000") .setScriptType(ScriptType.STORED) .setScriptParams(templateParams) @@ -253,7 +257,7 @@ public void testIndexedTemplate() throws Exception { ); templateParams.put("fieldParam", "bar"); - searchResponse = new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest("test").types("type")) + searchResponse = new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest("test")) .setScript("2") .setScriptType(ScriptType.STORED) .setScriptParams(templateParams) @@ -304,7 +308,7 @@ public void testIndexedTemplateOverwrite() throws Exception { IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest("testindex").types("test")) + () -> new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest("testindex")) .setScript("git01") .setScriptType(ScriptType.STORED) .setScriptParams(templateParams) @@ -320,9 +324,11 @@ public void testIndexedTemplateOverwrite() throws Exception { .setContent(new BytesArray(query.replace("{{slop}}", Integer.toString(0))), XContentType.JSON) ); - SearchTemplateResponse searchResponse = new SearchTemplateRequestBuilder(client()).setRequest( - new SearchRequest("testindex").types("test") - ).setScript("git01").setScriptType(ScriptType.STORED).setScriptParams(templateParams).get(); + SearchTemplateResponse searchResponse = new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest("testindex")) + .setScript("git01") + .setScriptType(ScriptType.STORED) + .setScriptParams(templateParams) + .get(); assertHitCount(searchResponse.getResponse(), 1); } } @@ -360,9 +366,11 @@ public void testIndexedTemplateWithArray() throws Exception { String[] fieldParams = { "foo", "bar" }; arrayTemplateParams.put("fieldParam", fieldParams); - SearchTemplateResponse searchResponse = new SearchTemplateRequestBuilder(client()).setRequest( - new SearchRequest("test").types("type") - ).setScript("4").setScriptType(ScriptType.STORED).setScriptParams(arrayTemplateParams).get(); + SearchTemplateResponse searchResponse = new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest("test")) + .setScript("4") + .setScriptType(ScriptType.STORED) + .setScriptParams(arrayTemplateParams) + .get(); assertHitCount(searchResponse.getResponse(), 5); } diff --git a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/RestMultiSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/RestMultiSearchTemplateAction.java index c4c7ec9bf12b9..fc5a0ff601a00 100644 --- a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/RestMultiSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/RestMultiSearchTemplateAction.java @@ -33,7 +33,6 @@ package org.opensearch.script.mustache; import org.opensearch.client.node.NodeClient; 
-import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; @@ -53,9 +52,6 @@ import static org.opensearch.rest.RestRequest.Method.POST; public class RestMultiSearchTemplateAction extends BaseRestHandler { - private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestMultiSearchTemplateAction.class); - static final String TYPES_DEPRECATION_MESSAGE = "[types removal]" - + " Specifying types in multi search template requests is deprecated."; private static final Set RESPONSE_PARAMS; @@ -95,14 +91,6 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { MultiSearchTemplateRequest multiRequest = parseRequest(request, allowExplicitIndex); - - // Emit a single deprecation message if any search template contains types. - for (SearchTemplateRequest searchTemplateRequest : multiRequest.requests()) { - if (searchTemplateRequest.getRequest().types().length > 0) { - deprecationLogger.deprecate("msearch_with_types", TYPES_DEPRECATION_MESSAGE); - break; - } - } return channel -> client.execute(MultiSearchTemplateAction.INSTANCE, multiRequest, new RestToXContentListener<>(channel)); } diff --git a/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/MultiSearchTemplateRequestTests.java b/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/MultiSearchTemplateRequestTests.java index aaf3126876a59..1a663dcb18235 100644 --- a/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/MultiSearchTemplateRequestTests.java +++ b/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/MultiSearchTemplateRequestTests.java @@ -69,13 +69,10 @@ public void testParseRequest() throws Exception { assertThat(request.requests().get(0).getRequest().preference(), nullValue()); assertThat(request.requests().get(1).getRequest().indices()[0], equalTo("test2")); assertThat(request.requests().get(1).getRequest().indices()[1], equalTo("test3")); - assertThat(request.requests().get(1).getRequest().types()[0], equalTo("type1")); assertThat(request.requests().get(1).getRequest().requestCache(), nullValue()); assertThat(request.requests().get(1).getRequest().preference(), equalTo("_local")); assertThat(request.requests().get(2).getRequest().indices()[0], equalTo("test4")); assertThat(request.requests().get(2).getRequest().indices()[1], equalTo("test1")); - assertThat(request.requests().get(2).getRequest().types()[0], equalTo("type2")); - assertThat(request.requests().get(2).getRequest().types()[1], equalTo("type1")); assertThat(request.requests().get(2).getRequest().routing(), equalTo("123")); assertNotNull(request.requests().get(0).getScript()); assertNotNull(request.requests().get(1).getScript()); diff --git a/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/RestMultiSearchTemplateActionTests.java b/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/RestMultiSearchTemplateActionTests.java deleted file mode 100644 index 655d49a0273b5..0000000000000 --- a/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/RestMultiSearchTemplateActionTests.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.script.mustache; - -import org.opensearch.common.bytes.BytesArray; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.rest.RestRequest; -import org.opensearch.test.rest.FakeRestRequest; -import org.opensearch.test.rest.RestActionTestCase; -import org.junit.Before; - -import java.nio.charset.StandardCharsets; - -public class RestMultiSearchTemplateActionTests extends RestActionTestCase { - - @Before - public void setUpAction() { - controller().registerHandler(new RestMultiSearchTemplateAction(Settings.EMPTY)); - } - - public void testTypeInPath() { - String content = "{ \"index\": \"some_index\" } \n" + "{\"source\": {\"query\" : {\"match_all\" :{}}}} \n"; - BytesArray bytesContent = new BytesArray(content.getBytes(StandardCharsets.UTF_8)); - - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.GET) - .withPath("/some_index/some_type/_msearch/template") - .withContent(bytesContent, XContentType.JSON) - .build(); - // We're not actually testing anything to do with the client, but need to set this so it doesn't fail the test for being unset. - verifyingClient.setExecuteVerifier((arg1, arg2) -> null); - - dispatchRequest(request); - assertWarnings(RestMultiSearchTemplateAction.TYPES_DEPRECATION_MESSAGE); - } - - public void testTypeInBody() { - String content = "{ \"index\": \"some_index\", \"type\": \"some_type\" } \n" + "{\"source\": {\"query\" : {\"match_all\" :{}}}} \n"; - BytesArray bytesContent = new BytesArray(content.getBytes(StandardCharsets.UTF_8)); - - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withPath("/some_index/_msearch/template") - .withContent(bytesContent, XContentType.JSON) - .build(); - // We're not actually testing anything to do with the client, but need to set this so it doesn't fail the test for being unset. 
- verifyingClient.setExecuteVerifier((arg1, arg2) -> null); - - dispatchRequest(request); - assertWarnings(RestMultiSearchTemplateAction.TYPES_DEPRECATION_MESSAGE); - } -} diff --git a/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/RestSearchTemplateActionTests.java b/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/RestSearchTemplateActionTests.java deleted file mode 100644 index 4f95da755f8fc..0000000000000 --- a/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/RestSearchTemplateActionTests.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.script.mustache; - -import org.opensearch.rest.RestRequest; -import org.opensearch.rest.action.search.RestSearchAction; -import org.opensearch.test.rest.FakeRestRequest; -import org.opensearch.test.rest.RestActionTestCase; -import org.junit.Before; - -import java.util.HashMap; -import java.util.Map; - -public class RestSearchTemplateActionTests extends RestActionTestCase { - - @Before - public void setUpAction() { - controller().registerHandler(new RestSearchTemplateAction()); - } - - public void testTypeInPath() { - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.GET) - .withPath("/some_index/some_type/_search/template") - .build(); - - dispatchRequest(request); - assertWarnings(RestSearchAction.TYPES_DEPRECATION_MESSAGE); - } - - public void testTypeParameter() { - Map params = new HashMap<>(); - params.put("type", "some_type"); - - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.GET) - .withPath("/some_index/_search/template") - .withParams(params) - .build(); - - dispatchRequest(request); - assertWarnings(RestSearchAction.TYPES_DEPRECATION_MESSAGE); - } -} diff --git a/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/SearchTemplateResponseTests.java b/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/SearchTemplateResponseTests.java index 84734e55e241c..0a2bb247e3c1a 100644 --- a/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/SearchTemplateResponseTests.java +++ b/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/SearchTemplateResponseTests.java @@ -36,7 +36,6 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.ShardSearchFailure; import 
org.opensearch.common.bytes.BytesReference; -import org.opensearch.common.text.Text; import org.opensearch.common.xcontent.ToXContent; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; @@ -183,7 +182,7 @@ public void testSourceToXContent() throws IOException { } public void testSearchResponseToXContent() throws IOException { - SearchHit hit = new SearchHit(1, "id", new Text("type"), Collections.emptyMap(), Collections.emptyMap()); + SearchHit hit = new SearchHit(1, "id", Collections.emptyMap(), Collections.emptyMap()); hit.score(2.0f); SearchHit[] hits = new SearchHit[] { hit }; @@ -229,7 +228,6 @@ public void testSearchResponseToXContent() throws IOException { .field("max_score", 1.5F) .startArray("hits") .startObject() - .field("_type", "type") .field("_id", "id") .field("_score", 2.0F) .endObject() diff --git a/modules/lang-mustache/src/test/resources/org/opensearch/script/mustache/simple-msearch-template.json b/modules/lang-mustache/src/test/resources/org/opensearch/script/mustache/simple-msearch-template.json index 11a0091492c4d..1809b4012fde1 100644 --- a/modules/lang-mustache/src/test/resources/org/opensearch/script/mustache/simple-msearch-template.json +++ b/modules/lang-mustache/src/test/resources/org/opensearch/script/mustache/simple-msearch-template.json @@ -1,6 +1,6 @@ {"index":["test0", "test1"], "request_cache": true} {"source": {"query" : {"match_{{template}}" :{}}}, "params": {"template": "all" } } -{"index" : "test2,test3", "type" : "type1", "preference": "_local"} +{"index" : "test2,test3", "preference": "_local"} {"source": {"query" : {"match_{{template}}" :{}}}, "params": {"template": "all" } } -{"index" : ["test4", "test1"], "type" : [ "type2", "type1" ], "routing": "123"} +{"index" : ["test4", "test1"], "routing": "123"} {"source": {"query" : {"match_{{template}}" :{}}}, "params": {"template": "all" } } diff --git a/modules/lang-mustache/src/yamlRestTest/resources/rest-api-spec/test/lang_mustache/30_search_template.yml b/modules/lang-mustache/src/yamlRestTest/resources/rest-api-spec/test/lang_mustache/30_search_template.yml index 22192530b9ec1..a9d3c2da68617 100644 --- a/modules/lang-mustache/src/yamlRestTest/resources/rest-api-spec/test/lang_mustache/30_search_template.yml +++ b/modules/lang-mustache/src/yamlRestTest/resources/rest-api-spec/test/lang_mustache/30_search_template.yml @@ -141,10 +141,6 @@ --- "Test with new response format": - - skip: - version: " - 6.99.99" - reason: hits.total is returned as an object in 7.0.0 - - do: index: index: test diff --git a/modules/lang-mustache/src/yamlRestTest/resources/rest-api-spec/test/lang_mustache/50_multi_search_template.yml b/modules/lang-mustache/src/yamlRestTest/resources/rest-api-spec/test/lang_mustache/50_multi_search_template.yml index fa56f5c0f72b1..e92e10b9ad276 100644 --- a/modules/lang-mustache/src/yamlRestTest/resources/rest-api-spec/test/lang_mustache/50_multi_search_template.yml +++ b/modules/lang-mustache/src/yamlRestTest/resources/rest-api-spec/test/lang_mustache/50_multi_search_template.yml @@ -174,10 +174,6 @@ setup: --- "Test with rest_total_hits_as_int": - - skip: - version: " - 6.99.99" - reason: hits.total is returned as an object in 7.0.0 - - do: put_script: id: stored_template_1 diff --git a/modules/lang-mustache/src/yamlRestTest/resources/rest-api-spec/test/lang_mustache/60_typed_keys.yml b/modules/lang-mustache/src/yamlRestTest/resources/rest-api-spec/test/lang_mustache/60_typed_keys.yml index d59bfa9ffc322..accb55624dd06 100644 --- 
a/modules/lang-mustache/src/yamlRestTest/resources/rest-api-spec/test/lang_mustache/60_typed_keys.yml +++ b/modules/lang-mustache/src/yamlRestTest/resources/rest-api-spec/test/lang_mustache/60_typed_keys.yml @@ -25,15 +25,15 @@ setup: bulk: refresh: true body: - - '{"index": {"_index": "test-0", "_type": "_doc"}}' + - '{"index": {"_index": "test-0"}}' - '{"ip": "10.0.0.1", "integer": 38, "float": 12.5713, "name": "Ruth", "bool": true}' - - '{"index": {"_index": "test-0", "_type": "_doc"}}' + - '{"index": {"_index": "test-0"}}' - '{"ip": "10.0.0.2", "integer": 42, "float": 15.3393, "name": "Jackie", "surname": "Bowling", "bool": false}' - - '{"index": {"_index": "test-1", "_type": "_doc"}}' + - '{"index": {"_index": "test-1"}}' - '{"ip": "10.0.0.3", "integer": 29, "float": 19.0517, "name": "Stephanie", "bool": true}' - - '{"index": {"_index": "test-1", "_type": "_doc"}}' + - '{"index": {"_index": "test-1"}}' - '{"ip": "10.0.0.4", "integer": 19, "float": 19.3717, "surname": "Hamilton", "bool": true}' - - '{"index": {"_index": "test-2", "_type": "_doc"}}' + - '{"index": {"_index": "test-2"}}' - '{"ip": "10.0.0.5", "integer": 0, "float": 17.3349, "name": "Natalie", "bool": false}' --- diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle index 298f28be8cc54..eb93cdc77fb9c 100644 --- a/modules/lang-painless/build.gradle +++ b/modules/lang-painless/build.gradle @@ -46,11 +46,11 @@ testClusters.all { dependencies { api 'org.antlr:antlr4-runtime:4.5.3' - api 'org.ow2.asm:asm-util:7.2' + api 'org.ow2.asm:asm-util:9.2' api 'org.ow2.asm:asm-tree:7.2' api 'org.ow2.asm:asm-commons:7.2' api 'org.ow2.asm:asm-analysis:7.2' - api 'org.ow2.asm:asm:7.2' + api 'org.ow2.asm:asm:9.2' api project('spi') } diff --git a/modules/lang-painless/licenses/asm-7.2.jar.sha1 b/modules/lang-painless/licenses/asm-7.2.jar.sha1 deleted file mode 100644 index acb97fc1a0249..0000000000000 --- a/modules/lang-painless/licenses/asm-7.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fa637eb67eb7628c915d73762b681ae7ff0b9731 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-9.2.jar.sha1 b/modules/lang-painless/licenses/asm-9.2.jar.sha1 new file mode 100644 index 0000000000000..28f456d3cbcb2 --- /dev/null +++ b/modules/lang-painless/licenses/asm-9.2.jar.sha1 @@ -0,0 +1 @@ +81a03f76019c67362299c40e0ba13405f5467bff \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-util-7.2.jar.sha1 b/modules/lang-painless/licenses/asm-util-7.2.jar.sha1 deleted file mode 100644 index 6f70a0eea65ab..0000000000000 --- a/modules/lang-painless/licenses/asm-util-7.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a3ae34e57fa8a4040e28247291d0cc3d6b8c7bcf \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-util-9.2.jar.sha1 b/modules/lang-painless/licenses/asm-util-9.2.jar.sha1 new file mode 100644 index 0000000000000..5cb89aa115f30 --- /dev/null +++ b/modules/lang-painless/licenses/asm-util-9.2.jar.sha1 @@ -0,0 +1 @@ +fbc178fc5ba3dab50fd7e8a5317b8b647c8e8946 \ No newline at end of file diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/WhenThingsGoWrongTests.java b/modules/lang-painless/src/test/java/org/opensearch/painless/WhenThingsGoWrongTests.java index d0041b22929e1..fb8d2eccfa043 100644 --- a/modules/lang-painless/src/test/java/org/opensearch/painless/WhenThingsGoWrongTests.java +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/WhenThingsGoWrongTests.java @@ -206,13 +206,10 @@ public void testDynamicListWrongIndex() { * the parser 
with right-curly brackets to allow statements to be delimited by them at the end of blocks. */ public void testRCurlyNotDelim() { - IllegalArgumentException e = expectScriptThrows( - IllegalArgumentException.class, - () -> { - // We don't want PICKY here so we get the normal error message - exec("def i = 1} return 1", emptyMap(), emptyMap(), false); - } - ); + IllegalArgumentException e = expectScriptThrows(IllegalArgumentException.class, () -> { + // We don't want PICKY here so we get the normal error message + exec("def i = 1} return 1", emptyMap(), emptyMap(), false); + }); assertEquals("unexpected token ['}'] was expecting one of [{, ';'}].", e.getMessage()); } diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/15_update.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/15_update.yml index fd5c89b490d39..cb118ed9d562f 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/15_update.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/15_update.yml @@ -21,7 +21,6 @@ - match: { _index: test_1 } - match: { _id: "1" } - - match: { _type: _doc } - match: { _version: 2 } - do: @@ -43,7 +42,6 @@ - match: { _index: test_1 } - match: { _id: "1" } - - match: { _type: _doc } - match: { _version: 3 } - do: @@ -65,7 +63,6 @@ - match: { _index: test_1 } - match: { _id: "1" } - - match: { _type: _doc } - match: { _version: 4 } - do: @@ -89,7 +86,6 @@ - match: { _index: test_1 } - match: { _id: "1" } - - match: { _type: _doc } - match: { _version: 5 } - do: diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/30_search.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/30_search.yml index 0ce1e369cb7c5..a006fde630716 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/30_search.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/30_search.yml @@ -452,10 +452,6 @@ --- "Exception on negative score": - - skip: - version: " - 6.99.99" - reason: "check on negative scores was added from 7.0.0 on" - - do: index: index: test diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/70_mov_fn_agg.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/70_mov_fn_agg.yml index 50fc0eea501df..57e7b4e31e057 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/70_mov_fn_agg.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/70_mov_fn_agg.yml @@ -1,10 +1,6 @@ # Sanity integration test to make sure the custom context and whitelist work for moving_fn pipeline agg # setup: - - skip: - version: " - 6.3.99" - reason: "moving_fn added in 6.4.0" - - do: indices.create: index: test diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/80_script_score.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/80_script_score.yml index 495ca2131d886..d506db0cb0d3e 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/80_script_score.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/80_script_score.yml @@ -1,10 +1,4 @@ # Integration tests for ScriptScoreQuery using Painless - -setup: - - skip: - version: " - 6.99.99" - reason: "script score query was introduced in 7.0.0" - --- "Math 
functions": diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/90_interval_query_filter.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/90_interval_query_filter.yml index 0a6cf993e2a2e..7db3eb8b9b2aa 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/90_interval_query_filter.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/90_interval_query_filter.yml @@ -1,8 +1,4 @@ setup: - - skip: - version: " - 6.99.99" - reason: "Implemented in 7.0" - - do: indices.create: index: test diff --git a/modules/mapper-extras/src/javaRestTest/java/org/opensearch/index/mapper/TokenCountFieldMapperIntegrationIT.java b/modules/mapper-extras/src/javaRestTest/java/org/opensearch/index/mapper/TokenCountFieldMapperIntegrationIT.java index d400fb69f34ca..7ddb27e28d94c 100644 --- a/modules/mapper-extras/src/javaRestTest/java/org/opensearch/index/mapper/TokenCountFieldMapperIntegrationIT.java +++ b/modules/mapper-extras/src/javaRestTest/java/org/opensearch/index/mapper/TokenCountFieldMapperIntegrationIT.java @@ -203,7 +203,7 @@ private SearchRequestBuilder searchByNumericRange(int low, int high) { } private SearchRequestBuilder prepareSearch() { - SearchRequestBuilder request = client().prepareSearch("test").setTypes("test"); + SearchRequestBuilder request = client().prepareSearch("test"); request.addStoredField("foo.token_count"); request.addStoredField("foo.token_count_without_position_increments"); if (loadCountedFields) { diff --git a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/RankFeatureFieldMapper.java b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/RankFeatureFieldMapper.java index 6f64be86f934c..31fef7301477a 100644 --- a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/RankFeatureFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/RankFeatureFieldMapper.java @@ -135,11 +135,11 @@ public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, S } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup searchLookup, String format) { + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { if (format != null) { throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] doesn't support formats."); } - return new SourceValueFetcher(name(), mapperService) { + return new SourceValueFetcher(name(), context) { @Override protected Float parseSourceValue(Object value) { return objectToFloat(value); diff --git a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/RankFeatureMetaFieldMapper.java b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/RankFeatureMetaFieldMapper.java index dd29826af3af8..b9effebeb9e61 100644 --- a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/RankFeatureMetaFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/RankFeatureMetaFieldMapper.java @@ -65,7 +65,7 @@ public String typeName() { } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup searchLookup, String format) { + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { throw new UnsupportedOperationException("Cannot fetch values for internal field [" + typeName() + "]."); } diff --git 
a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/RankFeaturesFieldMapper.java b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/RankFeaturesFieldMapper.java index b35f790901f33..43853eb40f432 100644 --- a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/RankFeaturesFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/RankFeaturesFieldMapper.java @@ -105,8 +105,8 @@ public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, S } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup searchLookup, String format) { - return SourceValueFetcher.identity(name(), mapperService, format); + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { + return SourceValueFetcher.identity(name(), context, format); } @Override diff --git a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/ScaledFloatFieldMapper.java b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/ScaledFloatFieldMapper.java index 93f3adf476836..78a9e389eb63f 100644 --- a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/ScaledFloatFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/ScaledFloatFieldMapper.java @@ -260,11 +260,11 @@ public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, S } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup searchLookup, String format) { + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { if (format != null) { throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] doesn't support formats."); } - return new SourceValueFetcher(name(), mapperService) { + return new SourceValueFetcher(name(), context) { @Override protected Double parseSourceValue(Object value) { double doubleValue; diff --git a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapper.java b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapper.java index 6c55c2ecd0f04..7bf102584a379 100644 --- a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapper.java @@ -329,8 +329,8 @@ private ShingleFieldType shingleFieldForPositions(int positions) { } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup searchLookup, String format) { - return SourceValueFetcher.toString(name(), mapperService, format); + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { + return SourceValueFetcher.toString(name(), context, format); } @Override @@ -439,10 +439,10 @@ public Query prefixQuery(String value, MultiTermQuery.RewriteMethod method, bool } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup searchLookup, String format) { + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { // Because this internal field is modelled as a multi-field, SourceValueFetcher will look up its // parent field in _source. So we don't need to use the parent field name here. 
- return SourceValueFetcher.toString(name(), mapperService, format); + return SourceValueFetcher.toString(name(), context, format); } @Override @@ -545,10 +545,10 @@ void setPrefixFieldType(PrefixFieldType prefixFieldType) { } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup searchLookup, String format) { + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { // Because this internal field is modelled as a multi-field, SourceValueFetcher will look up its // parent field in _source. So we don't need to use the parent field name here. - return SourceValueFetcher.toString(name(), mapperService, format); + return SourceValueFetcher.toString(name(), context, format); } @Override diff --git a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/TokenCountFieldMapper.java b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/TokenCountFieldMapper.java index ed71ffa5158d7..fd029503e9a7b 100644 --- a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/TokenCountFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/TokenCountFieldMapper.java @@ -36,6 +36,7 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.opensearch.index.analysis.NamedAnalyzer; +import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.lookup.SearchLookup; import java.io.IOException; @@ -119,7 +120,7 @@ static class TokenCountFieldType extends NumberFieldMapper.NumberFieldType { } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup searchLookup, String format) { + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { if (hasDocValues() == false) { return lookup -> org.opensearch.common.collect.List.of(); } diff --git a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_feature/10_basic.yml b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_feature/10_basic.yml index 8a874d30591f6..6fea35eb21f4e 100644 --- a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_feature/10_basic.yml +++ b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_feature/10_basic.yml @@ -1,8 +1,4 @@ setup: - - skip: - version: " - 6.99.99" - reason: "The rank feature field/query was introduced in 7.0.0" - - do: indices.create: index: test diff --git a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_features/10_basic.yml b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_features/10_basic.yml index f524bd93bb600..d4d5d2a360406 100644 --- a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_features/10_basic.yml +++ b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_features/10_basic.yml @@ -1,8 +1,4 @@ setup: - - skip: - version: " - 6.99.99" - reason: "The rank_features field was introduced in 7.0.0" - - do: indices.create: index: test diff --git a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/search-as-you-type/10_basic.yml b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/search-as-you-type/10_basic.yml index ffe05097748a6..21843dad1d177 100644 --- a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/search-as-you-type/10_basic.yml +++ 
b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/search-as-you-type/10_basic.yml @@ -19,7 +19,6 @@ setup: - do: index: index: test - type: _doc id: 1 body: a_field: "quick brown fox jump lazy dog" @@ -28,7 +27,6 @@ setup: - do: index: index: test - type: _doc id: 2 body: a_field: "xylophone xylophone xylophone" @@ -41,7 +39,6 @@ setup: - do: get: index: test - type: _doc id: 1 - is_true: found diff --git a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/search-as-you-type/20_highlighting.yml b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/search-as-you-type/20_highlighting.yml index 15778393959e5..58441abac8f88 100644 --- a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/search-as-you-type/20_highlighting.yml +++ b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/search-as-you-type/20_highlighting.yml @@ -22,7 +22,6 @@ setup: - do: index: index: test - type: _doc id: 1 body: a_field: "quick brown fox jump lazy dog" diff --git a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ChildrenIT.java b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ChildrenIT.java index 74c884c9d0e25..b831e76c4ad71 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ChildrenIT.java +++ b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ChildrenIT.java @@ -132,7 +132,7 @@ public void testParentWithMultipleBuckets() throws Exception { TopHits topHits = childrenBucket.getAggregations().get("top_comments"); logger.info("total_hits={}", topHits.getHits().getTotalHits().value); for (SearchHit searchHit : topHits.getHits()) { - logger.info("hit= {} {} {}", searchHit.getSortValues()[0], searchHit.getType(), searchHit.getId()); + logger.info("hit= {} {}", searchHit.getSortValues()[0], searchHit.getId()); } } diff --git a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ChildQuerySearchIT.java b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ChildQuerySearchIT.java index 2972b170e07b7..6910964ac8c58 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ChildQuerySearchIT.java +++ b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ChildQuerySearchIT.java @@ -201,7 +201,7 @@ public void testSimpleChildQuery() throws Exception { // TEST FETCHING _parent from child SearchResponse searchResponse; - searchResponse = client().prepareSearch("test").setQuery(idsQuery("doc").addIds("c1")).get(); + searchResponse = client().prepareSearch("test").setQuery(idsQuery().addIds("c1")).get(); assertNoFailures(searchResponse); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("c1")); @@ -608,7 +608,7 @@ public void testExplainUsage() throws Exception { assertHitCount(searchResponse, 1L); assertThat(searchResponse.getHits().getAt(0).getExplanation().getDescription(), containsString("join value p1")); - ExplainResponse explainResponse = client().prepareExplain("test", "doc", parentId) + ExplainResponse explainResponse = client().prepareExplain("test", parentId) .setQuery(hasChildQuery("child", termQuery("c_field", "1"), ScoreMode.Max)) .get(); assertThat(explainResponse.isExists(), equalTo(true)); diff --git a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/InnerHitsIT.java 
b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/InnerHitsIT.java index 913cba6950228..03cff9c19a703 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/InnerHitsIT.java +++ b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/InnerHitsIT.java @@ -151,9 +151,7 @@ public void testSimpleParentChild() throws Exception { assertThat(innerHits.getTotalHits().value, equalTo(2L)); assertThat(innerHits.getAt(0).getId(), equalTo("c1")); - assertThat(innerHits.getAt(0).getType(), equalTo("doc")); assertThat(innerHits.getAt(1).getId(), equalTo("c2")); - assertThat(innerHits.getAt(1).getType(), equalTo("doc")); final boolean seqNoAndTerm = randomBoolean(); response = client().prepareSearch("articles") @@ -172,11 +170,8 @@ public void testSimpleParentChild() throws Exception { assertThat(innerHits.getTotalHits().value, equalTo(3L)); assertThat(innerHits.getAt(0).getId(), equalTo("c4")); - assertThat(innerHits.getAt(0).getType(), equalTo("doc")); assertThat(innerHits.getAt(1).getId(), equalTo("c5")); - assertThat(innerHits.getAt(1).getType(), equalTo("doc")); assertThat(innerHits.getAt(2).getId(), equalTo("c6")); - assertThat(innerHits.getAt(2).getType(), equalTo("doc")); if (seqNoAndTerm) { assertThat(innerHits.getAt(0).getPrimaryTerm(), equalTo(1L)); @@ -301,7 +296,6 @@ public void testRandomParentChild() throws Exception { int offset2 = 0; for (int parent = 0; parent < numDocs; parent++) { SearchHit searchHit = searchResponse.getHits().getAt(parent); - assertThat(searchHit.getType(), equalTo("doc")); assertThat(searchHit.getId(), equalTo(String.format(Locale.ENGLISH, "p_%03d", parent))); assertThat(searchHit.getShard(), notNullValue()); @@ -309,7 +303,6 @@ public void testRandomParentChild() throws Exception { assertThat(inner.getTotalHits().value, equalTo((long) child1InnerObjects[parent])); for (int child = 0; child < child1InnerObjects[parent] && child < size; child++) { SearchHit innerHit = inner.getAt(child); - assertThat(innerHit.getType(), equalTo("doc")); String childId = String.format(Locale.ENGLISH, "c1_%04d", offset1 + child); assertThat(innerHit.getId(), equalTo(childId)); assertThat(innerHit.getNestedIdentity(), nullValue()); @@ -320,7 +313,6 @@ public void testRandomParentChild() throws Exception { assertThat(inner.getTotalHits().value, equalTo((long) child2InnerObjects[parent])); for (int child = 0; child < child2InnerObjects[parent] && child < size; child++) { SearchHit innerHit = inner.getAt(child); - assertThat(innerHit.getType(), equalTo("doc")); String childId = String.format(Locale.ENGLISH, "c2_%04d", offset2 + child); assertThat(innerHit.getId(), equalTo(childId)); assertThat(innerHit.getNestedIdentity(), nullValue()); @@ -376,16 +368,12 @@ public void testInnerHitsOnHasParent() throws Exception { SearchHit searchHit = response.getHits().getAt(0); assertThat(searchHit.getId(), equalTo("3")); - assertThat(searchHit.getType(), equalTo("doc")); assertThat(searchHit.getInnerHits().get("question").getTotalHits().value, equalTo(1L)); - assertThat(searchHit.getInnerHits().get("question").getAt(0).getType(), equalTo("doc")); assertThat(searchHit.getInnerHits().get("question").getAt(0).getId(), equalTo("1")); searchHit = response.getHits().getAt(1); assertThat(searchHit.getId(), equalTo("4")); - assertThat(searchHit.getType(), equalTo("doc")); assertThat(searchHit.getInnerHits().get("question").getTotalHits().value, equalTo(1L)); - assertThat(searchHit.getInnerHits().get("question").getAt(0).getType(), 
equalTo("doc")); assertThat(searchHit.getInnerHits().get("question").getAt(0).getId(), equalTo("2")); } @@ -430,12 +418,10 @@ public void testParentChildMultipleLayers() throws Exception { SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comment"); assertThat(innerHits.getTotalHits().value, equalTo(1L)); assertThat(innerHits.getAt(0).getId(), equalTo("3")); - assertThat(innerHits.getAt(0).getType(), equalTo("doc")); innerHits = innerHits.getAt(0).getInnerHits().get("remark"); assertThat(innerHits.getTotalHits().value, equalTo(1L)); assertThat(innerHits.getAt(0).getId(), equalTo("5")); - assertThat(innerHits.getAt(0).getType(), equalTo("doc")); response = client().prepareSearch("articles") .setQuery( @@ -455,12 +441,10 @@ public void testParentChildMultipleLayers() throws Exception { innerHits = response.getHits().getAt(0).getInnerHits().get("comment"); assertThat(innerHits.getTotalHits().value, equalTo(1L)); assertThat(innerHits.getAt(0).getId(), equalTo("4")); - assertThat(innerHits.getAt(0).getType(), equalTo("doc")); innerHits = innerHits.getAt(0).getInnerHits().get("remark"); assertThat(innerHits.getTotalHits().value, equalTo(1L)); assertThat(innerHits.getAt(0).getId(), equalTo("6")); - assertThat(innerHits.getAt(0).getType(), equalTo("doc")); } public void testRoyals() throws Exception { @@ -613,7 +597,7 @@ public void testUseMaxDocInsteadOfSize() throws Exception { assertHitCount(response, 1); } - public void testNestedInnerHitWrappedInParentChildInnerhit() throws Exception { + public void testNestedInnerHitWrappedInParentChildInnerhit() { assertAcked( prepareCreate("test").addMapping( "doc", @@ -646,7 +630,7 @@ public void testNestedInnerHitWrappedInParentChildInnerhit() throws Exception { assertThat(hit.getInnerHits().get("child_type").getAt(0).getInnerHits().get("nested_type").getAt(0).field("_parent"), nullValue()); } - public void testInnerHitsWithIgnoreUnmapped() throws Exception { + public void testInnerHitsWithIgnoreUnmapped() { assertAcked( prepareCreate("index1").addMapping( "doc", @@ -676,7 +660,7 @@ public void testInnerHitsWithIgnoreUnmapped() throws Exception { assertSearchHits(response, "1", "3"); } - public void testTooHighResultWindow() throws Exception { + public void testTooHighResultWindow() { assertAcked( prepareCreate("index1").addMapping( "doc", diff --git a/modules/parent-join/src/main/java/org/opensearch/join/mapper/MetaJoinFieldMapper.java b/modules/parent-join/src/main/java/org/opensearch/join/mapper/MetaJoinFieldMapper.java index 4570a67d67e63..2b0d3e4764b2a 100644 --- a/modules/parent-join/src/main/java/org/opensearch/join/mapper/MetaJoinFieldMapper.java +++ b/modules/parent-join/src/main/java/org/opensearch/join/mapper/MetaJoinFieldMapper.java @@ -39,7 +39,6 @@ import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData; import org.opensearch.index.mapper.FieldMapper; -import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.ParseContext; import org.opensearch.index.mapper.StringFieldType; import org.opensearch.index.mapper.TextSearchInfo; @@ -111,7 +110,7 @@ public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, S } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup searchLookup, String format) { + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { throw new UnsupportedOperationException("Cannot fetch values for metadata 
field [" + typeName() + "]."); } diff --git a/modules/parent-join/src/main/java/org/opensearch/join/mapper/ParentIdFieldMapper.java b/modules/parent-join/src/main/java/org/opensearch/join/mapper/ParentIdFieldMapper.java index 4e3d26d73c30f..29a02a5bc8032 100644 --- a/modules/parent-join/src/main/java/org/opensearch/join/mapper/ParentIdFieldMapper.java +++ b/modules/parent-join/src/main/java/org/opensearch/join/mapper/ParentIdFieldMapper.java @@ -48,11 +48,11 @@ import org.opensearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData; import org.opensearch.index.mapper.FieldMapper; import org.opensearch.index.mapper.MappedFieldType; -import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.ParseContext; import org.opensearch.index.mapper.StringFieldType; import org.opensearch.index.mapper.TextSearchInfo; import org.opensearch.index.mapper.ValueFetcher; +import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.aggregations.support.CoreValuesSourceType; import org.opensearch.search.lookup.SearchLookup; @@ -132,7 +132,7 @@ public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, S } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup searchLookup, String format) { + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { throw new UnsupportedOperationException("Cannot fetch values for internal field [" + typeName() + "]."); } diff --git a/modules/parent-join/src/main/java/org/opensearch/join/mapper/ParentJoinFieldMapper.java b/modules/parent-join/src/main/java/org/opensearch/join/mapper/ParentJoinFieldMapper.java index 7d34e6d40a752..662bacc0fb08a 100644 --- a/modules/parent-join/src/main/java/org/opensearch/join/mapper/ParentJoinFieldMapper.java +++ b/modules/parent-join/src/main/java/org/opensearch/join/mapper/ParentJoinFieldMapper.java @@ -57,6 +57,7 @@ import org.opensearch.index.mapper.StringFieldType; import org.opensearch.index.mapper.TextSearchInfo; import org.opensearch.index.mapper.ValueFetcher; +import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.aggregations.support.CoreValuesSourceType; import org.opensearch.search.lookup.SearchLookup; @@ -243,8 +244,8 @@ public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, S } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup searchLookup, String format) { - return SourceValueFetcher.identity(name(), mapperService, format); + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { + return SourceValueFetcher.identity(name(), context, format); } @Override diff --git a/modules/parent-join/src/test/java/org/opensearch/join/mapper/ParentJoinFieldMapperTests.java b/modules/parent-join/src/test/java/org/opensearch/join/mapper/ParentJoinFieldMapperTests.java index 62040b3893e83..628345a625d1b 100644 --- a/modules/parent-join/src/test/java/org/opensearch/join/mapper/ParentJoinFieldMapperTests.java +++ b/modules/parent-join/src/test/java/org/opensearch/join/mapper/ParentJoinFieldMapperTests.java @@ -474,7 +474,7 @@ public void testUpdateRelations() throws Exception { .endObject() ); docMapper = indexService.mapperService() - .merge("_doc", new CompressedXContent(updateMapping), MapperService.MergeReason.MAPPING_UPDATE); + .merge("type", new CompressedXContent(updateMapping), MapperService.MergeReason.MAPPING_UPDATE); ParentJoinFieldMapper mapper = 
ParentJoinFieldMapper.getMapper(indexService.mapperService()); assertNotNull(mapper); assertEquals("join_field", mapper.name()); @@ -501,7 +501,7 @@ public void testUpdateRelations() throws Exception { .endObject() ); docMapper = indexService.mapperService() - .merge("_doc", new CompressedXContent(updateMapping), MapperService.MergeReason.MAPPING_UPDATE); + .merge("type", new CompressedXContent(updateMapping), MapperService.MergeReason.MAPPING_UPDATE); ParentJoinFieldMapper mapper = ParentJoinFieldMapper.getMapper(indexService.mapperService()); assertNotNull(mapper); assertEquals("join_field", mapper.name()); diff --git a/modules/parent-join/src/test/java/org/opensearch/join/query/HasChildQueryBuilderTests.java b/modules/parent-join/src/test/java/org/opensearch/join/query/HasChildQueryBuilderTests.java index e069fc23a141d..5595c98a439bf 100644 --- a/modules/parent-join/src/test/java/org/opensearch/join/query/HasChildQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/opensearch/join/query/HasChildQueryBuilderTests.java @@ -290,13 +290,9 @@ public void testFromJson() throws IOException { } public void testToQueryInnerQueryType() throws IOException { - String[] searchTypes = new String[] { TYPE }; QueryShardContext shardContext = createShardContext(); - shardContext.setTypes(searchTypes); HasChildQueryBuilder hasChildQueryBuilder = hasChildQuery(CHILD_DOC, new IdsQueryBuilder().addIds("id"), ScoreMode.None); Query query = hasChildQueryBuilder.toQuery(shardContext); - // verify that the context types are still the same as the ones we previously set - assertThat(shardContext.getTypes(), equalTo(searchTypes)); assertLateParsingQuery(query, CHILD_DOC, "id"); } diff --git a/modules/parent-join/src/test/java/org/opensearch/join/query/HasParentQueryBuilderTests.java b/modules/parent-join/src/test/java/org/opensearch/join/query/HasParentQueryBuilderTests.java index 9783cb703ade1..0f983799a6d25 100644 --- a/modules/parent-join/src/test/java/org/opensearch/join/query/HasParentQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/opensearch/join/query/HasParentQueryBuilderTests.java @@ -192,13 +192,9 @@ public void testIllegalValues() throws IOException { } public void testToQueryInnerQueryType() throws IOException { - String[] searchTypes = new String[] { TYPE }; QueryShardContext shardContext = createShardContext(); - shardContext.setTypes(searchTypes); HasParentQueryBuilder hasParentQueryBuilder = new HasParentQueryBuilder(PARENT_DOC, new IdsQueryBuilder().addIds("id"), false); Query query = hasParentQueryBuilder.toQuery(shardContext); - // verify that the context types are still the same as the ones we previously set - assertThat(shardContext.getTypes(), equalTo(searchTypes)); HasChildQueryBuilderTests.assertLateParsingQuery(query, PARENT_DOC, "id"); } diff --git a/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/20_parent_join.yml b/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/20_parent_join.yml index bff5639e4d270..bb2d39fbbdd4e 100644 --- a/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/20_parent_join.yml +++ b/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/20_parent_join.yml @@ -49,35 +49,29 @@ teardown: - match: { hits.total: 6 } - match: { hits.hits.0._index: "test" } - - match: { hits.hits.0._type: "_doc" } - match: { hits.hits.0._id: "3" } - match: { hits.hits.0._source.join_field.name: "child" } - match: { hits.hits.0._source.join_field.parent: "1" } - is_false: 
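
With `setTypes`/`getTypes` gone from `QueryShardContext`, the two `testToQueryInnerQueryType` tests above shrink to a plain build-and-convert round trip. A minimal sketch under that assumption; `createShardContext()` comes from the test scaffolding, the relation name is a placeholder, and the static import location of `hasChildQuery` is assumed to be the module's `JoinQueryBuilders`:

    import java.io.IOException;

    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.join.ScoreMode;
    import org.opensearch.index.query.IdsQueryBuilder;
    import org.opensearch.index.query.QueryShardContext;

    import static org.opensearch.join.query.JoinQueryBuilders.hasChildQuery;

    Query childQuery(QueryShardContext shardContext) throws IOException {
        // No type needs to be pinned on the context before conversion.
        return hasChildQuery("child_doc", new IdsQueryBuilder().addIds("id"), ScoreMode.None).toQuery(shardContext);
    }
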
hits.hits.0.fields.join_field#child } - match: { hits.hits.1._index: "test" } - - match: { hits.hits.1._type: "_doc" } - match: { hits.hits.1._id: "4" } - match: { hits.hits.1._source.join_field.name: "child" } - match: { hits.hits.1._source.join_field.parent: "1" } - is_false: hits.hits.1.fields.join_field#child } - match: { hits.hits.2._index: "test" } - - match: { hits.hits.2._type: "_doc" } - match: { hits.hits.2._id: "5" } - match: { hits.hits.2._source.join_field.name: "child" } - match: { hits.hits.2._source.join_field.parent: "2" } - is_false: hits.hits.2.fields.join_field#child } - match: { hits.hits.3._index: "test" } - - match: { hits.hits.3._type: "_doc" } - match: { hits.hits.3._id: "6" } - match: { hits.hits.3._source.join_field.name: "grand_child" } - match: { hits.hits.3._source.join_field.parent: "5" } - match: { hits.hits.4._index: "test" } - - match: { hits.hits.4._type: "_doc" } - match: { hits.hits.4._id: "1" } - match: { hits.hits.4._source.join_field.name: "parent" } - is_false: hits.hits.4._source.join_field.parent - match: { hits.hits.5._index: "test" } - - match: { hits.hits.5._type: "_doc" } - match: { hits.hits.5._id: "2" } - match: { hits.hits.5._source.join_field.name: "parent" } - is_false: hits.hits.5._source.join_field.parent @@ -96,12 +90,10 @@ teardown: - match: { hits.total: 2 } - match: { hits.hits.0._index: "test" } - - match: { hits.hits.0._type: "_doc" } - match: { hits.hits.0._id: "3" } - match: { hits.hits.0._source.join_field.name: "child" } - match: { hits.hits.0._source.join_field.parent: "1" } - match: { hits.hits.1._index: "test" } - - match: { hits.hits.1._type: "_doc" } - match: { hits.hits.1._id: "4" } - match: { hits.hits.1._source.join_field.name: "child" } - match: { hits.hits.1._source.join_field.parent: "1" } diff --git a/modules/percolator/src/internalClusterTest/java/org/opensearch/percolator/PercolatorQuerySearchIT.java b/modules/percolator/src/internalClusterTest/java/org/opensearch/percolator/PercolatorQuerySearchIT.java index c111590d7a2ca..f76f14a6d9575 100644 --- a/modules/percolator/src/internalClusterTest/java/org/opensearch/percolator/PercolatorQuerySearchIT.java +++ b/modules/percolator/src/internalClusterTest/java/org/opensearch/percolator/PercolatorQuerySearchIT.java @@ -889,7 +889,6 @@ public void testWithMultiplePercolatorFields() throws Exception { .get(); assertHitCount(response, 1); assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(0).getType(), equalTo("type")); assertThat(response.getHits().getAt(0).getIndex(), equalTo("test1")); response = client().prepareSearch() @@ -898,7 +897,6 @@ public void testWithMultiplePercolatorFields() throws Exception { .get(); assertHitCount(response, 1); assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(0).getType(), equalTo("type")); assertThat(response.getHits().getAt(0).getIndex(), equalTo("test2")); // Unacceptable: diff --git a/modules/percolator/src/main/java/org/opensearch/percolator/PercolateQueryBuilder.java b/modules/percolator/src/main/java/org/opensearch/percolator/PercolateQueryBuilder.java index 5e245f7082ada..672d4dd15a254 100644 --- a/modules/percolator/src/main/java/org/opensearch/percolator/PercolateQueryBuilder.java +++ b/modules/percolator/src/main/java/org/opensearch/percolator/PercolateQueryBuilder.java @@ -501,13 +501,7 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryShardContext) { return rewritten; } } - GetRequest getRequest; - if 
(indexedDocumentType != null) { - deprecationLogger.deprecate("percolate_with_type", TYPE_DEPRECATION_MESSAGE); - getRequest = new GetRequest(indexedDocumentIndex, indexedDocumentType, indexedDocumentId); - } else { - getRequest = new GetRequest(indexedDocumentIndex, indexedDocumentId); - } + GetRequest getRequest = new GetRequest(indexedDocumentIndex, indexedDocumentId); getRequest.preference("_local"); getRequest.routing(indexedDocumentRouting); getRequest.preference(indexedDocumentPreference); diff --git a/modules/percolator/src/main/java/org/opensearch/percolator/PercolatorFieldMapper.java b/modules/percolator/src/main/java/org/opensearch/percolator/PercolatorFieldMapper.java index 72adc5539d6d1..a8b0395dd84e0 100644 --- a/modules/percolator/src/main/java/org/opensearch/percolator/PercolatorFieldMapper.java +++ b/modules/percolator/src/main/java/org/opensearch/percolator/PercolatorFieldMapper.java @@ -72,7 +72,6 @@ import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.Mapper; import org.opensearch.index.mapper.MapperParsingException; -import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.index.mapper.ParametrizedFieldMapper; import org.opensearch.index.mapper.ParseContext; @@ -196,7 +195,12 @@ static BinaryFieldMapper createQueryBuilderFieldBuilder(BuilderContext context) } static RangeFieldMapper createExtractedRangeFieldBuilder(String name, RangeType rangeType, BuilderContext context) { - RangeFieldMapper.Builder builder = new RangeFieldMapper.Builder(name, rangeType, true); + RangeFieldMapper.Builder builder = new RangeFieldMapper.Builder( + name, + rangeType, + true, + hasIndexCreated(context.indexSettings()) ? context.indexCreatedVersion() : null + ); // For now no doc values, because in processQuery(...) 
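
The `doRewrite` hunk above collapses the typed and deprecated-typed branches into a single typeless `GetRequest`, so the deprecation logging disappears with them. A minimal sketch of the lookup it now performs; the parameter values are placeholders:

    import org.opensearch.action.get.GetRequest;

    GetRequest fetchIndexedDocument(String index, String id, String routing) {
        GetRequest getRequest = new GetRequest(index, id); // two-argument form, no document type
        getRequest.preference("_local");
        getRequest.routing(routing);
        return getRequest;
    }
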
only the Lucene range fields get added: builder.docValues(false); return builder.build(context); @@ -245,8 +249,8 @@ public Query termQuery(Object value, QueryShardContext context) { } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup searchLookup, String format) { - return SourceValueFetcher.identity(name(), mapperService, format); + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { + return SourceValueFetcher.identity(name(), context, format); } Query percolateQuery( diff --git a/modules/percolator/src/main/java/org/opensearch/percolator/PercolatorHighlightSubFetchPhase.java b/modules/percolator/src/main/java/org/opensearch/percolator/PercolatorHighlightSubFetchPhase.java index 670aa74501f60..9b4e42d239750 100644 --- a/modules/percolator/src/main/java/org/opensearch/percolator/PercolatorHighlightSubFetchPhase.java +++ b/modules/percolator/src/main/java/org/opensearch/percolator/PercolatorHighlightSubFetchPhase.java @@ -38,7 +38,6 @@ import org.apache.lucene.search.QueryVisitor; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.document.DocumentField; -import org.opensearch.common.text.Text; import org.opensearch.search.SearchHit; import org.opensearch.search.fetch.FetchContext; import org.opensearch.search.fetch.FetchSubPhase; @@ -108,13 +107,7 @@ public void process(HitContext hit) throws IOException { int slot = (int) matchedSlot; BytesReference document = percolateQuery.getDocuments().get(slot); HitContext subContext = new HitContext( - new SearchHit( - slot, - "unknown", - new Text(hit.hit().getType()), - Collections.emptyMap(), - Collections.emptyMap() - ), + new SearchHit(slot, "unknown", Collections.emptyMap(), Collections.emptyMap()), percolatorLeafReaderContext, slot, new SourceLookup() diff --git a/modules/percolator/src/test/java/org/opensearch/percolator/PercolateQueryBuilderTests.java b/modules/percolator/src/test/java/org/opensearch/percolator/PercolateQueryBuilderTests.java index 3b0830b7e4519..5f11feee8f441 100644 --- a/modules/percolator/src/test/java/org/opensearch/percolator/PercolateQueryBuilderTests.java +++ b/modules/percolator/src/test/java/org/opensearch/percolator/PercolateQueryBuilderTests.java @@ -184,7 +184,6 @@ protected String[] shuffleProtectedFields() { @Override protected GetResponse executeGet(GetRequest getRequest) { assertThat(getRequest.index(), Matchers.equalTo(indexedDocumentIndex)); - assertThat(getRequest.type(), Matchers.equalTo(MapperService.SINGLE_MAPPING_NAME)); assertThat(getRequest.id(), Matchers.equalTo(indexedDocumentId)); assertThat(getRequest.routing(), Matchers.equalTo(indexedDocumentRouting)); assertThat(getRequest.preference(), Matchers.equalTo(indexedDocumentPreference)); @@ -193,7 +192,6 @@ protected GetResponse executeGet(GetRequest getRequest) { return new GetResponse( new GetResult( indexedDocumentIndex, - MapperService.SINGLE_MAPPING_NAME, indexedDocumentId, 0, 1, @@ -208,7 +206,6 @@ protected GetResponse executeGet(GetRequest getRequest) { return new GetResponse( new GetResult( indexedDocumentIndex, - MapperService.SINGLE_MAPPING_NAME, indexedDocumentId, UNASSIGNED_SEQ_NO, 0, @@ -341,7 +338,6 @@ public void testFromJsonWithType() throws IOException { + "\"}}" ); rewriteAndFetch(queryBuilder, queryShardContext).toQuery(queryShardContext); - assertWarnings(PercolateQueryBuilder.TYPE_DEPRECATION_MESSAGE); } public void testBothDocumentAndDocumentsSpecified() { diff --git 
a/modules/percolator/src/yamlRestTest/resources/rest-api-spec/test/10_basic.yml b/modules/percolator/src/yamlRestTest/resources/rest-api-spec/test/10_basic.yml index 08d344687adc7..35ebb2b099139 100644 --- a/modules/percolator/src/yamlRestTest/resources/rest-api-spec/test/10_basic.yml +++ b/modules/percolator/src/yamlRestTest/resources/rest-api-spec/test/10_basic.yml @@ -1,10 +1,5 @@ --- "Test percolator basics via rest": - - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: indices.create: index: queries_index @@ -74,7 +69,7 @@ percolate: field: query index: documents_index - id: some_id + id: some_id - match: { hits.total: 1 } - do: diff --git a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/DiscountedCumulativeGainTests.java b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/DiscountedCumulativeGainTests.java index 7c385cd45a840..2b1c56d9bba3b 100644 --- a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/DiscountedCumulativeGainTests.java +++ b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/DiscountedCumulativeGainTests.java @@ -36,7 +36,6 @@ import org.opensearch.common.Strings; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.io.stream.NamedWriteableRegistry; -import org.opensearch.common.text.Text; import org.opensearch.common.xcontent.ToXContent; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; @@ -44,7 +43,6 @@ import org.opensearch.common.xcontent.XContentParser; import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.index.mapper.MapperService; import org.opensearch.index.shard.ShardId; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchShardTarget; @@ -87,13 +85,7 @@ public void testDCGAt() { SearchHit[] hits = new SearchHit[6]; for (int i = 0; i < 6; i++) { rated.add(new RatedDocument("index", Integer.toString(i), relevanceRatings[i])); - hits[i] = new SearchHit( - i, - Integer.toString(i), - new Text(MapperService.SINGLE_MAPPING_NAME), - Collections.emptyMap(), - Collections.emptyMap() - ); + hits[i] = new SearchHit(i, Integer.toString(i), Collections.emptyMap(), Collections.emptyMap()); hits[i].shard(new SearchShardTarget("testnode", new ShardId("index", "uuid", 0), null, OriginalIndices.NONE)); } DiscountedCumulativeGain dcg = new DiscountedCumulativeGain(); @@ -143,13 +135,7 @@ public void testDCGAtSixMissingRatings() { rated.add(new RatedDocument("index", Integer.toString(i), relevanceRatings[i])); } } - hits[i] = new SearchHit( - i, - Integer.toString(i), - new Text(MapperService.SINGLE_MAPPING_NAME), - Collections.emptyMap(), - Collections.emptyMap() - ); + hits[i] = new SearchHit(i, Integer.toString(i), Collections.emptyMap(), Collections.emptyMap()); hits[i].shard(new SearchShardTarget("testnode", new ShardId("index", "uuid", 0), null, OriginalIndices.NONE)); } DiscountedCumulativeGain dcg = new DiscountedCumulativeGain(); @@ -206,13 +192,7 @@ public void testDCGAtFourMoreRatings() { // only create four hits SearchHit[] hits = new SearchHit[4]; for (int i = 0; i < 4; i++) { - hits[i] = new SearchHit( - i, - Integer.toString(i), - new Text(MapperService.SINGLE_MAPPING_NAME), - Collections.emptyMap(), - Collections.emptyMap() - ); + hits[i] = new SearchHit(i, Integer.toString(i), Collections.emptyMap(), Collections.emptyMap()); hits[i].shard(new SearchShardTarget("testnode", new 
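
Every rank-eval test in the hunks that follow builds hits with the four-argument `SearchHit` constructor, which no longer takes a `Text` type. A minimal sketch of that shared fixture; "testnode", "index" and "uuid" are the placeholder values the tests themselves use:

    import java.util.Collections;

    import org.opensearch.action.OriginalIndices;
    import org.opensearch.index.shard.ShardId;
    import org.opensearch.search.SearchHit;
    import org.opensearch.search.SearchShardTarget;

    static SearchHit typelessHit(int docId) {
        SearchHit hit = new SearchHit(docId, Integer.toString(docId), Collections.emptyMap(), Collections.emptyMap());
        hit.shard(new SearchShardTarget("testnode", new ShardId("index", "uuid", 0), null, OriginalIndices.NONE));
        return hit;
    }
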
ShardId("index", "uuid", 0), null, OriginalIndices.NONE)); } DiscountedCumulativeGain dcg = new DiscountedCumulativeGain(); diff --git a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/ExpectedReciprocalRankTests.java b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/ExpectedReciprocalRankTests.java index 4fb0089a32cb1..723a1e2202e2b 100644 --- a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/ExpectedReciprocalRankTests.java +++ b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/ExpectedReciprocalRankTests.java @@ -35,7 +35,6 @@ import org.opensearch.action.OriginalIndices; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.io.stream.NamedWriteableRegistry; -import org.opensearch.common.text.Text; import org.opensearch.common.xcontent.ToXContent; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; @@ -43,7 +42,6 @@ import org.opensearch.common.xcontent.XContentParser; import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.index.mapper.MapperService; import org.opensearch.index.shard.ShardId; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchShardTarget; @@ -130,13 +128,7 @@ private SearchHit[] createSearchHits(List rated, Integer[] releva if (relevanceRatings[i] != null) { rated.add(new RatedDocument("index", Integer.toString(i), relevanceRatings[i])); } - hits[i] = new SearchHit( - i, - Integer.toString(i), - new Text(MapperService.SINGLE_MAPPING_NAME), - Collections.emptyMap(), - Collections.emptyMap() - ); + hits[i] = new SearchHit(i, Integer.toString(i), Collections.emptyMap(), Collections.emptyMap()); hits[i].shard(new SearchShardTarget("testnode", new ShardId("index", "uuid", 0), null, OriginalIndices.NONE)); } return hits; diff --git a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/MeanReciprocalRankTests.java b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/MeanReciprocalRankTests.java index befb9bdf371ab..2cd16c05f2a20 100644 --- a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/MeanReciprocalRankTests.java +++ b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/MeanReciprocalRankTests.java @@ -35,7 +35,6 @@ import org.opensearch.action.OriginalIndices; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.io.stream.NamedWriteableRegistry; -import org.opensearch.common.text.Text; import org.opensearch.common.xcontent.ToXContent; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; @@ -216,7 +215,7 @@ public void testXContentParsingIsNotLenient() throws IOException { private static SearchHit[] createSearchHits(int from, int to, String index) { SearchHit[] hits = new SearchHit[to + 1 - from]; for (int i = from; i <= to; i++) { - hits[i] = new SearchHit(i, i + "", new Text(""), Collections.emptyMap(), Collections.emptyMap()); + hits[i] = new SearchHit(i, i + "", Collections.emptyMap(), Collections.emptyMap()); hits[i].shard(new SearchShardTarget("testnode", new ShardId(index, "uuid", 0), null, OriginalIndices.NONE)); } return hits; diff --git a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/PrecisionAtKTests.java b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/PrecisionAtKTests.java index ea9eadb0c9cbd..1c7a02dc27cf7 100644 --- 
a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/PrecisionAtKTests.java +++ b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/PrecisionAtKTests.java @@ -35,7 +35,6 @@ import org.opensearch.action.OriginalIndices; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.io.stream.NamedWriteableRegistry; -import org.opensearch.common.text.Text; import org.opensearch.common.xcontent.ToXContent; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; @@ -43,7 +42,6 @@ import org.opensearch.common.xcontent.XContentParser; import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.index.mapper.MapperService; import org.opensearch.index.shard.ShardId; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchShardTarget; @@ -127,7 +125,7 @@ public void testIgnoreUnlabeled() { rated.add(createRatedDoc("test", "1", RELEVANT_RATING)); // add an unlabeled search hit SearchHit[] searchHits = Arrays.copyOf(toSearchHits(rated, "test"), 3); - searchHits[2] = new SearchHit(2, "2", new Text(MapperService.SINGLE_MAPPING_NAME), Collections.emptyMap(), Collections.emptyMap()); + searchHits[2] = new SearchHit(2, "2", Collections.emptyMap(), Collections.emptyMap()); searchHits[2].shard(new SearchShardTarget("testnode", new ShardId("index", "uuid", 0), null, OriginalIndices.NONE)); EvalQueryQuality evaluated = (new PrecisionAtK()).evaluate("id", searchHits, rated); @@ -146,7 +144,7 @@ public void testIgnoreUnlabeled() { public void testNoRatedDocs() throws Exception { SearchHit[] hits = new SearchHit[5]; for (int i = 0; i < 5; i++) { - hits[i] = new SearchHit(i, i + "", new Text(MapperService.SINGLE_MAPPING_NAME), Collections.emptyMap(), Collections.emptyMap()); + hits[i] = new SearchHit(i, i + "", Collections.emptyMap(), Collections.emptyMap()); hits[i].shard(new SearchShardTarget("testnode", new ShardId("index", "uuid", 0), null, OriginalIndices.NONE)); } EvalQueryQuality evaluated = (new PrecisionAtK()).evaluate("id", hits, Collections.emptyList()); @@ -283,7 +281,7 @@ private static PrecisionAtK mutate(PrecisionAtK original) { private static SearchHit[] toSearchHits(List rated, String index) { SearchHit[] hits = new SearchHit[rated.size()]; for (int i = 0; i < rated.size(); i++) { - hits[i] = new SearchHit(i, i + "", new Text(""), Collections.emptyMap(), Collections.emptyMap()); + hits[i] = new SearchHit(i, i + "", Collections.emptyMap(), Collections.emptyMap()); hits[i].shard(new SearchShardTarget("testnode", new ShardId(index, "uuid", 0), null, OriginalIndices.NONE)); } return hits; diff --git a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalResponseTests.java b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalResponseTests.java index 7e81dde0cab29..3d883b373d705 100644 --- a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalResponseTests.java +++ b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalResponseTests.java @@ -44,14 +44,12 @@ import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.text.Text; import org.opensearch.common.xcontent.ToXContent; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; import 
org.opensearch.common.xcontent.XContentLocation; import org.opensearch.common.xcontent.XContentParser; import org.opensearch.common.xcontent.XContentType; -import org.opensearch.index.mapper.MapperService; import org.opensearch.index.shard.ShardId; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchParseException; @@ -188,9 +186,9 @@ public void testToXContent() throws IOException { + " \"coffee_query\": {" + " \"metric_score\": 0.1," + " \"unrated_docs\": [{\"_index\":\"index\",\"_id\":\"456\"}]," - + " \"hits\":[{\"hit\":{\"_index\":\"index\",\"_type\":\"_doc\",\"_id\":\"123\",\"_score\":1.0}," + + " \"hits\":[{\"hit\":{\"_index\":\"index\",\"_id\":\"123\",\"_score\":1.0}," + " \"rating\":5}," - + " {\"hit\":{\"_index\":\"index\",\"_type\":\"_doc\",\"_id\":\"456\",\"_score\":1.0}," + + " {\"hit\":{\"_index\":\"index\",\"_id\":\"456\",\"_score\":1.0}," + " \"rating\":null}" + " ]" + " }" @@ -210,13 +208,7 @@ public void testToXContent() throws IOException { } private static RatedSearchHit searchHit(String index, int docId, Integer rating) { - SearchHit hit = new SearchHit( - docId, - docId + "", - new Text(MapperService.SINGLE_MAPPING_NAME), - Collections.emptyMap(), - Collections.emptyMap() - ); + SearchHit hit = new SearchHit(docId, docId + "", Collections.emptyMap(), Collections.emptyMap()); hit.shard(new SearchShardTarget("testnode", new ShardId(index, "uuid", 0), null, OriginalIndices.NONE)); hit.score(1.0f); return new RatedSearchHit(hit, rating != null ? OptionalInt.of(rating) : OptionalInt.empty()); diff --git a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RatedSearchHitTests.java b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RatedSearchHitTests.java index bfc9098f59e43..555a0c95a3456 100644 --- a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RatedSearchHitTests.java +++ b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RatedSearchHitTests.java @@ -34,11 +34,9 @@ import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.io.stream.NamedWriteableRegistry; -import org.opensearch.common.text.Text; import org.opensearch.common.xcontent.ToXContent; import org.opensearch.common.xcontent.XContentParser; import org.opensearch.common.xcontent.XContentType; -import org.opensearch.index.mapper.MapperService; import org.opensearch.search.SearchHit; import org.opensearch.test.OpenSearchTestCase; @@ -55,7 +53,6 @@ public static RatedSearchHit randomRatedSearchHit() { SearchHit searchHit = new SearchHit( randomIntBetween(0, 10), randomAlphaOfLength(10), - new Text(MapperService.SINGLE_MAPPING_NAME), Collections.emptyMap(), Collections.emptyMap() ); @@ -71,13 +68,7 @@ private static RatedSearchHit mutateTestItem(RatedSearchHit original) { rating = rating.isPresent() ? 
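
As the `testToXContent` expectation above shows, a serialized rated hit now renders as `{"_index": ..., "_id": ..., "_score": ...}` with no `_type` key. A minimal sketch of the matching fixture, with the optional-rating handling copied from the hunk and the class assumed to live in the rank-eval package:

    import java.util.Collections;
    import java.util.OptionalInt;

    import org.opensearch.index.rankeval.RatedSearchHit;
    import org.opensearch.search.SearchHit;

    static RatedSearchHit ratedHit(int docId, Integer rating) {
        SearchHit hit = new SearchHit(docId, Integer.toString(docId), Collections.emptyMap(), Collections.emptyMap());
        hit.score(1.0f); // matches the expected "_score":1.0 in the JSON assertion
        return new RatedSearchHit(hit, rating != null ? OptionalInt.of(rating) : OptionalInt.empty());
    }
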
OptionalInt.of(rating.getAsInt() + 1) : OptionalInt.of(randomInt(5)); break; case 1: - hit = new SearchHit( - hit.docId(), - hit.getId() + randomAlphaOfLength(10), - new Text(MapperService.SINGLE_MAPPING_NAME), - Collections.emptyMap(), - Collections.emptyMap() - ); + hit = new SearchHit(hit.docId(), hit.getId() + randomAlphaOfLength(10), Collections.emptyMap(), Collections.emptyMap()); break; default: throw new IllegalStateException("The test should only allow two parameters mutated"); diff --git a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RecallAtKTests.java b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RecallAtKTests.java index 6ffaaa8e89e7b..6efb44a3875e1 100644 --- a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RecallAtKTests.java +++ b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RecallAtKTests.java @@ -35,7 +35,6 @@ import org.opensearch.action.OriginalIndices; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.io.stream.NamedWriteableRegistry; -import org.opensearch.common.text.Text; import org.opensearch.common.xcontent.ToXContent; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; @@ -128,7 +127,7 @@ public void testNoRatedDocs() throws Exception { int k = 5; SearchHit[] hits = new SearchHit[k]; for (int i = 0; i < k; i++) { - hits[i] = new SearchHit(i, i + "", new Text(""), Collections.emptyMap(), Collections.emptyMap()); + hits[i] = new SearchHit(i, i + "", Collections.emptyMap(), Collections.emptyMap()); hits[i].shard(new SearchShardTarget("testnode", new ShardId("index", "uuid", 0), null, OriginalIndices.NONE)); } @@ -252,7 +251,7 @@ private static RecallAtK mutate(RecallAtK original) { private static SearchHit[] toSearchHits(List rated, String index) { SearchHit[] hits = new SearchHit[rated.size()]; for (int i = 0; i < rated.size(); i++) { - hits[i] = new SearchHit(i, i + "", new Text(""), Collections.emptyMap(), Collections.emptyMap()); + hits[i] = new SearchHit(i, i + "", Collections.emptyMap(), Collections.emptyMap()); hits[i].shard(new SearchShardTarget("testnode", new ShardId(index, "uuid", 0), null, OriginalIndices.NONE)); } return hits; diff --git a/modules/rank-eval/src/yamlRestTest/resources/rest-api-spec/test/rank_eval/10_basic.yml b/modules/rank-eval/src/yamlRestTest/resources/rest-api-spec/test/rank_eval/10_basic.yml index 382b0789ba0ec..2ad583e03caaa 100644 --- a/modules/rank-eval/src/yamlRestTest/resources/rest-api-spec/test/rank_eval/10_basic.yml +++ b/modules/rank-eval/src/yamlRestTest/resources/rest-api-spec/test/rank_eval/10_basic.yml @@ -40,11 +40,6 @@ setup: --- "Response format": - - - skip: - version: " - 6.2.99" - reason: response format was updated in 6.3 - - do: rank_eval: index: foo, @@ -121,11 +116,6 @@ setup: --- "Mean Reciprocal Rank": - - - skip: - version: " - 6.2.99" - reason: response format was updated in 6.3 - - do: rank_eval: body: { @@ -160,11 +150,6 @@ setup: --- "Expected Reciprocal Rank": - - - skip: - version: " - 6.3.99" - reason: ERR was introduced in 6.4 - - do: rank_eval: body: { diff --git a/modules/rank-eval/src/yamlRestTest/resources/rest-api-spec/test/rank_eval/20_dcg.yml b/modules/rank-eval/src/yamlRestTest/resources/rest-api-spec/test/rank_eval/20_dcg.yml index 90094baabb9db..82005efcebe18 100644 --- a/modules/rank-eval/src/yamlRestTest/resources/rest-api-spec/test/rank_eval/20_dcg.yml +++ 
b/modules/rank-eval/src/yamlRestTest/resources/rest-api-spec/test/rank_eval/20_dcg.yml @@ -1,10 +1,5 @@ --- "Response format": - - - skip: - version: " - 6.1.99" - reason: the ranking evaluation feature is available since 6.2 - - do: index: index: foo diff --git a/modules/rank-eval/src/yamlRestTest/resources/rest-api-spec/test/rank_eval/30_failures.yml b/modules/rank-eval/src/yamlRestTest/resources/rest-api-spec/test/rank_eval/30_failures.yml index b9f55ed12ad7e..c88a769b8687b 100644 --- a/modules/rank-eval/src/yamlRestTest/resources/rest-api-spec/test/rank_eval/30_failures.yml +++ b/modules/rank-eval/src/yamlRestTest/resources/rest-api-spec/test/rank_eval/30_failures.yml @@ -1,10 +1,5 @@ --- "Response format": - - - skip: - version: " - 6.2.99" - reason: response format was updated in 6.3 - - do: index: index: foo diff --git a/modules/rank-eval/src/yamlRestTest/resources/rest-api-spec/test/rank_eval/40_rank_eval_templated.yml b/modules/rank-eval/src/yamlRestTest/resources/rest-api-spec/test/rank_eval/40_rank_eval_templated.yml index 57d5aa5642ef6..08897e17ef900 100644 --- a/modules/rank-eval/src/yamlRestTest/resources/rest-api-spec/test/rank_eval/40_rank_eval_templated.yml +++ b/modules/rank-eval/src/yamlRestTest/resources/rest-api-spec/test/rank_eval/40_rank_eval_templated.yml @@ -48,11 +48,6 @@ setup: --- "Basic rank-eval request with template": - - - skip: - version: " - 6.1.99" - reason: the ranking evaluation feature is available since 6.2 - - do: rank_eval: body: { diff --git a/modules/reindex/src/internalClusterTest/java/org/opensearch/client/documentation/ReindexDocumentationIT.java b/modules/reindex/src/internalClusterTest/java/org/opensearch/client/documentation/ReindexDocumentationIT.java index b19de5150dfe8..827afdeb7ad86 100644 --- a/modules/reindex/src/internalClusterTest/java/org/opensearch/client/documentation/ReindexDocumentationIT.java +++ b/modules/reindex/src/internalClusterTest/java/org/opensearch/client/documentation/ReindexDocumentationIT.java @@ -311,7 +311,7 @@ private ReindexRequestBuilder reindexAndPartiallyBlock() throws Exception { assertThat(ALLOWED_OPERATIONS.drainPermits(), equalTo(0)); ReindexRequestBuilder builder = new ReindexRequestBuilder(client, ReindexAction.INSTANCE).source(INDEX_NAME) - .destination("target_index", "_doc"); + .destination("target_index"); // Scroll by 1 so that cancellation is easier to control builder.source().setSize(1); diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollAction.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollAction.java index 07d67290d8f2f..43adffc6f7671 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollAction.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollAction.java @@ -60,7 +60,6 @@ import org.opensearch.index.mapper.IndexFieldMapper; import org.opensearch.index.mapper.RoutingFieldMapper; import org.opensearch.index.mapper.SourceFieldMapper; -import org.opensearch.index.mapper.TypeFieldMapper; import org.opensearch.index.mapper.VersionFieldMapper; import org.opensearch.index.reindex.ScrollableHitSource.SearchFailure; import org.opensearch.script.Script; @@ -249,7 +248,7 @@ protected boolean accept(ScrollableHitSource.Hit doc) { * change the "fields" part of the search request it is unlikely that we got here because we didn't fetch _source. * Thus the error message assumes that it wasn't stored. 
*/ - throw new IllegalArgumentException("[" + doc.getIndex() + "][" + doc.getType() + "][" + doc.getId() + "] didn't store _source"); + throw new IllegalArgumentException("[" + doc.getIndex() + "][" + doc.getId() + "] didn't store _source"); } return true; } @@ -597,10 +596,6 @@ public interface RequestWrapper> { String getIndex(); - void setType(String type); - - String getType(); - void setId(String id); String getId(); @@ -643,16 +638,6 @@ public String getIndex() { return request.index(); } - @Override - public void setType(String type) { - request.type(type); - } - - @Override - public String getType() { - return request.type(); - } - @Override public void setId(String id) { request.id(id); @@ -732,16 +717,6 @@ public String getIndex() { return request.index(); } - @Override - public void setType(String type) { - request.type(type); - } - - @Override - public String getType() { - return request.type(); - } - @Override public void setId(String id) { request.id(id); @@ -831,7 +806,6 @@ public RequestWrapper apply(RequestWrapper request, ScrollableHitSource.Hi Map context = new HashMap<>(); context.put(IndexFieldMapper.NAME, doc.getIndex()); - context.put(TypeFieldMapper.NAME, doc.getType()); context.put(IdFieldMapper.NAME, doc.getId()); Long oldVersion = doc.getVersion(); context.put(VersionFieldMapper.NAME, oldVersion); @@ -861,10 +835,6 @@ public RequestWrapper apply(RequestWrapper request, ScrollableHitSource.Hi if (false == doc.getIndex().equals(newValue)) { scriptChangedIndex(request, newValue); } - newValue = context.remove(TypeFieldMapper.NAME); - if (false == doc.getType().equals(newValue)) { - scriptChangedType(request, newValue); - } newValue = context.remove(IdFieldMapper.NAME); if (false == doc.getId().equals(newValue)) { scriptChangedId(request, newValue); @@ -899,7 +869,7 @@ protected RequestWrapper scriptChangedOpType(RequestWrapper request, OpTyp taskWorker.countNoop(); return null; case DELETE: - RequestWrapper delete = wrap(new DeleteRequest(request.getIndex(), request.getType(), request.getId())); + RequestWrapper delete = wrap(new DeleteRequest(request.getIndex(), request.getId())); delete.setVersion(request.getVersion()); delete.setVersionType(VersionType.INTERNAL); delete.setRouting(request.getRouting()); @@ -911,8 +881,6 @@ protected RequestWrapper scriptChangedOpType(RequestWrapper request, OpTyp protected abstract void scriptChangedIndex(RequestWrapper request, Object to); - protected abstract void scriptChangedType(RequestWrapper request, Object to); - protected abstract void scriptChangedId(RequestWrapper request, Object to); protected abstract void scriptChangedVersion(RequestWrapper request, Object to); diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/AsyncDeleteByQueryAction.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/AsyncDeleteByQueryAction.java index ac1a7c22a4d2f..1a9ce16acc255 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/AsyncDeleteByQueryAction.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/AsyncDeleteByQueryAction.java @@ -67,7 +67,6 @@ protected boolean accept(ScrollableHitSource.Hit doc) { protected RequestWrapper buildRequest(ScrollableHitSource.Hit doc) { DeleteRequest delete = new DeleteRequest(); delete.index(doc.getIndex()); - delete.type(doc.getType()); delete.id(doc.getId()); delete.setIfSeqNo(doc.getSeqNo()); delete.setIfPrimaryTerm(doc.getPrimaryTerm()); diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/Reindexer.java 
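
`AsyncDeleteByQueryAction.buildRequest` above, like the DELETE branch of `scriptChangedOpType`, now addresses documents purely by index and id while keeping seq-no based concurrency control; the `setType`/`getType` pair is gone from `RequestWrapper` entirely. A minimal sketch of that construction, with placeholder argument values:

    import org.opensearch.action.delete.DeleteRequest;

    DeleteRequest buildDelete(String index, String id, long seqNo, long primaryTerm) {
        DeleteRequest delete = new DeleteRequest();
        delete.index(index);
        delete.id(id); // no type setter on the request any more
        delete.setIfSeqNo(seqNo);
        delete.setIfPrimaryTerm(primaryTerm);
        return delete;
    }
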
b/modules/reindex/src/main/java/org/opensearch/index/reindex/Reindexer.java index 0037e1d06a115..8ade055d10f60 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/Reindexer.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/Reindexer.java @@ -352,13 +352,6 @@ protected RequestWrapper buildRequest(ScrollableHitSource.Hit doc) // Copy the index from the request so we always write where it asked to write index.index(mainRequest.getDestination().index()); - // If the request override's type then the user wants all documents in that type. Otherwise keep the doc's type. - if (mainRequest.getDestination().type() == null) { - index.type(doc.getType()); - } else { - index.type(mainRequest.getDestination().type()); - } - /* * Internal versioning can just use what we copied from the destination request. Otherwise we assume we're using external * versioning and use the doc's version. @@ -460,12 +453,6 @@ protected void scriptChangedIndex(RequestWrapper request, Object to) { request.setIndex(to.toString()); } - @Override - protected void scriptChangedType(RequestWrapper request, Object to) { - requireNonNull(to, "Can't reindex without a destination type!"); - request.setType(to.toString()); - } - @Override protected void scriptChangedId(RequestWrapper request, Object to) { request.setId(Objects.toString(to, null)); diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/TransportUpdateByQueryAction.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/TransportUpdateByQueryAction.java index 25fd1a250d362..f07915b9d9e76 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/TransportUpdateByQueryAction.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/TransportUpdateByQueryAction.java @@ -46,7 +46,6 @@ import org.opensearch.index.mapper.IdFieldMapper; import org.opensearch.index.mapper.IndexFieldMapper; import org.opensearch.index.mapper.RoutingFieldMapper; -import org.opensearch.index.mapper.TypeFieldMapper; import org.opensearch.script.Script; import org.opensearch.script.ScriptService; import org.opensearch.tasks.Task; @@ -138,7 +137,6 @@ public BiFunction, ScrollableHitSource.Hit, RequestWrapper> protected RequestWrapper buildRequest(ScrollableHitSource.Hit doc) { IndexRequest index = new IndexRequest(); index.index(doc.getIndex()); - index.type(doc.getType()); index.id(doc.getId()); index.source(doc.getSource(), doc.getXContentType()); index.setIfSeqNo(doc.getSeqNo()); @@ -163,11 +161,6 @@ protected void scriptChangedIndex(RequestWrapper request, Object to) { throw new IllegalArgumentException("Modifying [" + IndexFieldMapper.NAME + "] not allowed"); } - @Override - protected void scriptChangedType(RequestWrapper request, Object to) { - throw new IllegalArgumentException("Modifying [" + TypeFieldMapper.NAME + "] not allowed"); - } - @Override protected void scriptChangedId(RequestWrapper request, Object to) { throw new IllegalArgumentException("Modifying [" + IdFieldMapper.NAME + "] not allowed"); diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteRequestBuilders.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteRequestBuilders.java index 69ec2e8b852cb..8467fbdeacd0e 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteRequestBuilders.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteRequestBuilders.java @@ -79,7 +79,6 @@ static Request initialSearch(SearchRequest 
searchRequest, BytesReference query, // It is nasty to build paths with StringBuilder but we'll be careful.... StringBuilder path = new StringBuilder("/"); addIndices(path, searchRequest.indices()); - addTypes(path, searchRequest.types()); path.append("_search"); Request request = new Request("POST", path.toString()); @@ -210,16 +209,6 @@ private static String encodeIndex(String s) { } } - private static void addTypes(StringBuilder path, String[] types) { - if (types == null || types.length == 0) { - return; - } - for (String indexOrType : types) { - checkIndexOrType("Type", indexOrType); - } - path.append(Strings.arrayToCommaDelimitedString(types)).append('/'); - } - private static void checkIndexOrType(String name, String indexOrType) { if (indexOrType.indexOf(',') >= 0) { throw new IllegalArgumentException(name + " containing [,] not supported but got [" + indexOrType + "]"); diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteResponseParsers.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteResponseParsers.java index 4c57872462f0b..d22b995036e90 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteResponseParsers.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteResponseParsers.java @@ -75,14 +75,12 @@ private RemoteResponseParsers() {} public static final ConstructingObjectParser HIT_PARSER = new ConstructingObjectParser<>("hit", true, a -> { int i = 0; String index = (String) a[i++]; - String type = (String) a[i++]; String id = (String) a[i++]; Long version = (Long) a[i++]; - return new BasicHit(index, type, id, version == null ? -1 : version); + return new BasicHit(index, id, version == null ? -1 : version); }); static { HIT_PARSER.declareString(constructorArg(), new ParseField("_index")); - HIT_PARSER.declareString(constructorArg(), new ParseField("_type")); HIT_PARSER.declareString(constructorArg(), new ParseField("_id")); HIT_PARSER.declareLong(optionalConstructorArg(), new ParseField("_version")); HIT_PARSER.declareObject(((basicHit, tuple) -> basicHit.setSource(tuple.v1(), tuple.v2())), (p, s) -> { diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollActionMetadataTestCase.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollActionMetadataTestCase.java index a71381d968ca8..003f3b0824602 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollActionMetadataTestCase.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollActionMetadataTestCase.java @@ -37,7 +37,7 @@ public abstract class AbstractAsyncBulkByScrollActionMetadataTestCase< Response extends BulkByScrollResponse> extends AbstractAsyncBulkByScrollActionTestCase { protected ScrollableHitSource.BasicHit doc() { - return new ScrollableHitSource.BasicHit("index", "type", "id", 0); + return new ScrollableHitSource.BasicHit("index", "id", 0); } protected abstract AbstractAsyncBulkByScrollAction action(); diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollActionScriptTestCase.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollActionScriptTestCase.java index 3c19edc89c865..671faef6c5545 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollActionScriptTestCase.java +++ 
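
With `addTypes` deleted from `RemoteRequestBuilders` below, the remote search URL is just the comma-joined indices plus `_search`. A minimal sketch of the resulting path shape, using plain JDK string joining instead of the `Strings` utility and index encoding the class itself applies:

    static String remoteSearchPath(String... indices) {
        StringBuilder path = new StringBuilder("/");
        if (indices.length > 0) {
            path.append(String.join(",", indices)).append('/');
        }
        return path.append("_search").toString(); // e.g. "/index1,index2/_search", no type segment
    }
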
b/modules/reindex/src/test/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollActionScriptTestCase.java @@ -65,8 +65,8 @@ public void setupScriptService() { @SuppressWarnings("unchecked") protected T applyScript(Consumer> scriptBody) { - IndexRequest index = new IndexRequest("index", "type", "1").source(singletonMap("foo", "bar")); - ScrollableHitSource.Hit doc = new ScrollableHitSource.BasicHit("test", "type", "id", 0); + IndexRequest index = new IndexRequest("index").id("1").source(singletonMap("foo", "bar")); + ScrollableHitSource.Hit doc = new ScrollableHitSource.BasicHit("test", "id", 0); UpdateScript.Factory factory = (params, ctx) -> new UpdateScript(Collections.emptyMap(), ctx) { @Override public void execute() { @@ -79,11 +79,6 @@ public void execute() { return (result != null) ? (T) result.self() : null; } - public void testTypeDeprecation() { - applyScript((Map ctx) -> ctx.get("_type")); - assertWarnings("[types removal] Looking up doc types [_type] in scripts is deprecated."); - } - public void testScriptAddingJunkToCtxIsError() { try { applyScript((Map ctx) -> ctx.put("junk", "junk")); @@ -102,16 +97,9 @@ public void testChangeSource() { assertEquals("cat", index.sourceAsMap().get("bar")); } - public void testSetOpTypeNoop() throws Exception { - assertThat(task.getStatus().getNoops(), equalTo(0L)); - assertNull(applyScript((Map ctx) -> ctx.put("op", OpType.NOOP.toString()))); - assertThat(task.getStatus().getNoops(), equalTo(1L)); - } - public void testSetOpTypeDelete() throws Exception { DeleteRequest delete = applyScript((Map ctx) -> ctx.put("op", OpType.DELETE.toString())); assertThat(delete.index(), equalTo("index")); - assertThat(delete.type(), equalTo("type")); assertThat(delete.id(), equalTo("1")); } diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/AsyncBulkByScrollActionTests.java index 1dd758150c392..9c2e44f580628 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/AsyncBulkByScrollActionTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/AsyncBulkByScrollActionTests.java @@ -73,7 +73,6 @@ import org.opensearch.common.CheckedConsumer; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.settings.Settings; -import org.opensearch.common.text.Text; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.OpenSearchRejectedExecutionException; @@ -288,7 +287,7 @@ public void testScrollResponseSetsTotal() { public void testScrollResponseBatchingBehavior() throws Exception { int maxBatches = randomIntBetween(0, 100); for (int batches = 1; batches < maxBatches; batches++) { - Hit hit = new ScrollableHitSource.BasicHit("index", "type", "id", 0); + Hit hit = new ScrollableHitSource.BasicHit("index", "id", 0); ScrollableHitSource.Response response = new ScrollableHitSource.Response(false, emptyList(), 1, singletonList(hit), null); DummyAsyncBulkByScrollAction action = new DummyAsyncBulkByScrollAction(); simulateScrollResponse(action, System.nanoTime(), 0, response); @@ -315,7 +314,7 @@ public void testBulkResponseSetsLotsOfStatus() throws Exception { responses[i] = new BulkItemResponse( i, randomFrom(DocWriteRequest.OpType.values()), - new Failure(shardId.getIndexName(), "type", "id" + i, new VersionConflictEngineException(shardId, "id", "test")) + new Failure(shardId.getIndexName(), "id" + i, 
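
The script test case above switches to the fluent, typeless `IndexRequest` form, and the bulk fixtures below drop the type argument from `BulkItemResponse.Failure` as well. A minimal sketch of both patterns, with placeholder values:

    import static java.util.Collections.singletonMap;

    import org.opensearch.action.bulk.BulkItemResponse;
    import org.opensearch.action.index.IndexRequest;

    IndexRequest request = new IndexRequest("index").id("1").source(singletonMap("foo", "bar"));
    BulkItemResponse.Failure failure = new BulkItemResponse.Failure("index", "id", new RuntimeException("test"));
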
new VersionConflictEngineException(shardId, "id", "test")) ); continue; } @@ -342,15 +341,7 @@ public void testBulkResponseSetsLotsOfStatus() throws Exception { } final int seqNo = randomInt(20); final int primaryTerm = randomIntBetween(1, 16); - final IndexResponse response = new IndexResponse( - shardId, - "type", - "id" + i, - seqNo, - primaryTerm, - randomInt(), - createdResponse - ); + final IndexResponse response = new IndexResponse(shardId, "id" + i, seqNo, primaryTerm, randomInt(), createdResponse); responses[i] = new BulkItemResponse(i, opType, response); } assertExactlyOnce(onSuccess -> new DummyAsyncBulkByScrollAction().onBulkResponse(new BulkResponse(responses, 0), onSuccess)); @@ -433,7 +424,7 @@ public void testSearchTimeoutsAbortRequest() throws Exception { * Mimicks bulk indexing failures. */ public void testBulkFailuresAbortRequest() throws Exception { - Failure failure = new Failure("index", "type", "id", new RuntimeException("test")); + Failure failure = new Failure("index", "id", new RuntimeException("test")); DummyAsyncBulkByScrollAction action = new DummyAsyncBulkByScrollAction(); BulkResponse bulkResponse = new BulkResponse( new BulkItemResponse[] { new BulkItemResponse(0, DocWriteRequest.OpType.CREATE, failure) }, @@ -456,7 +447,7 @@ protected AbstractAsyncBulkByScrollAction.RequestWrapper buildRequest(Hit doc throw new RuntimeException("surprise"); } }; - ScrollableHitSource.BasicHit hit = new ScrollableHitSource.BasicHit("index", "type", "id", 0); + ScrollableHitSource.BasicHit hit = new ScrollableHitSource.BasicHit("index", "id", 0); hit.setSource(new BytesArray("{}"), XContentType.JSON); ScrollableHitSource.Response response = new ScrollableHitSource.Response(false, emptyList(), 1, singletonList(hit), null); simulateScrollResponse(action, System.nanoTime(), 0, response); @@ -541,7 +532,7 @@ protected RequestWrapper buildRequest(Hit doc) { action.start(); // create a simulated response. - SearchHit hit = new SearchHit(0, "id", new Text("type"), emptyMap(), emptyMap()).sourceRef(new BytesArray("{}")); + SearchHit hit = new SearchHit(0, "id", emptyMap(), emptyMap()).sourceRef(new BytesArray("{}")); SearchHits hits = new SearchHits( IntStream.range(0, 100).mapToObj(i -> hit).toArray(SearchHit[]::new), new TotalHits(0, TotalHits.Relation.EQUAL_TO), @@ -597,7 +588,7 @@ private void bulkRetryTestCase(boolean failWithRejection) throws Exception { DummyAsyncBulkByScrollAction action = new DummyActionWithoutBackoff(); BulkRequest request = new BulkRequest(); for (int i = 0; i < size + 1; i++) { - request.add(new IndexRequest("index", "type", "id" + i)); + request.add(new IndexRequest("index").id("id" + i)); } if (failWithRejection) { action.sendBulkRequest(request, Assert::fail); @@ -946,7 +937,6 @@ protected void IndexRequest index = (IndexRequest) item; response = new IndexResponse( shardId, - index.type(), index.id() == null ? 
"dummy_id" : index.id(), randomInt(20), randomIntBetween(1, 16), @@ -957,7 +947,6 @@ protected void UpdateRequest update = (UpdateRequest) item; response = new UpdateResponse( shardId, - update.type(), update.id(), randomNonNegativeLong(), randomIntBetween(1, Integer.MAX_VALUE), @@ -968,7 +957,6 @@ protected void DeleteRequest delete = (DeleteRequest) item; response = new DeleteResponse( shardId, - delete.type(), delete.id(), randomInt(20), randomIntBetween(1, 16), @@ -982,12 +970,7 @@ protected void responses[i] = new BulkItemResponse( i, item.opType(), - new Failure( - response.getIndex(), - response.getType(), - response.getId(), - new OpenSearchRejectedExecutionException() - ) + new Failure(response.getIndex(), response.getId(), new OpenSearchRejectedExecutionException()) ); } else { responses[i] = new BulkItemResponse(i, item.opType(), response); diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/BulkIndexByScrollResponseTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/BulkIndexByScrollResponseTests.java index d2cb565547875..cd0ee066aec7f 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/BulkIndexByScrollResponseTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/BulkIndexByScrollResponseTests.java @@ -80,7 +80,7 @@ public void testMergeConstructor() { List bulkFailures = frequently() ? emptyList() : IntStream.range(0, between(1, 3)) - .mapToObj(j -> new BulkItemResponse.Failure("idx", "type", "id", new Exception())) + .mapToObj(j -> new BulkItemResponse.Failure("idx", "id", new Exception())) .collect(Collectors.toList()); allBulkFailures.addAll(bulkFailures); List searchFailures = frequently() diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/CancelTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/CancelTests.java index 1bab1db908ca9..bd43f05225f65 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/CancelTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/CancelTests.java @@ -77,7 +77,6 @@ public class CancelTests extends ReindexTestCase { protected static final String INDEX = "reindex-cancel-index"; - protected static final String TYPE = "reindex-cancel-type"; // Semaphore used to allow & block indexing operations during the test private static final Semaphore ALLOWED_OPERATIONS = new Semaphore(0); @@ -116,7 +115,7 @@ private void testCancel( false, true, IntStream.range(0, numDocs) - .mapToObj(i -> client().prepareIndex(INDEX, TYPE, String.valueOf(i)).setSource("n", i)) + .mapToObj(i -> client().prepareIndex().setIndex(INDEX).setId(String.valueOf(i)).setSource("n", i)) .collect(Collectors.toList()) ); @@ -247,12 +246,12 @@ public static TaskInfo findTaskToCancel(String actionName, int workerCount) { } public void testReindexCancel() throws Exception { - testCancel(ReindexAction.NAME, reindex().source(INDEX).destination("dest", TYPE), (response, total, modified) -> { + testCancel(ReindexAction.NAME, reindex().source(INDEX).destination("dest"), (response, total, modified) -> { assertThat(response, matcher().created(modified).reasonCancelled(equalTo("by user request"))); refresh("dest"); - assertHitCount(client().prepareSearch("dest").setTypes(TYPE).setSize(0).get(), modified); - }, equalTo("reindex from [" + INDEX + "] to [dest][" + TYPE + "]")); + assertHitCount(client().prepareSearch("dest").setSize(0).get(), modified); + }, equalTo("reindex from [" + INDEX + "] to [dest]")); } public void 
testUpdateByQueryCancel() throws Exception { @@ -289,13 +288,13 @@ public void testDeleteByQueryCancel() throws Exception { public void testReindexCancelWithWorkers() throws Exception { testCancel( ReindexAction.NAME, - reindex().source(INDEX).filter(QueryBuilders.matchAllQuery()).destination("dest", TYPE).setSlices(5), + reindex().source(INDEX).filter(QueryBuilders.matchAllQuery()).destination("dest").setSlices(5), (response, total, modified) -> { assertThat(response, matcher().created(modified).reasonCancelled(equalTo("by user request")).slices(hasSize(5))); refresh("dest"); - assertHitCount(client().prepareSearch("dest").setTypes(TYPE).setSize(0).get(), modified); + assertHitCount(client().prepareSearch("dest").setSize(0).get(), modified); }, - equalTo("reindex from [" + INDEX + "] to [dest][" + TYPE + "]") + equalTo("reindex from [" + INDEX + "] to [dest]") ); } @@ -355,16 +354,16 @@ public static class BlockingOperationListener implements IndexingOperationListen @Override public Engine.Index preIndex(ShardId shardId, Engine.Index index) { - return preCheck(index, index.type()); + return preCheck(index); } @Override public Engine.Delete preDelete(ShardId shardId, Engine.Delete delete) { - return preCheck(delete, delete.type()); + return preCheck(delete); } - private T preCheck(T operation, String type) { - if ((TYPE.equals(type) == false) || (operation.origin() != Origin.PRIMARY)) { + private T preCheck(T operation) { + if ((operation.origin() != Origin.PRIMARY)) { return operation; } diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/ClientScrollableHitSourceTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/ClientScrollableHitSourceTests.java index e0c8bf604ed27..8af217e5140e1 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/ClientScrollableHitSourceTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/ClientScrollableHitSourceTests.java @@ -47,7 +47,6 @@ import org.opensearch.client.support.AbstractClient; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.settings.Settings; -import org.opensearch.common.text.Text; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.OpenSearchRejectedExecutionException; import org.opensearch.search.SearchHit; @@ -183,7 +182,7 @@ public void testScrollKeepAlive() { private SearchResponse createSearchResponse() { // create a simulated response. 
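The fixture pattern repeated across these test hunks reduces to the minimal sketch below (our own illustration, assuming only the typeless constructors shown above: IndexResponse without a type argument, and BulkItemResponse.Failure keyed by index and id).

import org.opensearch.action.DocWriteRequest;
import org.opensearch.action.bulk.BulkItemResponse;
import org.opensearch.action.index.IndexResponse;
import org.opensearch.index.shard.ShardId;

final class TypelessBulkFixtures {
    // IndexResponse(shardId, id, seqNo, primaryTerm, version, created): the
    // constructor without the removed type argument, as used in the hunks above.
    static BulkItemResponse success(ShardId shardId, String id, int slot) {
        IndexResponse response = new IndexResponse(shardId, id, 0L, 1L, 1L, true);
        return new BulkItemResponse(slot, DocWriteRequest.OpType.INDEX, response);
    }

    // BulkItemResponse.Failure likewise lost its type parameter.
    static BulkItemResponse failure(String index, String id, int slot) {
        BulkItemResponse.Failure failure = new BulkItemResponse.Failure(index, id, new RuntimeException("test"));
        return new BulkItemResponse(slot, DocWriteRequest.OpType.INDEX, failure);
    }
}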
- SearchHit hit = new SearchHit(0, "id", new Text("type"), emptyMap(), emptyMap()).sourceRef(new BytesArray("{}")); + SearchHit hit = new SearchHit(0, "id", emptyMap(), emptyMap()).sourceRef(new BytesArray("{}")); SearchHits hits = new SearchHits( IntStream.range(0, randomIntBetween(0, 20)).mapToObj(i -> hit).toArray(SearchHit[]::new), new TotalHits(0, TotalHits.Relation.EQUAL_TO), diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/DeleteByQueryBasicTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/DeleteByQueryBasicTests.java index 13ca95c01b72d..870f3620062f7 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/DeleteByQueryBasicTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/DeleteByQueryBasicTests.java @@ -83,25 +83,25 @@ public void testBasics() throws Exception { client().prepareIndex("test", "test", "7").setSource("foo", "f") ); - assertHitCount(client().prepareSearch("test").setTypes("test").setSize(0).get(), 7); + assertHitCount(client().prepareSearch("test").setSize(0).get(), 7); // Deletes two docs that matches "foo:a" assertThat(deleteByQuery().source("test").filter(termQuery("foo", "a")).refresh(true).get(), matcher().deleted(2)); - assertHitCount(client().prepareSearch("test").setTypes("test").setSize(0).get(), 5); + assertHitCount(client().prepareSearch("test").setSize(0).get(), 5); // Deletes the two first docs with limit by size DeleteByQueryRequestBuilder request = deleteByQuery().source("test").filter(QueryBuilders.matchAllQuery()).size(2).refresh(true); request.source().addSort("foo.keyword", SortOrder.ASC); assertThat(request.get(), matcher().deleted(2)); - assertHitCount(client().prepareSearch("test").setTypes("test").setSize(0).get(), 3); + assertHitCount(client().prepareSearch("test").setSize(0).get(), 3); // Deletes but match no docs assertThat(deleteByQuery().source("test").filter(termQuery("foo", "no_match")).refresh(true).get(), matcher().deleted(0)); - assertHitCount(client().prepareSearch("test").setTypes("test").setSize(0).get(), 3); + assertHitCount(client().prepareSearch("test").setSize(0).get(), 3); // Deletes all remaining docs assertThat(deleteByQuery().source("test").filter(QueryBuilders.matchAllQuery()).refresh(true).get(), matcher().deleted(3)); - assertHitCount(client().prepareSearch("test").setTypes("test").setSize(0).get(), 0); + assertHitCount(client().prepareSearch("test").setSize(0).get(), 0); } public void testDeleteByQueryWithOneIndex() throws Exception { @@ -319,7 +319,7 @@ public void testSlices() throws Exception { client().prepareIndex("test", "test", "6").setSource("foo", "e"), client().prepareIndex("test", "test", "7").setSource("foo", "f") ); - assertHitCount(client().prepareSearch("test").setTypes("test").setSize(0).get(), 7); + assertHitCount(client().prepareSearch("test").setSize(0).get(), 7); int slices = randomSlices(); int expectedSlices = expectedSliceStatuses(slices, "test"); @@ -329,14 +329,14 @@ public void testSlices() throws Exception { deleteByQuery().source("test").filter(termQuery("foo", "a")).refresh(true).setSlices(slices).get(), matcher().deleted(2).slices(hasSize(expectedSlices)) ); - assertHitCount(client().prepareSearch("test").setTypes("test").setSize(0).get(), 5); + assertHitCount(client().prepareSearch("test").setSize(0).get(), 5); // Delete remaining docs assertThat( deleteByQuery().source("test").filter(QueryBuilders.matchAllQuery()).refresh(true).setSlices(slices).get(), 
matcher().deleted(5).slices(hasSize(expectedSlices)) ); - assertHitCount(client().prepareSearch("test").setTypes("test").setSize(0).get(), 0); + assertHitCount(client().prepareSearch("test").setSize(0).get(), 0); } public void testMultipleSources() throws Exception { @@ -369,7 +369,7 @@ public void testMultipleSources() throws Exception { ); for (String index : docs.keySet()) { - assertHitCount(client().prepareSearch(index).setTypes("test").setSize(0).get(), 0); + assertHitCount(client().prepareSearch(index).setSize(0).get(), 0); } } diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexBasicTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexBasicTests.java index 581cb19b0dd8b..652e4d4d34fd5 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexBasicTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexBasicTests.java @@ -59,23 +59,23 @@ public void testFiltering() throws Exception { assertHitCount(client().prepareSearch("source").setSize(0).get(), 4); // Copy all the docs - ReindexRequestBuilder copy = reindex().source("source").destination("dest", "type").refresh(true); + ReindexRequestBuilder copy = reindex().source("source").destination("dest").refresh(true); assertThat(copy.get(), matcher().created(4)); assertHitCount(client().prepareSearch("dest").setSize(0).get(), 4); // Now none of them createIndex("none"); - copy = reindex().source("source").destination("none", "type").filter(termQuery("foo", "no_match")).refresh(true); + copy = reindex().source("source").destination("none").filter(termQuery("foo", "no_match")).refresh(true); assertThat(copy.get(), matcher().created(0)); assertHitCount(client().prepareSearch("none").setSize(0).get(), 0); // Now half of them - copy = reindex().source("source").destination("dest_half", "type").filter(termQuery("foo", "a")).refresh(true); + copy = reindex().source("source").destination("dest_half").filter(termQuery("foo", "a")).refresh(true); assertThat(copy.get(), matcher().created(2)); assertHitCount(client().prepareSearch("dest_half").setSize(0).get(), 2); // Limit with maxDocs - copy = reindex().source("source").destination("dest_size_one", "type").maxDocs(1).refresh(true); + copy = reindex().source("source").destination("dest_size_one").maxDocs(1).refresh(true); assertThat(copy.get(), matcher().created(1)); assertHitCount(client().prepareSearch("dest_size_one").setSize(0).get(), 1); } @@ -91,7 +91,7 @@ public void testCopyMany() throws Exception { assertHitCount(client().prepareSearch("source").setSize(0).get(), max); // Copy all the docs - ReindexRequestBuilder copy = reindex().source("source").destination("dest", "type").refresh(true); + ReindexRequestBuilder copy = reindex().source("source").destination("dest").refresh(true); // Use a small batch size so we have to use more than one batch copy.source().setSize(5); assertThat(copy.get(), matcher().created(max).batches(max, 5)); @@ -99,7 +99,7 @@ public void testCopyMany() throws Exception { // Copy some of the docs int half = max / 2; - copy = reindex().source("source").destination("dest_half", "type").refresh(true); + copy = reindex().source("source").destination("dest_half").refresh(true); // Use a small batch size so we have to use more than one batch copy.source().setSize(5); copy.maxDocs(half); @@ -121,15 +121,15 @@ public void testCopyManyWithSlices() throws Exception { int expectedSlices = expectedSliceStatuses(slices, "source"); // Copy all the docs - ReindexRequestBuilder copy = 
reindex().source("source").destination("dest", "type").refresh(true).setSlices(slices); + ReindexRequestBuilder copy = reindex().source("source").destination("dest").refresh(true).setSlices(slices); // Use a small batch size so we have to use more than one batch copy.source().setSize(5); assertThat(copy.get(), matcher().created(max).batches(greaterThanOrEqualTo(max / 5)).slices(hasSize(expectedSlices))); - assertHitCount(client().prepareSearch("dest").setTypes("type").setSize(0).get(), max); + assertHitCount(client().prepareSearch("dest").setSize(0).get(), max); // Copy some of the docs int half = max / 2; - copy = reindex().source("source").destination("dest_half", "type").refresh(true).setSlices(slices); + copy = reindex().source("source").destination("dest_half").refresh(true).setSlices(slices); // Use a small batch size so we have to use more than one batch copy.source().setSize(5); copy.maxDocs(half); @@ -162,7 +162,7 @@ public void testMultipleSources() throws Exception { int expectedSlices = expectedSliceStatuses(slices, docs.keySet()); String[] sourceIndexNames = docs.keySet().toArray(new String[docs.size()]); - ReindexRequestBuilder request = reindex().source(sourceIndexNames).destination("dest", "type").refresh(true).setSlices(slices); + ReindexRequestBuilder request = reindex().source(sourceIndexNames).destination("dest").refresh(true).setSlices(slices); BulkByScrollResponse response = request.get(); assertThat(response, matcher().created(allDocs.size()).slices(hasSize(expectedSlices))); diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexScriptTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexScriptTests.java index bd6eba132af21..85f0c3c24abee 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexScriptTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexScriptTests.java @@ -60,20 +60,6 @@ public void testSettingIndexToNullIsError() throws Exception { } } - public void testSetType() throws Exception { - Object type = randomFrom(new Object[] { 234, 234L, "pancake" }); - IndexRequest index = applyScript((Map ctx) -> ctx.put("_type", type)); - assertEquals(type.toString(), index.type()); - } - - public void testSettingTypeToNullIsError() throws Exception { - try { - applyScript((Map ctx) -> ctx.put("_type", null)); - } catch (NullPointerException e) { - assertThat(e.getMessage(), containsString("Can't reindex without a destination type!")); - } - } - public void testSetId() throws Exception { Object id = randomFrom(new Object[] { null, 234, 234L, "pancake" }); IndexRequest index = applyScript((Map ctx) -> ctx.put("_id", id)); diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexVersioningTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexVersioningTests.java index 7181fa9f4d273..e516be131e6a4 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexVersioningTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexVersioningTests.java @@ -130,7 +130,7 @@ private void setupSourceAbsent() throws Exception { client().prepareIndex("source", "_doc", "test").setVersionType(EXTERNAL).setVersion(SOURCE_VERSION).setSource("foo", "source") ); - assertEquals(SOURCE_VERSION, client().prepareGet("source", "_doc", "test").get().getVersion()); + assertEquals(SOURCE_VERSION, client().prepareGet("source", "test").get().getVersion()); } private void setupDest(int version) throws Exception 
{ @@ -140,7 +140,7 @@ private void setupDest(int version) throws Exception { client().prepareIndex("dest", "_doc", "test").setVersionType(EXTERNAL).setVersion(version).setSource("foo", "dest") ); - assertEquals(version, client().prepareGet("dest", "_doc", "test").get().getVersion()); + assertEquals(version, client().prepareGet("dest", "test").get().getVersion()); } private void setupDestOlder() throws Exception { @@ -152,7 +152,7 @@ private void setupDestNewer() throws Exception { } private void assertDest(String fooValue, int version) { - GetResponse get = client().prepareGet("dest", "_doc", "test").get(); + GetResponse get = client().prepareGet("dest", "test").get(); assertEquals(fooValue, get.getSource().get("foo")); assertEquals(version, get.getVersion()); } diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/RestDeleteByQueryActionTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/RestDeleteByQueryActionTests.java index 5341bcd0fee5d..11e1f6b478fe3 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/RestDeleteByQueryActionTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/RestDeleteByQueryActionTests.java @@ -33,8 +33,6 @@ package org.opensearch.index.reindex; import org.opensearch.common.xcontent.NamedXContentRegistry; -import org.opensearch.rest.RestRequest; -import org.opensearch.rest.action.search.RestSearchAction; import org.opensearch.test.rest.FakeRestRequest; import org.opensearch.test.rest.RestActionTestCase; import org.junit.Before; @@ -52,26 +50,6 @@ public void setUpAction() { controller().registerHandler(action); } - public void testTypeInPath() throws IOException { - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.POST) - .withPath("/some_index/some_type/_delete_by_query") - .build(); - - // We're not actually testing anything to do with the client, but need to set this so it doesn't fail the test for being unset. - verifyingClient.setExecuteLocallyVerifier((arg1, arg2) -> null); - - dispatchRequest(request); - - // checks the type in the URL is propagated correctly to the request object - // only works after the request is dispatched, so its params are filled from url. 
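The versioning tests above migrate every document lookup to the two-argument get builder. A minimal sketch of that pattern follows; the helper name is ours, but prepareGet(index, id) is the typeless builder used in the hunks.

import org.opensearch.action.get.GetResponse;
import org.opensearch.client.Client;

final class GetVersionHelper {
    static long version(Client client, String index, String id) {
        GetResponse get = client.prepareGet(index, id).get(); // no doc type argument
        return get.getVersion();
    }
}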
- DeleteByQueryRequest dbqRequest = action.buildRequest(request, DEFAULT_NAMED_WRITABLE_REGISTRY); - assertArrayEquals(new String[] { "some_type" }, dbqRequest.getDocTypes()); - - // RestDeleteByQueryAction itself doesn't check for a deprecated type usage - // checking here for a deprecation from its internal search request - assertWarnings(RestSearchAction.TYPES_DEPRECATION_MESSAGE); - } - public void testParseEmpty() throws IOException { final FakeRestRequest restRequest = new FakeRestRequest.Builder(new NamedXContentRegistry(emptyList())).build(); DeleteByQueryRequest request = action.buildRequest(restRequest, DEFAULT_NAMED_WRITABLE_REGISTRY); diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/RestReindexActionTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/RestReindexActionTests.java index 508cfefa1679c..aa8221b045d3f 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/RestReindexActionTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/RestReindexActionTests.java @@ -38,13 +38,11 @@ import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.rest.RestRequest.Method; import org.opensearch.test.rest.FakeRestRequest; import org.opensearch.test.rest.RestActionTestCase; import org.junit.Before; import java.io.IOException; -import java.util.Arrays; import java.util.Collections; import static java.util.Collections.singletonMap; @@ -102,52 +100,4 @@ public void testSetScrollTimeout() throws IOException { assertEquals("10m", request.getScrollTime().toString()); } } - - /** - * test deprecation is logged if one or more types are used in source search request inside reindex - */ - public void testTypeInSource() throws IOException { - FakeRestRequest.Builder requestBuilder = new FakeRestRequest.Builder(xContentRegistry()).withMethod(Method.POST) - .withPath("/_reindex"); - XContentBuilder b = JsonXContent.contentBuilder().startObject(); - { - b.startObject("source"); - { - b.field("type", randomFrom(Arrays.asList("\"t1\"", "[\"t1\", \"t2\"]", "\"_doc\""))); - } - b.endObject(); - } - b.endObject(); - requestBuilder.withContent(new BytesArray(BytesReference.bytes(b).toBytesRef()), XContentType.JSON); - - // We're not actually testing anything to do with the client, but need to set this so it doesn't fail the test for being unset. - verifyingClient.setExecuteLocallyVerifier((arg1, arg2) -> null); - - dispatchRequest(requestBuilder.build()); - assertWarnings(ReindexRequest.TYPES_DEPRECATION_MESSAGE); - } - - /** - * test deprecation is logged if a type is used in the destination index request inside reindex - */ - public void testTypeInDestination() throws IOException { - FakeRestRequest.Builder requestBuilder = new FakeRestRequest.Builder(xContentRegistry()).withMethod(Method.POST) - .withPath("/_reindex"); - XContentBuilder b = JsonXContent.contentBuilder().startObject(); - { - b.startObject("dest"); - { - b.field("type", (randomBoolean() ? "_doc" : randomAlphaOfLength(4))); - } - b.endObject(); - } - b.endObject(); - requestBuilder.withContent(new BytesArray(BytesReference.bytes(b).toBytesRef()), XContentType.JSON); - - // We're not actually testing anything to do with the client, but need to set this so it doesn't fail the test for being unset. 
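The tests being deleted above and below existed only to verify that a type segment in the URL produced a deprecation warning. A hypothetical replacement, reusing the same RestActionTestCase scaffolding (xContentRegistry(), verifyingClient, dispatchRequest) that appears in the removed code, would exercise just the typeless path shape:

import java.io.IOException;

import org.opensearch.rest.RestRequest;
import org.opensearch.test.rest.FakeRestRequest;

public void testTypelessPath() throws IOException {
    RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.POST)
        .withPath("/some_index/_delete_by_query")
        .build();
    // Not exercising the client; set a no-op verifier so the harness is satisfied.
    verifyingClient.setExecuteLocallyVerifier((arg1, arg2) -> null);
    dispatchRequest(request); // no type segment, so no types deprecation warning to assert
}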
- verifyingClient.setExecuteLocallyVerifier((arg1, arg2) -> null); - - dispatchRequest(requestBuilder.build()); - assertWarnings(ReindexRequest.TYPES_DEPRECATION_MESSAGE); - } } diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/RestUpdateByQueryActionTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/RestUpdateByQueryActionTests.java index 743f0e8a852f4..ef5a94f2e1798 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/RestUpdateByQueryActionTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/RestUpdateByQueryActionTests.java @@ -33,8 +33,6 @@ package org.opensearch.index.reindex; import org.opensearch.common.xcontent.NamedXContentRegistry; -import org.opensearch.rest.RestRequest; -import org.opensearch.rest.action.search.RestSearchAction; import org.opensearch.test.rest.FakeRestRequest; import org.opensearch.test.rest.RestActionTestCase; import org.junit.Before; @@ -53,26 +51,6 @@ public void setUpAction() { controller().registerHandler(action); } - public void testTypeInPath() throws IOException { - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.POST) - .withPath("/some_index/some_type/_update_by_query") - .build(); - - // We're not actually testing anything to do with the client, but need to set this so it doesn't fail the test for being unset. - verifyingClient.setExecuteLocallyVerifier((arg1, arg2) -> null); - - dispatchRequest(request); - - // checks the type in the URL is propagated correctly to the request object - // only works after the request is dispatched, so its params are filled from url. - UpdateByQueryRequest ubqRequest = action.buildRequest(request, DEFAULT_NAMED_WRITABLE_REGISTRY); - assertArrayEquals(new String[] { "some_type" }, ubqRequest.getDocTypes()); - - // RestUpdateByQueryAction itself doesn't check for a deprecated type usage - // checking here for a deprecation from its internal search request - assertWarnings(RestSearchAction.TYPES_DEPRECATION_MESSAGE); - } - public void testParseEmpty() throws IOException { final FakeRestRequest restRequest = new FakeRestRequest.Builder(new NamedXContentRegistry(emptyList())).build(); UpdateByQueryRequest request = action.buildRequest(restRequest, DEFAULT_NAMED_WRITABLE_REGISTRY); diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/UpdateByQueryBasicTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/UpdateByQueryBasicTests.java index d803eff25d081..3ed1f7b563546 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/UpdateByQueryBasicTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/UpdateByQueryBasicTests.java @@ -55,36 +55,36 @@ public void testBasics() throws Exception { client().prepareIndex("test", "test", "3").setSource("foo", "b"), client().prepareIndex("test", "test", "4").setSource("foo", "c") ); - assertHitCount(client().prepareSearch("test").setTypes("test").setSize(0).get(), 4); - assertEquals(1, client().prepareGet("test", "test", "1").get().getVersion()); - assertEquals(1, client().prepareGet("test", "test", "4").get().getVersion()); + assertHitCount(client().prepareSearch("test").setSize(0).get(), 4); + assertEquals(1, client().prepareGet("test", "1").get().getVersion()); + assertEquals(1, client().prepareGet("test", "4").get().getVersion()); // Reindex all the docs assertThat(updateByQuery().source("test").refresh(true).get(), matcher().updated(4)); - assertEquals(2, 
client().prepareGet("test", "test", "1").get().getVersion()); - assertEquals(2, client().prepareGet("test", "test", "4").get().getVersion()); + assertEquals(2, client().prepareGet("test", "1").get().getVersion()); + assertEquals(2, client().prepareGet("test", "4").get().getVersion()); // Now none of them assertThat(updateByQuery().source("test").filter(termQuery("foo", "no_match")).refresh(true).get(), matcher().updated(0)); - assertEquals(2, client().prepareGet("test", "test", "1").get().getVersion()); - assertEquals(2, client().prepareGet("test", "test", "4").get().getVersion()); + assertEquals(2, client().prepareGet("test", "1").get().getVersion()); + assertEquals(2, client().prepareGet("test", "4").get().getVersion()); // Now half of them assertThat(updateByQuery().source("test").filter(termQuery("foo", "a")).refresh(true).get(), matcher().updated(2)); - assertEquals(3, client().prepareGet("test", "test", "1").get().getVersion()); - assertEquals(3, client().prepareGet("test", "test", "2").get().getVersion()); - assertEquals(2, client().prepareGet("test", "test", "3").get().getVersion()); - assertEquals(2, client().prepareGet("test", "test", "4").get().getVersion()); + assertEquals(3, client().prepareGet("test", "1").get().getVersion()); + assertEquals(3, client().prepareGet("test", "2").get().getVersion()); + assertEquals(2, client().prepareGet("test", "3").get().getVersion()); + assertEquals(2, client().prepareGet("test", "4").get().getVersion()); // Limit with size UpdateByQueryRequestBuilder request = updateByQuery().source("test").size(3).refresh(true); request.source().addSort("foo.keyword", SortOrder.ASC); assertThat(request.get(), matcher().updated(3)); // Only the first three documents are updated because of sort - assertEquals(4, client().prepareGet("test", "test", "1").get().getVersion()); - assertEquals(4, client().prepareGet("test", "test", "2").get().getVersion()); - assertEquals(3, client().prepareGet("test", "test", "3").get().getVersion()); - assertEquals(2, client().prepareGet("test", "test", "4").get().getVersion()); + assertEquals(4, client().prepareGet("test", "1").get().getVersion()); + assertEquals(4, client().prepareGet("test", "2").get().getVersion()); + assertEquals(3, client().prepareGet("test", "3").get().getVersion()); + assertEquals(2, client().prepareGet("test", "4").get().getVersion()); } public void testSlices() throws Exception { @@ -95,9 +95,9 @@ public void testSlices() throws Exception { client().prepareIndex("test", "test", "3").setSource("foo", "b"), client().prepareIndex("test", "test", "4").setSource("foo", "c") ); - assertHitCount(client().prepareSearch("test").setTypes("test").setSize(0).get(), 4); - assertEquals(1, client().prepareGet("test", "test", "1").get().getVersion()); - assertEquals(1, client().prepareGet("test", "test", "4").get().getVersion()); + assertHitCount(client().prepareSearch("test").setSize(0).get(), 4); + assertEquals(1, client().prepareGet("test", "1").get().getVersion()); + assertEquals(1, client().prepareGet("test", "4").get().getVersion()); int slices = randomSlices(2, 10); int expectedSlices = expectedSliceStatuses(slices, "test"); @@ -107,26 +107,26 @@ public void testSlices() throws Exception { updateByQuery().source("test").refresh(true).setSlices(slices).get(), matcher().updated(4).slices(hasSize(expectedSlices)) ); - assertEquals(2, client().prepareGet("test", "test", "1").get().getVersion()); - assertEquals(2, client().prepareGet("test", "test", "4").get().getVersion()); + assertEquals(2, 
client().prepareGet("test", "1").get().getVersion()); + assertEquals(2, client().prepareGet("test", "4").get().getVersion()); // Now none of them assertThat( updateByQuery().source("test").filter(termQuery("foo", "no_match")).setSlices(slices).refresh(true).get(), matcher().updated(0).slices(hasSize(expectedSlices)) ); - assertEquals(2, client().prepareGet("test", "test", "1").get().getVersion()); - assertEquals(2, client().prepareGet("test", "test", "4").get().getVersion()); + assertEquals(2, client().prepareGet("test", "1").get().getVersion()); + assertEquals(2, client().prepareGet("test", "4").get().getVersion()); // Now half of them assertThat( updateByQuery().source("test").filter(termQuery("foo", "a")).refresh(true).setSlices(slices).get(), matcher().updated(2).slices(hasSize(expectedSlices)) ); - assertEquals(3, client().prepareGet("test", "test", "1").get().getVersion()); - assertEquals(3, client().prepareGet("test", "test", "2").get().getVersion()); - assertEquals(2, client().prepareGet("test", "test", "3").get().getVersion()); - assertEquals(2, client().prepareGet("test", "test", "4").get().getVersion()); + assertEquals(3, client().prepareGet("test", "1").get().getVersion()); + assertEquals(3, client().prepareGet("test", "2").get().getVersion()); + assertEquals(2, client().prepareGet("test", "3").get().getVersion()); + assertEquals(2, client().prepareGet("test", "4").get().getVersion()); } public void testMultipleSources() throws Exception { @@ -159,7 +159,7 @@ public void testMultipleSources() throws Exception { String index = entry.getKey(); List indexDocs = entry.getValue(); int randomDoc = between(0, indexDocs.size() - 1); - assertEquals(2, client().prepareGet(index, "test", Integer.toString(randomDoc)).get().getVersion()); + assertEquals(2, client().prepareGet(index, Integer.toString(randomDoc)).get().getVersion()); } } diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/UpdateByQueryWhileModifyingTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/UpdateByQueryWhileModifyingTests.java index 3685fc5f124c9..3e4c61432c34a 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/UpdateByQueryWhileModifyingTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/UpdateByQueryWhileModifyingTests.java @@ -76,7 +76,7 @@ public void testUpdateWhileReindexing() throws Exception { try { for (int i = 0; i < MAX_MUTATIONS; i++) { - GetResponse get = client().prepareGet("test", "test", "test").get(); + GetResponse get = client().prepareGet("test", "test").get(); assertEquals(value.get(), get.getSource().get("test")); value.set(randomSimpleString(random())); IndexRequestBuilder index = client().prepareIndex("test", "test", "test") @@ -106,7 +106,7 @@ public void testUpdateWhileReindexing() throws Exception { get.getVersion(), attempts ); - get = client().prepareGet("test", "test", "test").get(); + get = client().prepareGet("test", "test").get(); } } } diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/UpdateByQueryWithScriptTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/UpdateByQueryWithScriptTests.java index b72f66ce11277..ce982dcb6bd34 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/UpdateByQueryWithScriptTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/UpdateByQueryWithScriptTests.java @@ -56,7 +56,7 @@ public void testModifyingCtxNotAllowed() { * error message to the user, not some ClassCastException. 
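The hunk resuming below shrinks the set of protected ctx metadata keys in update-by-query scripts; _type is no longer guarded because it no longer exists. An illustrative guard, our sketch rather than the real scripting plumbing:

import java.util.Map;
import java.util.Set;

final class CtxMetadataGuard {
    private static final Set<String> PROTECTED = Set.of("_index", "_id", "_version", "_routing");

    static void put(Map<String, Object> ctx, String key, Object value) {
        if (PROTECTED.contains(key)) {
            throw new IllegalArgumentException("Modifying [" + key + "] in update-by-query is not allowed");
        }
        ctx.put(key, value);
    }
}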
*/ Object[] options = new Object[] { "cat", new Object(), 123, new Date(), Math.PI }; - for (String ctxVar : new String[] { "_index", "_type", "_id", "_version", "_routing" }) { + for (String ctxVar : new String[] { "_index", "_id", "_version", "_routing" }) { try { applyScript((Map ctx) -> ctx.put(ctxVar, randomFrom(options))); } catch (IllegalArgumentException e) { diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteRequestBuildersTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteRequestBuildersTests.java index 541134f9403ba..c349bc54bcbd9 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteRequestBuildersTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteRequestBuildersTests.java @@ -78,27 +78,25 @@ public void testIntialSearchPath() { SearchRequest searchRequest = new SearchRequest().source(new SearchSourceBuilder()); assertEquals("/_search", initialSearch(searchRequest, query, remoteVersion).getEndpoint()); searchRequest.indices("a"); - searchRequest.types("b"); - assertEquals("/a/b/_search", initialSearch(searchRequest, query, remoteVersion).getEndpoint()); + assertEquals("/a/_search", initialSearch(searchRequest, query, remoteVersion).getEndpoint()); searchRequest.indices("a", "b"); - searchRequest.types("c", "d"); - assertEquals("/a,b/c,d/_search", initialSearch(searchRequest, query, remoteVersion).getEndpoint()); + assertEquals("/a,b/_search", initialSearch(searchRequest, query, remoteVersion).getEndpoint()); searchRequest.indices("cat,"); - assertEquals("/cat%2C/c,d/_search", initialSearch(searchRequest, query, remoteVersion).getEndpoint()); + assertEquals("/cat%2C/_search", initialSearch(searchRequest, query, remoteVersion).getEndpoint()); searchRequest.indices("cat/"); - assertEquals("/cat%2F/c,d/_search", initialSearch(searchRequest, query, remoteVersion).getEndpoint()); + assertEquals("/cat%2F/_search", initialSearch(searchRequest, query, remoteVersion).getEndpoint()); searchRequest.indices("cat/", "dog"); - assertEquals("/cat%2F,dog/c,d/_search", initialSearch(searchRequest, query, remoteVersion).getEndpoint()); + assertEquals("/cat%2F,dog/_search", initialSearch(searchRequest, query, remoteVersion).getEndpoint()); // test a specific date math + all characters that need escaping. searchRequest.indices("", "<>/{}|+:,"); assertEquals( - "/%3Ccat%7Bnow%2Fd%7D%3E,%3C%3E%2F%7B%7D%7C%2B%3A%2C/c,d/_search", + "/%3Ccat%7Bnow%2Fd%7D%3E,%3C%3E%2F%7B%7D%7C%2B%3A%2C/_search", initialSearch(searchRequest, query, remoteVersion).getEndpoint() ); // pass-through if already escaped. 
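The endpoint assertions above pin down the typeless remote search path: indices are comma-joined and URL-escaped, and nothing sits between them and _search. A hypothetical helper that approximates this is sketched below; it always encodes, so it deliberately ignores the pass-through case for already-escaped names exercised just after it.

import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.util.stream.Collectors;
import java.util.stream.Stream;

final class RemoteEndpoints {
    static String searchEndpoint(String... indices) {
        if (indices.length == 0) {
            return "/_search";
        }
        String joined = Stream.of(indices)
            .map(index -> URLEncoder.encode(index, StandardCharsets.UTF_8))
            .collect(Collectors.joining(","));
        return "/" + joined + "/_search";
    }
}
// RemoteEndpoints.searchEndpoint("a", "b") -> "/a,b/_search"
// RemoteEndpoints.searchEndpoint("cat/")   -> "/cat%2F/_search"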
searchRequest.indices("%2f", "%3a"); - assertEquals("/%2f,%3a/c,d/_search", initialSearch(searchRequest, query, remoteVersion).getEndpoint()); + assertEquals("/%2f,%3a/_search", initialSearch(searchRequest, query, remoteVersion).getEndpoint()); assertWarnings(DEPRECATED_URL_ENCODED_INDEX_WARNING); @@ -107,20 +105,6 @@ public void testIntialSearchPath() { expectBadStartRequest(searchRequest, "Index", ",", "%2fcat,"); searchRequest.indices("%3ccat/"); expectBadStartRequest(searchRequest, "Index", "/", "%3ccat/"); - - searchRequest.indices("ok"); - searchRequest.types("cat,"); - expectBadStartRequest(searchRequest, "Type", ",", "cat,"); - searchRequest.types("cat,", "dog"); - expectBadStartRequest(searchRequest, "Type", ",", "cat,"); - searchRequest.types("dog", "cat,"); - expectBadStartRequest(searchRequest, "Type", ",", "cat,"); - searchRequest.types("cat/"); - expectBadStartRequest(searchRequest, "Type", "/", "cat/"); - searchRequest.types("cat/", "dog"); - expectBadStartRequest(searchRequest, "Type", "/", "cat/"); - searchRequest.types("dog", "cat/"); - expectBadStartRequest(searchRequest, "Type", "/", "cat/"); } private void expectBadStartRequest(SearchRequest searchRequest, String type, String bad, String failed) { diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSourceTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSourceTests.java index 54cb39c736ff8..337bc67796f8e 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSourceTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSourceTests.java @@ -178,7 +178,6 @@ public void testParseStartOk() throws Exception { assertThat(r.getFailures(), empty()); assertThat(r.getHits(), hasSize(1)); assertEquals("test", r.getHits().get(0).getIndex()); - assertEquals("test", r.getHits().get(0).getType()); assertEquals("AVToMiC250DjIiBO3yJ_", r.getHits().get(0).getId()); assertEquals("{\"test\":\"test2\"}", r.getHits().get(0).getSource().utf8ToString()); assertNull(r.getHits().get(0).getRouting()); @@ -196,7 +195,6 @@ public void testParseScrollOk() throws Exception { assertThat(r.getFailures(), empty()); assertThat(r.getHits(), hasSize(1)); assertEquals("test", r.getHits().get(0).getIndex()); - assertEquals("test", r.getHits().get(0).getType()); assertEquals("AVToMiDL50DjIiBO3yKA", r.getHits().get(0).getId()); assertEquals("{\"test\":\"test3\"}", r.getHits().get(0).getSource().utf8ToString()); assertNull(r.getHits().get(0).getRouting()); @@ -246,7 +244,6 @@ public void testScanJumpStart() throws Exception { assertThat(r.getFailures(), empty()); assertThat(r.getHits(), hasSize(1)); assertEquals("test", r.getHits().get(0).getIndex()); - assertEquals("test", r.getHits().get(0).getType()); assertEquals("AVToMiDL50DjIiBO3yKA", r.getHits().get(0).getId()); assertEquals("{\"test\":\"test3\"}", r.getHits().get(0).getSource().utf8ToString()); assertNull(r.getHits().get(0).getRouting()); @@ -277,7 +274,6 @@ public void testParseRejection() throws Exception { ); assertThat(r.getHits(), hasSize(1)); assertEquals("test", r.getHits().get(0).getIndex()); - assertEquals("test", r.getHits().get(0).getType()); assertEquals("AVToMiC250DjIiBO3yJ_", r.getHits().get(0).getId()); assertEquals("{\"test\":\"test1\"}", r.getHits().get(0).getSource().utf8ToString()); called.set(true); @@ -308,7 +304,6 @@ public void testParseFailureWithStatus() throws Exception { ); 
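With the getType assertions removed here and in the final parser hunk below, a remote hit is identified by index and id alone. A minimal sketch of such a hit, using the typeless BasicHit constructor from the hunks above:

import org.opensearch.common.bytes.BytesArray;
import org.opensearch.common.xcontent.XContentType;
import org.opensearch.index.reindex.ScrollableHitSource;

final class SampleHits {
    static ScrollableHitSource.BasicHit sample() {
        // index + id, no type in between
        ScrollableHitSource.BasicHit hit = new ScrollableHitSource.BasicHit("test", "AVToMiC250DjIiBO3yJ_", 0);
        hit.setSource(new BytesArray("{\"test\":\"test1\"}"), XContentType.JSON);
        return hit;
    }
}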
assertThat(r.getHits(), hasSize(1)); assertEquals("test", r.getHits().get(0).getIndex()); - assertEquals("test", r.getHits().get(0).getType()); assertEquals("10000", r.getHits().get(0).getId()); assertEquals("{\"test\":\"test10000\"}", r.getHits().get(0).getSource().utf8ToString()); called.set(true); diff --git a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/delete_by_query/10_basic.yml b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/delete_by_query/10_basic.yml index c47d8ff0e0756..7783bbd1f9476 100644 --- a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/delete_by_query/10_basic.yml +++ b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/delete_by_query/10_basic.yml @@ -91,7 +91,6 @@ - skip: version: "6.7.0 - " reason: reindex moved to rely on sequence numbers for concurrency control - - do: indices.create: index: test @@ -124,7 +123,6 @@ - match: {version_conflicts: 1} - match: {batches: 1} - match: {failures.0.index: test} - - match: {failures.0.type: _doc} - match: {failures.0.id: "1"} - match: {failures.0.status: 409} - match: {failures.0.cause.type: version_conflict_engine_exception} @@ -145,10 +143,6 @@ --- "Response for version conflict (seq no powered)": - - skip: - version: " - 6.6.99" - reason: reindex moved to rely on sequence numbers for concurrency control - - do: indices.create: index: test @@ -181,7 +175,6 @@ - match: {version_conflicts: 1} - match: {batches: 1} - match: {failures.0.index: test} - - match: {failures.0.type: _doc} - match: {failures.0.id: "1"} - match: {failures.0.status: 409} - match: {failures.0.cause.type: version_conflict_engine_exception} @@ -210,7 +203,6 @@ - do: index: index: test - type: _doc id: 1 body: { "text": "test" } - do: @@ -219,7 +211,6 @@ - do: index: index: test - type: _doc id: 1 body: { "text": "test2" } diff --git a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/20_validation.yml b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/20_validation.yml index b06cd2325571a..7086e048eba3e 100644 --- a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/20_validation.yml +++ b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/20_validation.yml @@ -285,7 +285,7 @@ indices.refresh: {} - do: - catch: /\[test\]\[_doc\]\[1\] didn't store _source/ + catch: /\[test\]\[1\] didn't store _source/ reindex: body: source: diff --git a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/85_scripting.yml b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/85_scripting.yml index 770f372c210a8..9c38b13bb1ff0 100644 --- a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/85_scripting.yml +++ b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/85_scripting.yml @@ -399,9 +399,9 @@ mget: body: docs: - - { _index: index2, _type: _doc, _id: en_123} - - { _index: index2, _type: _doc, _id: en_456} - - { _index: index2, _type: _doc, _id: fr_789} + - { _index: index2, _id: en_123} + - { _index: index2, _id: en_456} + - { _index: index2, _id: fr_789} - is_true: docs.0.found - match: { docs.0._index: index2 } diff --git a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/10_basic.yml b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/10_basic.yml index f17b59e5806fe..4df12b31a0bed 100644 --- a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/10_basic.yml +++ 
b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/10_basic.yml @@ -104,7 +104,6 @@ - match: {version_conflicts: 1} - match: {batches: 1} - match: {failures.0.index: test} - - match: {failures.0.type: _doc} - match: {failures.0.id: "1"} - match: {failures.0.status: 409} - match: {failures.0.cause.type: version_conflict_engine_exception} @@ -116,9 +115,6 @@ --- "Response for version conflict (seq no powered)": - - skip: - version: " - 6.6.99" - reason: reindex moved to rely on sequence numbers for concurrency control - do: indices.create: index: test @@ -147,7 +143,6 @@ - match: {version_conflicts: 1} - match: {batches: 1} - match: {failures.0.index: test} - - match: {failures.0.type: _doc} - match: {failures.0.id: "1"} - match: {failures.0.status: 409} - match: {failures.0.cause.type: version_conflict_engine_exception} @@ -167,7 +162,6 @@ - do: index: index: test - type: _doc id: 1 body: { "text": "test" } - do: @@ -176,7 +170,6 @@ - do: index: index: test - type: _doc id: 1 body: { "text": "test2" } diff --git a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/20_validation.yml b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/20_validation.yml index c015b1a21c398..7b00fb59b02b2 100644 --- a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/20_validation.yml +++ b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/20_validation.yml @@ -150,7 +150,7 @@ indices.refresh: {} - do: - catch: /\[test\]\[_doc\]\[1\] didn't store _source/ + catch: /\[test\]\[1\] didn't store _source/ update_by_query: index: test diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle index 2e62fdd697ec9..f0029837c7d03 100644 --- a/modules/transport-netty4/build.gradle +++ b/modules/transport-netty4/build.gradle @@ -149,10 +149,7 @@ thirdPartyAudit { // from io.netty.handler.ssl.util.BouncyCastleSelfSignedCertGenerator (netty) 'org.bouncycastle.cert.X509v3CertificateBuilder', 'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter', - 'org.bouncycastle.cert.jcajce.JcaX509v3CertificateBuilder', - 'org.bouncycastle.jce.provider.BouncyCastleProvider', 'org.bouncycastle.operator.jcajce.JcaContentSignerBuilder', - 'org.bouncycastle.asn1.x500.X500Name', // from io.netty.handler.ssl.JettyNpnSslEngine (netty) 'org.eclipse.jetty.npn.NextProtoNego$ClientProvider', diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.72.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.72.Final.jar.sha1 deleted file mode 100644 index f1398e52d8c74..0000000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.72.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f306eec3f79541f9b8af9c471a0d5b63b7996272 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.73.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.73.Final.jar.sha1 new file mode 100644 index 0000000000000..e5833785ebb7e --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.73.Final.jar.sha1 @@ -0,0 +1 @@ +244a569c9aae973f6f485ac9801d79c1eca36daa \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.72.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.72.Final.jar.sha1 deleted file mode 100644 index f70b5c0909d7c..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.72.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-613c4019d687db4e9a5532564e442f83c4474ed7 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.73.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.73.Final.jar.sha1 new file mode 100644 index 0000000000000..dcdc1e4e58afe --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.73.Final.jar.sha1 @@ -0,0 +1 @@ +9496a30a349863a4c6fa10d5c36b4f3b495d3a31 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.72.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.72.Final.jar.sha1 deleted file mode 100644 index 8c7611afca886..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.72.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a8f062d67303a5e4b2bc2ad48fb4fd8c99108e45 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.73.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.73.Final.jar.sha1 new file mode 100644 index 0000000000000..374cfb98614d5 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.73.Final.jar.sha1 @@ -0,0 +1 @@ +1ceeac4429b9bd517dc05e376a144bbe6b6bd038 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.72.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.72.Final.jar.sha1 deleted file mode 100644 index bfdf4a5cf8585..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.72.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a55bac9c3af5f59828207b551a96ac19bbfc341e \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.73.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.73.Final.jar.sha1 new file mode 100644 index 0000000000000..e80a6e2569d81 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.73.Final.jar.sha1 @@ -0,0 +1 @@ +27731b58d741b6faa6a00fa3285e7a55cc47be01 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.72.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.72.Final.jar.sha1 deleted file mode 100644 index d6cc1771a2964..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.72.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9feee089fee606c64be90c0332db9aef1f7d8e46 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.73.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.73.Final.jar.sha1 new file mode 100644 index 0000000000000..0e227997874bf --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.73.Final.jar.sha1 @@ -0,0 +1 @@ +1a2231c0074f88254865c3769a4b5842939ea04d \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.72.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.72.Final.jar.sha1 deleted file mode 100644 index d08a6f6e7e42d..0000000000000 --- a/modules/transport-netty4/licenses/netty-resolver-4.1.72.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4ff458458ea32ed1156086820b624a815fcbf2c0 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.73.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.73.Final.jar.sha1 new file mode 100644 index 0000000000000..ba24531724fb5 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.73.Final.jar.sha1 @@ -0,0 +1 @@ +bfe83710f0c1739019613e81a06101020ca65def \ No newline at end 
of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.72.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.72.Final.jar.sha1 deleted file mode 100644 index 603f145303012..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.72.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -99138b436a584879355aca8fe3c64b46227d5d79 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.73.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.73.Final.jar.sha1 new file mode 100644 index 0000000000000..6a8647497f210 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.73.Final.jar.sha1 @@ -0,0 +1 @@ +abb155ddff196ccedfe85b810d4b9375ef85fcfa \ No newline at end of file diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java index e95a730c2b755..08df9259d475f 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java @@ -94,7 +94,7 @@ public void testLimitsInFlightRequests() throws Exception { List> requests = new ArrayList<>(); for (int i = 0; i < 150; i++) { - requests.add(Tuple.tuple("/index/type/_bulk", bulkRequest)); + requests.add(Tuple.tuple("/index/_bulk", bulkRequest)); } HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class); diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/netty4/OpenSearchLoggingHandlerIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/netty4/OpenSearchLoggingHandlerIT.java index c1c689471fc82..ea3f21dd0ed3b 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/netty4/OpenSearchLoggingHandlerIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/netty4/OpenSearchLoggingHandlerIT.java @@ -53,6 +53,7 @@ public class OpenSearchLoggingHandlerIT extends OpenSearchNetty4IntegTestCase { public void setUp() throws Exception { super.setUp(); appender = MockLogAppender.createForLoggers( + "^[^\n]+$", /* Only consider single line log statements */ LogManager.getLogger(OpenSearchLoggingHandler.class), LogManager.getLogger(TransportLogger.class), LogManager.getLogger(TcpTransport.class) @@ -66,12 +67,13 @@ public void tearDown() throws Exception { @TestLogging(value = "org.opensearch.transport.netty4.OpenSearchLoggingHandler:trace,org.opensearch.transport.TransportLogger:trace", reason = "to ensure we log network events on TRACE level") public void testLoggingHandler() { - final String writePattern = ".*\\[length: \\d+" + final String writePattern = "^.*\\[length: \\d+" + ", request id: \\d+" + ", type: request" - + ", version: .*" + + ", version: [^,]+" + + ", header size: \\d+B" + ", action: cluster:monitor/nodes/hot_threads\\[n\\]\\]" - + " WRITE: \\d+B"; + + " WRITE: \\d+B$"; final MockLogAppender.LoggingExpectation writeExpectation = new MockLogAppender.PatternSeenEventExpectation( "hot threads request", TransportLogger.class.getCanonicalName(), @@ -86,12 +88,12 @@ public void testLoggingHandler() { "*FLUSH*" ); - final String readPattern = ".*\\[length: \\d+" + final String readPattern 
= "^.*\\[length: \\d+" + ", request id: \\d+" + ", type: request" - + ", version: .*" + + ", version: [^,]+" + ", action: cluster:monitor/nodes/hot_threads\\[n\\]\\]" - + " READ: \\d+B"; + + " READ: \\d+B$"; final MockLogAppender.LoggingExpectation readExpectation = new MockLogAppender.PatternSeenEventExpectation( "hot threads request", diff --git a/modules/transport-netty4/src/javaRestTest/java/org/opensearch/rest/Netty4HeadBodyIsEmptyIT.java b/modules/transport-netty4/src/javaRestTest/java/org/opensearch/rest/Netty4HeadBodyIsEmptyIT.java index a8fc705363bef..1593488701e26 100644 --- a/modules/transport-netty4/src/javaRestTest/java/org/opensearch/rest/Netty4HeadBodyIsEmptyIT.java +++ b/modules/transport-netty4/src/javaRestTest/java/org/opensearch/rest/Netty4HeadBodyIsEmptyIT.java @@ -86,42 +86,6 @@ public void testIndexExists() throws IOException { headTestCase("/test", singletonMap("pretty", "true"), greaterThan(0)); } - public void testTypeExists() throws IOException { - createTestDoc(); - headTestCase( - "/test/_mapping/_doc", - emptyMap(), - OK.getStatus(), - greaterThan(0), - "Type exists requests are deprecated, as types have been deprecated." - ); - headTestCase( - "/test/_mapping/_doc", - singletonMap("pretty", "true"), - OK.getStatus(), - greaterThan(0), - "Type exists requests are deprecated, as types have been deprecated." - ); - } - - public void testTypeDoesNotExist() throws IOException { - createTestDoc(); - headTestCase( - "/test/_mapping/does-not-exist", - emptyMap(), - NOT_FOUND.getStatus(), - greaterThan(0), - "Type exists requests are deprecated, as types have been deprecated." - ); - headTestCase( - "/text/_mapping/test,does-not-exist", - emptyMap(), - NOT_FOUND.getStatus(), - greaterThan(0), - "Type exists requests are deprecated, as types have been deprecated." - ); - } - public void testAliasExists() throws IOException { createTestDoc(); try (XContentBuilder builder = jsonBuilder()) { diff --git a/plugins/analysis-icu/src/internalClusterTest/java/org/opensearch/index/mapper/ICUCollationKeywordFieldMapperIT.java b/plugins/analysis-icu/src/internalClusterTest/java/org/opensearch/index/mapper/ICUCollationKeywordFieldMapperIT.java index b967298b30a41..12120e365fe29 100644 --- a/plugins/analysis-icu/src/internalClusterTest/java/org/opensearch/index/mapper/ICUCollationKeywordFieldMapperIT.java +++ b/plugins/analysis-icu/src/internalClusterTest/java/org/opensearch/index/mapper/ICUCollationKeywordFieldMapperIT.java @@ -99,7 +99,6 @@ public void testBasicUsage() throws Exception { // searching for either of the terms should return both results since they collate to the same value SearchRequest request = new SearchRequest().indices(index) - .types(type) .source( new SearchSourceBuilder().fetchSource(false) .query(QueryBuilders.termQuery("collate", randomBoolean() ? 
equivalent[0] : equivalent[1])) @@ -143,7 +142,6 @@ public void testMultipleValues() throws Exception { // using sort mode = max, values B and C will be used for the sort SearchRequest request = new SearchRequest().indices(index) - .types(type) .source( new SearchSourceBuilder().fetchSource(false) .query(QueryBuilders.termQuery("collate", "a")) @@ -159,7 +157,6 @@ public void testMultipleValues() throws Exception { // same thing, using different sort mode that will use a for both docs request = new SearchRequest().indices(index) - .types(type) .source( new SearchSourceBuilder().fetchSource(false) .query(QueryBuilders.termQuery("collate", "a")) @@ -207,7 +204,6 @@ public void testNormalization() throws Exception { // searching for either of the terms should return both results since they collate to the same value SearchRequest request = new SearchRequest().indices(index) - .types(type) .source( new SearchSourceBuilder().fetchSource(false) .query(QueryBuilders.termQuery("collate", randomBoolean() ? equivalent[0] : equivalent[1])) @@ -253,7 +249,6 @@ public void testSecondaryStrength() throws Exception { ); SearchRequest request = new SearchRequest().indices(index) - .types(type) .source( new SearchSourceBuilder().fetchSource(false) .query(QueryBuilders.termQuery("collate", randomBoolean() ? equivalent[0] : equivalent[1])) @@ -300,7 +295,6 @@ public void testIgnorePunctuation() throws Exception { ); SearchRequest request = new SearchRequest().indices(index) - .types(type) .source( new SearchSourceBuilder().fetchSource(false) .query(QueryBuilders.termQuery("collate", randomBoolean() ? equivalent[0] : equivalent[1])) @@ -348,7 +342,6 @@ public void testIgnoreWhitespace() throws Exception { ); SearchRequest request = new SearchRequest().indices(index) - .types(type) .source( new SearchSourceBuilder().fetchSource(false).sort("collate", SortOrder.ASC).sort("id", SortOrder.ASC) // secondary sort // should kick in on @@ -391,7 +384,6 @@ public void testNumerics() throws Exception { ); SearchRequest request = new SearchRequest().indices(index) - .types(type) .source(new SearchSourceBuilder().fetchSource(false).sort("collate", SortOrder.ASC)); SearchResponse response = client().search(request).actionGet(); @@ -434,7 +426,6 @@ public void testIgnoreAccentsButNotCase() throws Exception { ); SearchRequest request = new SearchRequest().indices(index) - .types(type) .source(new SearchSourceBuilder().fetchSource(false).sort("collate", SortOrder.ASC).sort("id", SortOrder.DESC)); SearchResponse response = client().search(request).actionGet(); @@ -472,7 +463,6 @@ public void testUpperCaseFirst() throws Exception { ); SearchRequest request = new SearchRequest().indices(index) - .types(type) .source(new SearchSourceBuilder().fetchSource(false).sort("collate", SortOrder.ASC)); SearchResponse response = client().search(request).actionGet(); @@ -522,7 +512,6 @@ public void testCustomRules() throws Exception { ); SearchRequest request = new SearchRequest().indices(index) - .types(type) .source( new SearchSourceBuilder().fetchSource(false) .query(QueryBuilders.termQuery("collate", randomBoolean() ? 
equivalent[0] : equivalent[1])) diff --git a/plugins/analysis-icu/src/main/java/org/opensearch/index/analysis/IndexableBinaryStringTools.java b/plugins/analysis-icu/src/main/java/org/opensearch/index/analysis/IndexableBinaryStringTools.java index eb7e006857f07..c473ca53c6404 100644 --- a/plugins/analysis-icu/src/main/java/org/opensearch/index/analysis/IndexableBinaryStringTools.java +++ b/plugins/analysis-icu/src/main/java/org/opensearch/index/analysis/IndexableBinaryStringTools.java @@ -48,7 +48,6 @@ * padding is indistinguishable from valid information. To overcome this * problem, a char is appended, indicating the number of encoded bytes in the * final content char. - *
<p>
* * @deprecated Implement {@link TermToBytesRefAttribute} and store bytes directly * instead. This class WAS removed in Lucene 5.0 diff --git a/plugins/analysis-icu/src/main/java/org/opensearch/index/mapper/ICUCollationKeywordFieldMapper.java b/plugins/analysis-icu/src/main/java/org/opensearch/index/mapper/ICUCollationKeywordFieldMapper.java index e6a33c7545851..59b01b0ddb466 100644 --- a/plugins/analysis-icu/src/main/java/org/opensearch/index/mapper/ICUCollationKeywordFieldMapper.java +++ b/plugins/analysis-icu/src/main/java/org/opensearch/index/mapper/ICUCollationKeywordFieldMapper.java @@ -120,12 +120,12 @@ public String typeName() { } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup searchLookup, String format) { + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { if (format != null) { throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] doesn't support formats."); } - return new SourceValueFetcher(name(), mapperService, nullValue) { + return new SourceValueFetcher(name(), context, nullValue) { @Override protected String parseSourceValue(Object value) { String keywordValue = value.toString(); diff --git a/plugins/analysis-stempel/src/main/java/org/opensearch/index/analysis/pl/PolishAnalyzerProvider.java b/plugins/analysis-stempel/src/main/java/org/opensearch/index/analysis/pl/PolishAnalyzerProvider.java index 5a0cf81cc379e..2319b825892ac 100644 --- a/plugins/analysis-stempel/src/main/java/org/opensearch/index/analysis/pl/PolishAnalyzerProvider.java +++ b/plugins/analysis-stempel/src/main/java/org/opensearch/index/analysis/pl/PolishAnalyzerProvider.java @@ -46,7 +46,6 @@ public PolishAnalyzerProvider(IndexSettings indexSettings, Environment environme super(indexSettings, name, settings); analyzer = new PolishAnalyzer(PolishAnalyzer.getDefaultStopSet()); - analyzer.setVersion(version); } @Override diff --git a/plugins/analysis-ukrainian/build.gradle b/plugins/analysis-ukrainian/build.gradle index e7ba996587e22..60ab39997216c 100644 --- a/plugins/analysis-ukrainian/build.gradle +++ b/plugins/analysis-ukrainian/build.gradle @@ -36,9 +36,9 @@ opensearchplugin { dependencies { api "org.apache.lucene:lucene-analyzers-morfologik:${versions.lucene}" - api "org.carrot2:morfologik-stemming:2.1.1" + api "org.carrot2:morfologik-stemming:2.1.8" api "org.carrot2:morfologik-fsa:2.1.1" - api "ua.net.nlp:morfologik-ukrainian-search:3.7.5" + api "ua.net.nlp:morfologik-ukrainian-search:4.9.1" } restResources { diff --git a/plugins/analysis-ukrainian/licenses/morfologik-stemming-2.1.1.jar.sha1 b/plugins/analysis-ukrainian/licenses/morfologik-stemming-2.1.1.jar.sha1 deleted file mode 100644 index 22af41d2b6b1b..0000000000000 --- a/plugins/analysis-ukrainian/licenses/morfologik-stemming-2.1.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5c169bab2e7dd04f5cb03d179a73a4339cc1d0a2 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/morfologik-stemming-2.1.8.jar.sha1 b/plugins/analysis-ukrainian/licenses/morfologik-stemming-2.1.8.jar.sha1 new file mode 100644 index 0000000000000..6dfcc82f05b39 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/morfologik-stemming-2.1.8.jar.sha1 @@ -0,0 +1 @@ +409fa92db4cfb0f90a33d303732a4882cee3d1e7 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/morfologik-ukrainian-search-3.7.5.jar.sha1 b/plugins/analysis-ukrainian/licenses/morfologik-ukrainian-search-3.7.5.jar.sha1 deleted file mode 100644 index 
446e7a91161a8..0000000000000 --- a/plugins/analysis-ukrainian/licenses/morfologik-ukrainian-search-3.7.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2b8c8fbd740164d220ca7d18605b8b2092e163e9 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/morfologik-ukrainian-search-4.9.1.jar.sha1 b/plugins/analysis-ukrainian/licenses/morfologik-ukrainian-search-4.9.1.jar.sha1 new file mode 100644 index 0000000000000..31035a1593bbc --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/morfologik-ukrainian-search-4.9.1.jar.sha1 @@ -0,0 +1 @@ +98541e2d3e95d69244829c2855b10686b344c3b3 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/src/main/java/org/opensearch/index/analysis/UkrainianAnalyzerProvider.java b/plugins/analysis-ukrainian/src/main/java/org/opensearch/index/analysis/UkrainianAnalyzerProvider.java index b6bb76e0c9893..9beced7d465a6 100644 --- a/plugins/analysis-ukrainian/src/main/java/org/opensearch/index/analysis/UkrainianAnalyzerProvider.java +++ b/plugins/analysis-ukrainian/src/main/java/org/opensearch/index/analysis/UkrainianAnalyzerProvider.java @@ -48,7 +48,6 @@ public UkrainianAnalyzerProvider(IndexSettings indexSettings, Environment env, S Analysis.parseStopWords(env, settings, UkrainianMorfologikAnalyzer.getDefaultStopSet()), Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET) ); - analyzer.setVersion(version); } @Override diff --git a/plugins/discovery-azure-classic/build.gradle b/plugins/discovery-azure-classic/build.gradle index 8aac387cea6bf..7bb9250ea40a6 100644 --- a/plugins/discovery-azure-classic/build.gradle +++ b/plugins/discovery-azure-classic/build.gradle @@ -54,7 +54,7 @@ dependencies { api "commons-codec:commons-codec:${versions.commonscodec}" api "commons-lang:commons-lang:2.6" api "commons-io:commons-io:2.7" - api 'javax.mail:mail:1.4.5' + api 'javax.mail:mail:1.4.7' api 'javax.inject:javax.inject:1' api "com.sun.jersey:jersey-client:${versions.jersey}" api "com.sun.jersey:jersey-core:${versions.jersey}" @@ -64,7 +64,7 @@ dependencies { // HACK: javax.xml.bind was removed from default modules in java 9, so we pull the api in here, // and whitelist this hack in JarHell - api 'javax.xml.bind:jaxb-api:2.2.2' + api 'javax.xml.bind:jaxb-api:2.3.1' } restResources { diff --git a/plugins/discovery-azure-classic/licenses/jaxb-api-2.2.2.jar.sha1 b/plugins/discovery-azure-classic/licenses/jaxb-api-2.2.2.jar.sha1 deleted file mode 100644 index a37e187238933..0000000000000 --- a/plugins/discovery-azure-classic/licenses/jaxb-api-2.2.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -aeb3021ca93dde265796d82015beecdcff95bf09 \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/jaxb-api-2.3.1.jar.sha1 b/plugins/discovery-azure-classic/licenses/jaxb-api-2.3.1.jar.sha1 new file mode 100644 index 0000000000000..f4434214e1eec --- /dev/null +++ b/plugins/discovery-azure-classic/licenses/jaxb-api-2.3.1.jar.sha1 @@ -0,0 +1 @@ +8531ad5ac454cc2deb9d4d32c40c4d7451939b5d \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/mail-1.4.5.jar.sha1 b/plugins/discovery-azure-classic/licenses/mail-1.4.5.jar.sha1 deleted file mode 100644 index b79503e0c69d9..0000000000000 --- a/plugins/discovery-azure-classic/licenses/mail-1.4.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -85319c87280f30e1afc54c355f91f44741beac49 diff --git a/plugins/discovery-azure-classic/licenses/mail-1.4.7.jar.sha1 b/plugins/discovery-azure-classic/licenses/mail-1.4.7.jar.sha1 new file mode 100644 index 0000000000000..0b9ba0ce9f186 --- /dev/null 
+++ b/plugins/discovery-azure-classic/licenses/mail-1.4.7.jar.sha1 @@ -0,0 +1 @@ +9add058589d5d85adeb625859bf2c5eeaaedf12d \ No newline at end of file diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index a6d4134d15a9b..7998e0861c7b1 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -134,17 +134,8 @@ tasks.named("thirdPartyAudit").configure { ignoreMissingClasses( // classes are missing 'javax.jms.Message', - 'com.amazonaws.jmespath.JmesPathEvaluationVisitor', 'com.amazonaws.jmespath.JmesPathExpression', - 'com.amazonaws.jmespath.JmesPathField', - 'com.amazonaws.jmespath.JmesPathFlatten', - 'com.amazonaws.jmespath.JmesPathIdentity', - 'com.amazonaws.jmespath.JmesPathLengthFunction', - 'com.amazonaws.jmespath.JmesPathLiteral', - 'com.amazonaws.jmespath.JmesPathProjection', - 'com.amazonaws.jmespath.JmesPathSubExpression', 'com.amazonaws.jmespath.ObjectMapperSingleton', - 'com.amazonaws.jmespath.OpGreaterThan', 'software.amazon.ion.IonReader', 'software.amazon.ion.IonSystem', 'software.amazon.ion.IonType', diff --git a/plugins/discovery-ec2/licenses/jackson-annotations-2.12.5.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-annotations-2.12.5.jar.sha1 deleted file mode 100644 index 797bcf2b161d4..0000000000000 --- a/plugins/discovery-ec2/licenses/jackson-annotations-2.12.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -52d929d5bb21d0186fe24c09624cc3ee4bafc3b3 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-annotations-2.12.6.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-annotations-2.12.6.jar.sha1 new file mode 100644 index 0000000000000..48ee3bf53c630 --- /dev/null +++ b/plugins/discovery-ec2/licenses/jackson-annotations-2.12.6.jar.sha1 @@ -0,0 +1 @@ +9487231edd6b0b1f14692c9cba9e0462809215d1 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-databind-2.12.5.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-databind-2.12.5.jar.sha1 deleted file mode 100644 index ca1bd46bc3cd3..0000000000000 --- a/plugins/discovery-ec2/licenses/jackson-databind-2.12.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b064cf057f23d3d35390328c5030847efeffedde \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-databind-2.12.6.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-databind-2.12.6.jar.sha1 new file mode 100644 index 0000000000000..f74842887d31b --- /dev/null +++ b/plugins/discovery-ec2/licenses/jackson-databind-2.12.6.jar.sha1 @@ -0,0 +1 @@ +fac216b606c1086e36acea6e572ee61572ad1670 \ No newline at end of file diff --git a/plugins/mapper-annotated-text/src/main/java/org/opensearch/search/fetch/subphase/highlight/AnnotatedTextHighlighter.java b/plugins/mapper-annotated-text/src/main/java/org/opensearch/search/fetch/subphase/highlight/AnnotatedTextHighlighter.java index fe99746a5ce79..90d123e86e260 100644 --- a/plugins/mapper-annotated-text/src/main/java/org/opensearch/search/fetch/subphase/highlight/AnnotatedTextHighlighter.java +++ b/plugins/mapper-annotated-text/src/main/java/org/opensearch/search/fetch/subphase/highlight/AnnotatedTextHighlighter.java @@ -40,8 +40,8 @@ import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.annotatedtext.AnnotatedTextFieldMapper.AnnotatedHighlighterAnalyzer; import org.opensearch.index.mapper.annotatedtext.AnnotatedTextFieldMapper.AnnotatedText; +import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.fetch.FetchSubPhase.HitContext; -import 
org.opensearch.search.fetch.subphase.highlight.SearchHighlightContext.Field; import java.io.IOException; import java.util.ArrayList; @@ -55,12 +55,12 @@ public class AnnotatedTextHighlighter extends UnifiedHighlighter { @Override protected List loadFieldValues( CustomUnifiedHighlighter highlighter, + QueryShardContext context, MappedFieldType fieldType, - Field field, HitContext hitContext, boolean forceSource ) throws IOException { - List fieldValues = super.loadFieldValues(highlighter, fieldType, field, hitContext, forceSource); + List fieldValues = super.loadFieldValues(highlighter, context, fieldType, hitContext, forceSource); List strings = new ArrayList<>(fieldValues.size()); AnnotatedText[] annotations = new AnnotatedText[fieldValues.size()]; diff --git a/plugins/mapper-annotated-text/src/test/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextFieldTypeTests.java b/plugins/mapper-annotated-text/src/test/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextFieldTypeTests.java index af94bcfa79367..3e3119094cb69 100644 --- a/plugins/mapper-annotated-text/src/test/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextFieldTypeTests.java +++ b/plugins/mapper-annotated-text/src/test/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextFieldTypeTests.java @@ -42,6 +42,7 @@ import org.opensearch.index.mapper.FieldTypeTestCase; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.Mapper; +import org.opensearch.index.query.IntervalMode; import java.io.IOException; import java.util.Collections; @@ -51,7 +52,7 @@ public class AnnotatedTextFieldTypeTests extends FieldTypeTestCase { public void testIntervals() throws IOException { MappedFieldType ft = new AnnotatedTextFieldMapper.AnnotatedTextFieldType("field", Collections.emptyMap()); NamedAnalyzer a = new NamedAnalyzer("name", AnalyzerScope.INDEX, new StandardAnalyzer()); - IntervalsSource source = ft.intervals("Donald Trump", 0, true, a, false); + IntervalsSource source = ft.intervals("Donald Trump", 0, IntervalMode.ORDERED, a, false); assertEquals(Intervals.phrase(Intervals.term("donald"), Intervals.term("trump")), source); } diff --git a/plugins/mapper-annotated-text/src/yamlRestTest/resources/rest-api-spec/test/mapper_annotatedtext/10_basic.yml b/plugins/mapper-annotated-text/src/yamlRestTest/resources/rest-api-spec/test/mapper_annotatedtext/10_basic.yml index be3b32e6338dc..b4acccf36879d 100644 --- a/plugins/mapper-annotated-text/src/yamlRestTest/resources/rest-api-spec/test/mapper_annotatedtext/10_basic.yml +++ b/plugins/mapper-annotated-text/src/yamlRestTest/resources/rest-api-spec/test/mapper_annotatedtext/10_basic.yml @@ -3,10 +3,6 @@ --- "annotated highlighter on annotated text": - - skip: - version: " - 6.4.99" - reason: Annotated text type introduced in 6.5.0 - - do: indices.create: index: annotated @@ -80,10 +76,6 @@ --- "issue 39395 thread safety issue -requires multiple calls to reveal": - - skip: - version: " - 6.4.99" - reason: Annotated text type introduced in 6.5.0 - - do: indices.create: index: annotated diff --git a/plugins/mapper-murmur3/src/main/java/org/opensearch/index/mapper/murmur3/Murmur3FieldMapper.java b/plugins/mapper-murmur3/src/main/java/org/opensearch/index/mapper/murmur3/Murmur3FieldMapper.java index ffbb1905bcd98..4e87b03132055 100644 --- a/plugins/mapper-murmur3/src/main/java/org/opensearch/index/mapper/murmur3/Murmur3FieldMapper.java +++ b/plugins/mapper-murmur3/src/main/java/org/opensearch/index/mapper/murmur3/Murmur3FieldMapper.java @@ -44,7 
+44,6 @@ import org.opensearch.index.fielddata.plain.SortedNumericIndexFieldData; import org.opensearch.index.mapper.FieldMapper; import org.opensearch.index.mapper.MappedFieldType; -import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.ParametrizedFieldMapper; import org.opensearch.index.mapper.ParseContext; import org.opensearch.index.mapper.SourceValueFetcher; @@ -121,8 +120,8 @@ public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, S } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup searchLookup, String format) { - return SourceValueFetcher.toString(name(), mapperService, format); + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { + return SourceValueFetcher.toString(name(), context, format); } @Override diff --git a/plugins/mapper-size/src/internalClusterTest/java/org/opensearch/index/mapper/size/SizeMappingIT.java b/plugins/mapper-size/src/internalClusterTest/java/org/opensearch/index/mapper/size/SizeMappingIT.java index 4811c7d12759c..10edd6d2586d9 100644 --- a/plugins/mapper-size/src/internalClusterTest/java/org/opensearch/index/mapper/size/SizeMappingIT.java +++ b/plugins/mapper-size/src/internalClusterTest/java/org/opensearch/index/mapper/size/SizeMappingIT.java @@ -137,7 +137,7 @@ public void testBasic() throws Exception { assertAcked(prepareCreate("test").addMapping("type", "_size", "enabled=true")); final String source = "{\"f\":10}"; indexRandom(true, client().prepareIndex("test", "type", "1").setSource(source, XContentType.JSON)); - GetResponse getResponse = client().prepareGet("test", "type", "1").setStoredFields("_size").get(); + GetResponse getResponse = client().prepareGet("test", "1").setStoredFields("_size").get(); assertNotNull(getResponse.getField("_size")); assertEquals(source.length(), (int) getResponse.getField("_size").getValue()); } diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 81ef4e98923a3..88ce2f667cee2 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -46,7 +46,7 @@ opensearchplugin { dependencies { api 'com.azure:azure-core:1.22.0' api 'com.azure:azure-storage-common:12.14.0' - api 'com.azure:azure-core-http-netty:1.11.2' + api 'com.azure:azure-core-http-netty:1.11.7' api "io.netty:netty-codec-dns:${versions.netty}" api "io.netty:netty-codec-socks:${versions.netty}" api "io.netty:netty-codec-http2:${versions.netty}" @@ -56,7 +56,7 @@ dependencies { implementation project(':modules:transport-netty4') api 'com.azure:azure-storage-blob:12.14.1' api 'org.reactivestreams:reactive-streams:1.0.3' - api 'io.projectreactor:reactor-core:3.4.11' + api 'io.projectreactor:reactor-core:3.4.15' api 'io.projectreactor.netty:reactor-netty:1.0.13' api 'io.projectreactor.netty:reactor-netty-core:1.0.13' api 'io.projectreactor.netty:reactor-netty-http:1.0.13' @@ -119,25 +119,16 @@ thirdPartyAudit { 'io.micrometer.core.instrument.search.Search', 'io.netty.channel.epoll.Epoll', 'io.netty.channel.epoll.EpollDatagramChannel', - 'io.netty.channel.epoll.EpollDomainDatagramChannel', - 'io.netty.channel.epoll.EpollDomainSocketChannel', - 'io.netty.channel.epoll.EpollEventLoopGroup', - 'io.netty.channel.epoll.EpollServerDomainSocketChannel', 'io.netty.channel.epoll.EpollServerSocketChannel', 'io.netty.channel.epoll.EpollSocketChannel', 'io.netty.channel.kqueue.KQueue', 'io.netty.channel.kqueue.KQueueDatagramChannel', - 
'io.netty.channel.kqueue.KQueueDomainDatagramChannel', - 'io.netty.channel.kqueue.KQueueDomainSocketChannel', - 'io.netty.channel.kqueue.KQueueEventLoopGroup', - 'io.netty.channel.kqueue.KQueueServerDomainSocketChannel', 'io.netty.channel.kqueue.KQueueServerSocketChannel', 'io.netty.channel.kqueue.KQueueSocketChannel', 'io.netty.handler.codec.haproxy.HAProxyMessage', 'io.netty.handler.codec.haproxy.HAProxyMessageDecoder', 'io.netty.incubator.channel.uring.IOUring', 'io.netty.incubator.channel.uring.IOUringDatagramChannel', - 'io.netty.incubator.channel.uring.IOUringEventLoopGroup', 'io.netty.incubator.channel.uring.IOUringServerSocketChannel', 'io.netty.incubator.channel.uring.IOUringSocketChannel', 'javax.activation.DataHandler', @@ -167,7 +158,6 @@ thirdPartyAudit { 'javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter', 'javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter$DEFAULT', 'javax.xml.bind.annotation.adapters.XmlJavaTypeAdapters', - 'kotlin.TypeCastException', 'kotlin.collections.ArraysKt', 'kotlin.jvm.JvmClassMappingKt', 'kotlin.jvm.functions.Function0', diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.11.2.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.11.2.jar.sha1 deleted file mode 100644 index 3d3c0a59a77ba..0000000000000 --- a/plugins/repository-azure/licenses/azure-core-http-netty-1.11.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7d84ec31d73a7b51bc72044789768b25fb2b14f4 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.11.7.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.11.7.jar.sha1 new file mode 100644 index 0000000000000..25db85393f2af --- /dev/null +++ b/plugins/repository-azure/licenses/azure-core-http-netty-1.11.7.jar.sha1 @@ -0,0 +1 @@ +c6b14fcca3e75acc8dbe07ac101afd05d48a1647 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-annotations-2.12.5.jar.sha1 b/plugins/repository-azure/licenses/jackson-annotations-2.12.5.jar.sha1 deleted file mode 100644 index 797bcf2b161d4..0000000000000 --- a/plugins/repository-azure/licenses/jackson-annotations-2.12.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -52d929d5bb21d0186fe24c09624cc3ee4bafc3b3 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-annotations-2.12.6.jar.sha1 b/plugins/repository-azure/licenses/jackson-annotations-2.12.6.jar.sha1 new file mode 100644 index 0000000000000..48ee3bf53c630 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-annotations-2.12.6.jar.sha1 @@ -0,0 +1 @@ +9487231edd6b0b1f14692c9cba9e0462809215d1 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-databind-2.12.5.jar.sha1 b/plugins/repository-azure/licenses/jackson-databind-2.12.5.jar.sha1 deleted file mode 100644 index ca1bd46bc3cd3..0000000000000 --- a/plugins/repository-azure/licenses/jackson-databind-2.12.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b064cf057f23d3d35390328c5030847efeffedde \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-databind-2.12.6.jar.sha1 b/plugins/repository-azure/licenses/jackson-databind-2.12.6.jar.sha1 new file mode 100644 index 0000000000000..f74842887d31b --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-databind-2.12.6.jar.sha1 @@ -0,0 +1 @@ +fac216b606c1086e36acea6e572ee61572ad1670 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.12.5.jar.sha1 
b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.12.5.jar.sha1 deleted file mode 100644 index 6711b58ae535f..0000000000000 --- a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.12.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4b872e5a9f7e6644c2dd8d7358ed9fad714d7c90 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.12.6.jar.sha1 b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.12.6.jar.sha1 new file mode 100644 index 0000000000000..43ee9816d99e3 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.12.6.jar.sha1 @@ -0,0 +1 @@ +fecb8514c3a89102bd619b6c624f906a6098b588 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.12.5.jar.sha1 b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.12.5.jar.sha1 deleted file mode 100644 index 930cb084f54c2..0000000000000 --- a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.12.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a0a9870b681a72789c5c6bdc380e45ab719c6aa3 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.12.6.jar.sha1 b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.12.6.jar.sha1 new file mode 100644 index 0000000000000..d005cc1bd5f11 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.12.6.jar.sha1 @@ -0,0 +1 @@ +0f7d0d854f24c4254885c275a09fb885ef578b48 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.12.5.jar.sha1 b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.12.5.jar.sha1 deleted file mode 100644 index f10aa2634ca97..0000000000000 --- a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.12.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -02b389d7206327e54ae31f709ab75a4a3f33e148 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.12.6.jar.sha1 b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.12.6.jar.sha1 new file mode 100644 index 0000000000000..686d813e002c8 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.12.6.jar.sha1 @@ -0,0 +1 @@ +a0bea2c6f98eb0dc24208b54a53da80ea459c156 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.72.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.72.Final.jar.sha1 deleted file mode 100644 index c8db6b5611676..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.72.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -43030c869832981a626458073e86070858747e06 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.73.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.73.Final.jar.sha1 new file mode 100644 index 0000000000000..320ae18c98bda --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.73.Final.jar.sha1 @@ -0,0 +1 @@ +46137a5b01a5202059324cf4300443e53f11a38d \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.72.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.72.Final.jar.sha1 deleted file mode 100644 index 710f2136045ee..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.72.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4b269e666fbace27d2c1efa57703e99b09655822 \ No newline at end of file diff --git 
a/plugins/repository-azure/licenses/netty-codec-http2-4.1.73.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.73.Final.jar.sha1 new file mode 100644 index 0000000000000..d7f5a464bcc00 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.73.Final.jar.sha1 @@ -0,0 +1 @@ +0eb145bc31fd32a20fd2a3e8b30736d2e0248b0c \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.72.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.72.Final.jar.sha1 deleted file mode 100644 index 4f9cb69436cdc..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.72.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d3bc427b6e2dc4bb6dc9d18d1cc47f8530970a8b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.73.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.73.Final.jar.sha1 new file mode 100644 index 0000000000000..6ba41c576c93d --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.73.Final.jar.sha1 @@ -0,0 +1 @@ +cefa44d8f5dcaab21179d945f12b6c6d7325cce9 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.72.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.72.Final.jar.sha1 deleted file mode 100644 index 791abf7a66002..0000000000000 --- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.72.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -90de0fe610454d4296052ae36acb8a6a1d0333f1 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.73.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.73.Final.jar.sha1 new file mode 100644 index 0000000000000..f50c9abf023cf --- /dev/null +++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.73.Final.jar.sha1 @@ -0,0 +1 @@ +d1afa6876c3d3bdbdbe5127ddd495e6514d6e600 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.72.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.72.Final.jar.sha1 deleted file mode 100644 index a50ef9c68c247..0000000000000 --- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.72.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a2fc945c3688e2b7d6ddb2c25f33832349657fa0 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.73.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.73.Final.jar.sha1 new file mode 100644 index 0000000000000..817fa4cc0d86f --- /dev/null +++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.73.Final.jar.sha1 @@ -0,0 +1 @@ +97cdf5fb97f8d961cfa3ffb05175009b90e5cfee \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.72.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.72.Final.jar.sha1 deleted file mode 100644 index 4530a75b61263..0000000000000 --- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.72.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cedc023ffdcb68543b22a1ebc7960a160589aa09 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.73.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.73.Final.jar.sha1 new file mode 100644 index 0000000000000..22b8f58bd5103 --- /dev/null +++ 
b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.73.Final.jar.sha1 @@ -0,0 +1 @@ +4701063d36f390e02da6da85c13e32a0e78349d2 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-core-3.4.11.jar.sha1 b/plugins/repository-azure/licenses/reactor-core-3.4.11.jar.sha1 deleted file mode 100644 index fc0911be8fedf..0000000000000 --- a/plugins/repository-azure/licenses/reactor-core-3.4.11.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0e305f6aa6e6da26aa42726f8cfd69b6ab53d7c0 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-core-3.4.15.jar.sha1 b/plugins/repository-azure/licenses/reactor-core-3.4.15.jar.sha1 new file mode 100644 index 0000000000000..a89de48b20b51 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-core-3.4.15.jar.sha1 @@ -0,0 +1 @@ +28ccf513fe64709c8ded30ea3f387fc718db9626 \ No newline at end of file diff --git a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureBlobStore.java b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureBlobStore.java index 6345103c6ecc6..753c902a6eb01 100644 --- a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureBlobStore.java +++ b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureBlobStore.java @@ -413,8 +413,8 @@ public Map children(BlobPath path) throws URISyntaxExcept public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws URISyntaxException, BlobStorageException, IOException { - assert inputStream - .markSupported() : "Should not be used with non-mark supporting streams as their retry handling in the SDK is broken"; + assert inputStream.markSupported() + : "Should not be used with non-mark supporting streams as their retry handling in the SDK is broken"; logger.trace(() -> new ParameterizedMessage("writeBlob({}, stream, {})", blobName, blobSize)); final Tuple> client = client(); final BlobContainerClient blobContainer = client.v1().getBlobContainerClient(container); diff --git a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureRepositoryPlugin.java b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureRepositoryPlugin.java index aa41941436171..82ab5243a09aa 100644 --- a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureRepositoryPlugin.java +++ b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureRepositoryPlugin.java @@ -94,13 +94,15 @@ public List> getSettings() { AzureStorageSettings.ENDPOINT_SUFFIX_SETTING, AzureStorageSettings.TIMEOUT_SETTING, AzureStorageSettings.MAX_RETRIES_SETTING, - AzureStorageSettings.PROXY_TYPE_SETTING, - AzureStorageSettings.PROXY_HOST_SETTING, - AzureStorageSettings.PROXY_PORT_SETTING, AzureStorageSettings.CONNECT_TIMEOUT_SETTING, AzureStorageSettings.WRITE_TIMEOUT_SETTING, AzureStorageSettings.READ_TIMEOUT_SETTING, - AzureStorageSettings.RESPONSE_TIMEOUT_SETTING + AzureStorageSettings.RESPONSE_TIMEOUT_SETTING, + AzureStorageSettings.PROXY_TYPE_SETTING, + AzureStorageSettings.PROXY_HOST_SETTING, + AzureStorageSettings.PROXY_PORT_SETTING, + AzureStorageSettings.PROXY_USERNAME_SETTING, + AzureStorageSettings.PROXY_PASSWORD_SETTING ); } diff --git a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java index 
6cd3a149c6957..3800be7c2d27d 100644 --- a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java +++ b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java @@ -43,7 +43,6 @@ import com.azure.core.http.HttpRequest; import com.azure.core.http.HttpResponse; import com.azure.core.http.ProxyOptions; -import com.azure.core.http.ProxyOptions.Type; import com.azure.core.http.netty.NettyAsyncHttpClientBuilder; import com.azure.core.http.policy.HttpPipelinePolicy; import com.azure.core.util.Configuration; @@ -66,12 +65,11 @@ import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; -import java.net.InetSocketAddress; -import java.net.Proxy; +import java.net.Authenticator; +import java.net.PasswordAuthentication; import java.net.URISyntaxException; import java.security.InvalidKeyException; import java.time.Duration; -import java.util.Arrays; import java.util.HashMap; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; @@ -169,15 +167,20 @@ private ClientState buildClient(AzureStorageSettings azureStorageSettings, BiCon final NioEventLoopGroup eventLoopGroup = new NioEventLoopGroup(new NioThreadFactory()); final NettyAsyncHttpClientBuilder clientBuilder = new NettyAsyncHttpClientBuilder().eventLoopGroup(eventLoopGroup); - final Proxy proxy = azureStorageSettings.getProxy(); - if (proxy != null) { - final Type type = Arrays.stream(Type.values()) - .filter(t -> t.toProxyType().equals(proxy.type())) - .findFirst() - .orElseThrow(() -> new IllegalArgumentException("Unsupported proxy type: " + proxy.type())); - - clientBuilder.proxy(new ProxyOptions(type, (InetSocketAddress) proxy.address())); - } + SocketAccess.doPrivilegedVoidException(() -> { + final ProxySettings proxySettings = azureStorageSettings.getProxySettings(); + if (proxySettings != ProxySettings.NO_PROXY_SETTINGS) { + if (proxySettings.isAuthenticated()) { + Authenticator.setDefault(new Authenticator() { + @Override + protected PasswordAuthentication getPasswordAuthentication() { + return new PasswordAuthentication(proxySettings.getUsername(), proxySettings.getPassword().toCharArray()); + } + }); + } + clientBuilder.proxy(new ProxyOptions(proxySettings.getType().toProxyType(), proxySettings.getAddress())); + } + }); final TimeValue connectTimeout = azureStorageSettings.getConnectTimeout(); if (connectTimeout != null) { diff --git a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageSettings.java b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageSettings.java index 94ec553ab760e..c9a031451bccd 100644 --- a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageSettings.java +++ b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageSettings.java @@ -44,8 +44,6 @@ import org.opensearch.common.settings.SettingsException; import org.opensearch.common.unit.TimeValue; import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.net.Proxy; import java.net.UnknownHostException; import java.util.Collections; import java.util.HashMap; @@ -143,10 +141,10 @@ final class AzureStorageSettings { ); /** The type of the proxy to connect to azure through. 
Can be direct (no proxy, default), http or socks */ - public static final AffixSetting<Proxy.Type> PROXY_TYPE_SETTING = Setting.affixKeySetting( + public static final AffixSetting<ProxySettings.ProxyType> PROXY_TYPE_SETTING = Setting.affixKeySetting( AZURE_CLIENT_PREFIX_KEY, "proxy.type", - (key) -> new Setting<>(key, "direct", s -> Proxy.Type.valueOf(s.toUpperCase(Locale.ROOT)), Property.NodeScope), + (key) -> new Setting<>(key, "direct", s -> ProxySettings.ProxyType.valueOf(s.toUpperCase(Locale.ROOT)), Property.NodeScope), () -> ACCOUNT_SETTING, () -> KEY_SETTING ); @@ -162,27 +160,50 @@ final class AzureStorageSettings { ); /** The port of a proxy to connect to azure through. */ - public static final Setting<Integer> PROXY_PORT_SETTING = Setting.affixKeySetting( + public static final AffixSetting<Integer> PROXY_PORT_SETTING = Setting.affixKeySetting( AZURE_CLIENT_PREFIX_KEY, "proxy.port", (key) -> Setting.intSetting(key, 0, 0, 65535, Setting.Property.NodeScope), + () -> KEY_SETTING, () -> ACCOUNT_SETTING, + () -> PROXY_TYPE_SETTING, + () -> PROXY_HOST_SETTING + ); + + /** The username of a proxy to connect */ + static final AffixSetting<SecureString> PROXY_USERNAME_SETTING = Setting.affixKeySetting( + AZURE_CLIENT_PREFIX_KEY, + "proxy.username", + key -> SecureSetting.secureString(key, null), () -> KEY_SETTING, + () -> ACCOUNT_SETTING, () -> PROXY_TYPE_SETTING, () -> PROXY_HOST_SETTING ); + /** The password of a proxy to connect */ + static final AffixSetting<SecureString> PROXY_PASSWORD_SETTING = Setting.affixKeySetting( + AZURE_CLIENT_PREFIX_KEY, + "proxy.password", + key -> SecureSetting.secureString(key, null), + () -> KEY_SETTING, + () -> ACCOUNT_SETTING, + () -> PROXY_TYPE_SETTING, + () -> PROXY_HOST_SETTING, + () -> PROXY_USERNAME_SETTING + ); + private final String account; private final String connectString; private final String endpointSuffix; private final TimeValue timeout; private final int maxRetries; - private final Proxy proxy; private final LocationMode locationMode; private final TimeValue connectTimeout; private final TimeValue writeTimeout; private final TimeValue readTimeout; private final TimeValue responseTimeout; + private final ProxySettings proxySettings; // copy-constructor private AzureStorageSettings( @@ -191,24 +212,24 @@ private AzureStorageSettings( String endpointSuffix, TimeValue timeout, int maxRetries, - Proxy proxy, LocationMode locationMode, TimeValue connectTimeout, TimeValue writeTimeout, TimeValue readTimeout, - TimeValue responseTimeout + TimeValue responseTimeout, + ProxySettings proxySettings ) { this.account = account; this.connectString = connectString; this.endpointSuffix = endpointSuffix; this.timeout = timeout; this.maxRetries = maxRetries; - this.proxy = proxy; this.locationMode = locationMode; this.connectTimeout = connectTimeout; this.writeTimeout = writeTimeout; this.readTimeout = readTimeout; this.responseTimeout = responseTimeout; + this.proxySettings = proxySettings; } private AzureStorageSettings( @@ -218,42 +239,23 @@ private AzureStorageSettings( String endpointSuffix, TimeValue timeout, int maxRetries, - Proxy.Type proxyType, - String proxyHost, - Integer proxyPort, TimeValue connectTimeout, TimeValue writeTimeout, TimeValue readTimeout, - TimeValue responseTimeout + TimeValue responseTimeout, + ProxySettings proxySettings ) { this.account = account; this.connectString = buildConnectString(account, key, sasToken, endpointSuffix); this.endpointSuffix = endpointSuffix; this.timeout = timeout; this.maxRetries = maxRetries; - // Register the proxy if we have any - // Validate proxy settings - if
(proxyType.equals(Proxy.Type.DIRECT) && ((proxyPort != 0) || Strings.hasText(proxyHost))) { - throw new SettingsException("Azure Proxy port or host have been set but proxy type is not defined."); - } - if ((proxyType.equals(Proxy.Type.DIRECT) == false) && ((proxyPort == 0) || Strings.isEmpty(proxyHost))) { - throw new SettingsException("Azure Proxy type has been set but proxy host or port is not defined."); - } - - if (proxyType.equals(Proxy.Type.DIRECT)) { - proxy = null; - } else { - try { - proxy = new Proxy(proxyType, new InetSocketAddress(InetAddress.getByName(proxyHost), proxyPort)); - } catch (final UnknownHostException e) { - throw new SettingsException("Azure proxy host is unknown.", e); - } - } this.locationMode = LocationMode.PRIMARY_ONLY; this.connectTimeout = connectTimeout; this.writeTimeout = writeTimeout; this.readTimeout = readTimeout; this.responseTimeout = responseTimeout; + this.proxySettings = proxySettings; } public String getEndpointSuffix() { @@ -268,8 +270,8 @@ public int getMaxRetries() { return maxRetries; } - public Proxy getProxy() { - return proxy; + public ProxySettings getProxySettings() { + return proxySettings; } public String getConnectString() { @@ -325,7 +327,7 @@ public String toString() { sb.append(", timeout=").append(timeout); sb.append(", endpointSuffix='").append(endpointSuffix).append('\''); sb.append(", maxRetries=").append(maxRetries); - sb.append(", proxy=").append(proxy); + sb.append(", proxySettings=").append(proxySettings != ProxySettings.NO_PROXY_SETTINGS ? "PROXY_SET" : "PROXY_NOT_SET"); sb.append(", locationMode='").append(locationMode).append('\''); sb.append(", connectTimeout='").append(connectTimeout).append('\''); sb.append(", writeTimeout='").append(writeTimeout).append('\''); @@ -371,17 +373,42 @@ private static AzureStorageSettings getClientSettings(Settings settings, String getValue(settings, clientName, ENDPOINT_SUFFIX_SETTING), getValue(settings, clientName, TIMEOUT_SETTING), getValue(settings, clientName, MAX_RETRIES_SETTING), - getValue(settings, clientName, PROXY_TYPE_SETTING), - getValue(settings, clientName, PROXY_HOST_SETTING), - getValue(settings, clientName, PROXY_PORT_SETTING), getValue(settings, clientName, CONNECT_TIMEOUT_SETTING), getValue(settings, clientName, WRITE_TIMEOUT_SETTING), getValue(settings, clientName, READ_TIMEOUT_SETTING), - getValue(settings, clientName, RESPONSE_TIMEOUT_SETTING) + getValue(settings, clientName, RESPONSE_TIMEOUT_SETTING), + validateAndCreateProxySettings(settings, clientName) ); } } + static ProxySettings validateAndCreateProxySettings(final Settings settings, final String clientName) { + final ProxySettings.ProxyType proxyType = getConfigValue(settings, clientName, PROXY_TYPE_SETTING); + final String proxyHost = getConfigValue(settings, clientName, PROXY_HOST_SETTING); + final int proxyPort = getConfigValue(settings, clientName, PROXY_PORT_SETTING); + final SecureString proxyUserName = getConfigValue(settings, clientName, PROXY_USERNAME_SETTING); + final SecureString proxyPassword = getConfigValue(settings, clientName, PROXY_PASSWORD_SETTING); + // Validate proxy settings + if (proxyType == ProxySettings.ProxyType.DIRECT + && (proxyPort != 0 || Strings.hasText(proxyHost) || Strings.hasText(proxyUserName) || Strings.hasText(proxyPassword))) { + throw new SettingsException("Azure proxy port or host or username or password have been set but proxy type is not defined."); + } + if (proxyType != ProxySettings.ProxyType.DIRECT && (proxyPort == 0 || Strings.isEmpty(proxyHost))) { + throw 
new SettingsException("Azure proxy type has been set but proxy host or port is not defined."); } + + if (proxyType == ProxySettings.ProxyType.DIRECT) { + return ProxySettings.NO_PROXY_SETTINGS; + } + + try { + final InetAddress proxyHostAddress = InetAddress.getByName(proxyHost); + return new ProxySettings(proxyType, proxyHostAddress, proxyPort, proxyUserName.toString(), proxyPassword.toString()); + } catch (final UnknownHostException e) { + throw new SettingsException("Azure proxy host is unknown.", e); + } + } + private static <T> T getConfigValue(Settings settings, String clientName, Setting.AffixSetting<T> clientSetting) { final Setting<T> concreteSetting = clientSetting.getConcreteSettingForNamespace(clientName); return concreteSetting.get(settings); @@ -407,12 +434,12 @@ static Map<String, AzureStorageSettings> overrideLocationMode( entry.getValue().endpointSuffix, entry.getValue().timeout, entry.getValue().maxRetries, - entry.getValue().proxy, locationMode, entry.getValue().connectTimeout, entry.getValue().writeTimeout, entry.getValue().readTimeout, - entry.getValue().responseTimeout + entry.getValue().responseTimeout, + entry.getValue().getProxySettings() ) ); } diff --git a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/ProxySettings.java b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/ProxySettings.java new file mode 100644 index 0000000000000..df8c95e69acf2 --- /dev/null +++ b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/ProxySettings.java @@ -0,0 +1,110 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.repositories.azure; + +import com.azure.core.http.ProxyOptions; +import org.opensearch.common.Strings; +import org.opensearch.common.settings.SettingsException; + +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.util.Objects; + +public class ProxySettings { + + public static final ProxySettings NO_PROXY_SETTINGS = new ProxySettings(ProxyType.DIRECT, null, -1, null, null); + + private final ProxyType type; + + private final InetAddress host; + + private final String username; + + private final String password; + + private final int port; + + public static enum ProxyType { + HTTP(ProxyOptions.Type.HTTP.name()), + + /** + * Please use SOCKS4 instead + */ + @Deprecated + SOCKS(ProxyOptions.Type.SOCKS4.name()), + + SOCKS4(ProxyOptions.Type.SOCKS4.name()), + + SOCKS5(ProxyOptions.Type.SOCKS5.name()), + + DIRECT("DIRECT"); + + private final String name; + + private ProxyType(String name) { + this.name = name; + } + + public ProxyOptions.Type toProxyType() { + if (this == DIRECT) { + // We check it in settings, + // the probability that it could be thrown is small, but who knows + throw new SettingsException("Couldn't convert to Azure proxy type"); + } + return ProxyOptions.Type.valueOf(name()); + } + + } + + public ProxySettings(final ProxyType type, final InetAddress host, final int port, final String username, final String password) { + this.type = type; + this.host = host; + this.port = port; + this.username = username; + this.password = password; + } + + public ProxyType getType() { + return this.type; + } + + public InetSocketAddress getAddress() { + return new InetSocketAddress(host, port); + } + + public String getUsername() { + return this.username; + } + + public String getPassword() { + return this.password; + } +
public boolean isAuthenticated() { + return Strings.isNullOrEmpty(username) == false && Strings.isNullOrEmpty(password) == false; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + final ProxySettings that = (ProxySettings) o; + return port == that.port + && type == that.type + && Objects.equals(host, that.host) + && Objects.equals(username, that.username) + && Objects.equals(password, that.password); + } + + @Override + public int hashCode() { + return Objects.hash(type, host, username, password, port); + } + +} diff --git a/plugins/repository-azure/src/main/plugin-metadata/plugin-security.policy b/plugins/repository-azure/src/main/plugin-metadata/plugin-security.policy index f6d0f5fcb08d5..f3bf52ea46505 100644 --- a/plugins/repository-azure/src/main/plugin-metadata/plugin-security.policy +++ b/plugins/repository-azure/src/main/plugin-metadata/plugin-security.policy @@ -38,4 +38,7 @@ grant { permission java.lang.RuntimePermission "accessDeclaredMembers"; permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; permission java.lang.RuntimePermission "setContextClassLoader"; + + // azure client set Authenticator for proxy username/password + permission java.net.NetPermission "setDefaultAuthenticator"; }; diff --git a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java index 785ebef7307bc..7f5ca73a507ad 100644 --- a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java +++ b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java @@ -32,6 +32,7 @@ package org.opensearch.repositories.azure; +import org.opensearch.common.Strings; import reactor.core.scheduler.Schedulers; import com.azure.core.http.policy.HttpPipelinePolicy; @@ -50,7 +51,6 @@ import java.io.UncheckedIOException; import java.net.InetAddress; import java.net.InetSocketAddress; -import java.net.Proxy; import java.net.URI; import java.net.URISyntaxException; import java.net.UnknownHostException; @@ -299,9 +299,9 @@ public void testGetSelectedClientBackoffPolicyNbRetries() { public void testNoProxy() { final Settings settings = Settings.builder().setSecureSettings(buildSecureSettings()).build(); final AzureStorageService mock = storageServiceWithSettingsValidation(settings); - assertThat(mock.storageSettings.get("azure1").getProxy(), nullValue()); - assertThat(mock.storageSettings.get("azure2").getProxy(), nullValue()); - assertThat(mock.storageSettings.get("azure3").getProxy(), nullValue()); + assertEquals(mock.storageSettings.get("azure1").getProxySettings(), ProxySettings.NO_PROXY_SETTINGS); + assertEquals(mock.storageSettings.get("azure2").getProxySettings(), ProxySettings.NO_PROXY_SETTINGS); + assertEquals(mock.storageSettings.get("azure3").getProxySettings(), ProxySettings.NO_PROXY_SETTINGS); } public void testProxyHttp() throws UnknownHostException { @@ -312,13 +312,13 @@ public void testProxyHttp() throws UnknownHostException { .put("azure.client.azure1.proxy.type", "http") .build(); final AzureStorageService mock = storageServiceWithSettingsValidation(settings); - final Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy(); + final ProxySettings azure1Proxy = mock.storageSettings.get("azure1").getProxySettings(); assertThat(azure1Proxy, notNullValue()); - 
assertThat(azure1Proxy.type(), is(Proxy.Type.HTTP)); - assertThat(azure1Proxy.address(), is(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 8080))); - assertThat(mock.storageSettings.get("azure2").getProxy(), nullValue()); - assertThat(mock.storageSettings.get("azure3").getProxy(), nullValue()); + assertThat(azure1Proxy.getType(), is(ProxySettings.ProxyType.HTTP)); + assertThat(azure1Proxy.getAddress(), is(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 8080))); + assertEquals(ProxySettings.NO_PROXY_SETTINGS, mock.storageSettings.get("azure2").getProxySettings()); + assertEquals(ProxySettings.NO_PROXY_SETTINGS, mock.storageSettings.get("azure3").getProxySettings()); } public void testMultipleProxies() throws UnknownHostException { @@ -332,52 +332,59 @@ public void testMultipleProxies() throws UnknownHostException { .put("azure.client.azure2.proxy.type", "http") .build(); final AzureStorageService mock = storageServiceWithSettingsValidation(settings); - final Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy(); + final ProxySettings azure1Proxy = mock.storageSettings.get("azure1").getProxySettings(); assertThat(azure1Proxy, notNullValue()); - assertThat(azure1Proxy.type(), is(Proxy.Type.HTTP)); - assertThat(azure1Proxy.address(), is(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 8080))); - final Proxy azure2Proxy = mock.storageSettings.get("azure2").getProxy(); + assertThat(azure1Proxy.getType(), is(ProxySettings.ProxyType.HTTP)); + assertThat(azure1Proxy.getAddress(), is(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 8080))); + final ProxySettings azure2Proxy = mock.storageSettings.get("azure2").getProxySettings(); assertThat(azure2Proxy, notNullValue()); - assertThat(azure2Proxy.type(), is(Proxy.Type.HTTP)); - assertThat(azure2Proxy.address(), is(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 8081))); - assertThat(mock.storageSettings.get("azure3").getProxy(), nullValue()); + assertThat(azure2Proxy.getType(), is(ProxySettings.ProxyType.HTTP)); + assertThat(azure2Proxy.getAddress(), is(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 8081))); + assertTrue(Strings.isNullOrEmpty(azure2Proxy.getUsername())); + assertTrue(Strings.isNullOrEmpty(azure2Proxy.getPassword())); + assertEquals(mock.storageSettings.get("azure3").getProxySettings(), ProxySettings.NO_PROXY_SETTINGS); } public void testProxySocks() throws UnknownHostException { + final MockSecureSettings secureSettings = buildSecureSettings(); + secureSettings.setString("azure.client.azure1.proxy.username", "user"); + secureSettings.setString("azure.client.azure1.proxy.password", "pwd"); final Settings settings = Settings.builder() - .setSecureSettings(buildSecureSettings()) .put("azure.client.azure1.proxy.host", "127.0.0.1") .put("azure.client.azure1.proxy.port", 8080) - .put("azure.client.azure1.proxy.type", "socks") + .put("azure.client.azure1.proxy.type", "socks5") + .setSecureSettings(secureSettings) .build(); final AzureStorageService mock = storageServiceWithSettingsValidation(settings); - final Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy(); + final ProxySettings azure1Proxy = mock.storageSettings.get("azure1").getProxySettings(); assertThat(azure1Proxy, notNullValue()); - assertThat(azure1Proxy.type(), is(Proxy.Type.SOCKS)); - assertThat(azure1Proxy.address(), is(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 8080))); - assertThat(mock.storageSettings.get("azure2").getProxy(), nullValue()); - 
assertThat(mock.storageSettings.get("azure3").getProxy(), nullValue()); + assertThat(azure1Proxy.getType(), is(ProxySettings.ProxyType.SOCKS5)); + assertThat(azure1Proxy.getAddress(), is(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 8080))); + assertEquals("user", azure1Proxy.getUsername()); + assertEquals("pwd", azure1Proxy.getPassword()); + assertEquals(ProxySettings.NO_PROXY_SETTINGS, mock.storageSettings.get("azure2").getProxySettings()); + assertEquals(ProxySettings.NO_PROXY_SETTINGS, mock.storageSettings.get("azure3").getProxySettings()); } public void testProxyNoHost() { final Settings settings = Settings.builder() .setSecureSettings(buildSecureSettings()) .put("azure.client.azure1.proxy.port", 8080) - .put("azure.client.azure1.proxy.type", randomFrom("socks", "http")) + .put("azure.client.azure1.proxy.type", randomFrom("socks", "socks4", "socks5", "http")) .build(); final SettingsException e = expectThrows(SettingsException.class, () -> storageServiceWithSettingsValidation(settings)); - assertEquals("Azure Proxy type has been set but proxy host or port is not defined.", e.getMessage()); + assertEquals("Azure proxy type has been set but proxy host or port is not defined.", e.getMessage()); } public void testProxyNoPort() { final Settings settings = Settings.builder() .setSecureSettings(buildSecureSettings()) .put("azure.client.azure1.proxy.host", "127.0.0.1") - .put("azure.client.azure1.proxy.type", randomFrom("socks", "http")) + .put("azure.client.azure1.proxy.type", randomFrom("socks", "socks4", "socks5", "http")) .build(); final SettingsException e = expectThrows(SettingsException.class, () -> storageServiceWithSettingsValidation(settings)); - assertEquals("Azure Proxy type has been set but proxy host or port is not defined.", e.getMessage()); + assertEquals("Azure proxy type has been set but proxy host or port is not defined.", e.getMessage()); } public void testProxyNoType() { @@ -388,13 +395,13 @@ public void testProxyNoType() { .build(); final SettingsException e = expectThrows(SettingsException.class, () -> storageServiceWithSettingsValidation(settings)); - assertEquals("Azure Proxy port or host have been set but proxy type is not defined.", e.getMessage()); + assertEquals("Azure proxy port or host or username or password have been set but proxy type is not defined.", e.getMessage()); } public void testProxyWrongHost() { final Settings settings = Settings.builder() .setSecureSettings(buildSecureSettings()) - .put("azure.client.azure1.proxy.type", randomFrom("socks", "http")) + .put("azure.client.azure1.proxy.type", randomFrom("socks", "socks4", "socks5", "http")) .put("azure.client.azure1.proxy.host", "thisisnotavalidhostorwehavebeensuperunlucky") .put("azure.client.azure1.proxy.port", 8080) .build(); diff --git a/plugins/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml b/plugins/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml index 650d5c4474199..beaa95b732d52 100644 --- a/plugins/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml +++ b/plugins/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml @@ -45,17 +45,14 @@ setup: body: - index: _index: docs - _type: doc _id: 1 - snapshot: one - index: _index: docs - _type: doc _id: 2 - snapshot: one - index: _index: docs - _type: doc _id: 3 - snapshot: one @@ -93,22 +90,18 @@ setup: body: - index: _index: docs - _type: doc _id: 4 - 
snapshot: two - index: _index: docs - _type: doc _id: 5 - snapshot: two - index: _index: docs - _type: doc _id: 6 - snapshot: two - index: _index: docs - _type: doc _id: 7 - snapshot: two diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 5ab7d79d6f0c5..c7e7bc5f40cce 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -61,8 +61,8 @@ dependencies { api 'com.google.api:api-common:1.8.1' api 'com.google.api:gax:1.54.0' api 'org.threeten:threetenbp:1.4.4' - api 'com.google.protobuf:protobuf-java-util:3.11.3' - api 'com.google.protobuf:protobuf-java:3.11.3' + api 'com.google.protobuf:protobuf-java-util:3.19.3' + api 'com.google.protobuf:protobuf-java:3.19.3' api 'com.google.code.gson:gson:2.8.9' api 'com.google.api.grpc:proto-google-common-protos:1.16.0' api 'com.google.api.grpc:proto-google-iam-v1:0.12.0' @@ -165,39 +165,23 @@ thirdPartyAudit { 'org.apache.http.client.RedirectHandler', 'org.apache.http.client.RequestDirector', 'org.apache.http.client.UserTokenHandler', - 'org.apache.http.client.methods.HttpDelete', 'org.apache.http.client.methods.HttpEntityEnclosingRequestBase', - 'org.apache.http.client.methods.HttpGet', - 'org.apache.http.client.methods.HttpHead', - 'org.apache.http.client.methods.HttpOptions', - 'org.apache.http.client.methods.HttpPost', - 'org.apache.http.client.methods.HttpPut', 'org.apache.http.client.methods.HttpRequestBase', - 'org.apache.http.client.methods.HttpTrace', 'org.apache.http.config.SocketConfig', 'org.apache.http.config.SocketConfig$Builder', 'org.apache.http.conn.ClientConnectionManager', 'org.apache.http.conn.ConnectionKeepAliveStrategy', 'org.apache.http.conn.params.ConnManagerParams', - 'org.apache.http.conn.params.ConnPerRouteBean', 'org.apache.http.conn.params.ConnRouteParams', 'org.apache.http.conn.routing.HttpRoutePlanner', 'org.apache.http.conn.scheme.PlainSocketFactory', - 'org.apache.http.conn.scheme.Scheme', 'org.apache.http.conn.scheme.SchemeRegistry', - 'org.apache.http.conn.ssl.SSLConnectionSocketFactory', 'org.apache.http.conn.ssl.SSLSocketFactory', 'org.apache.http.conn.ssl.X509HostnameVerifier', 'org.apache.http.entity.AbstractHttpEntity', 'org.apache.http.impl.client.DefaultHttpClient', - 'org.apache.http.impl.client.DefaultHttpRequestRetryHandler', 'org.apache.http.impl.client.HttpClientBuilder', 'org.apache.http.impl.conn.PoolingHttpClientConnectionManager', - 'org.apache.http.impl.conn.ProxySelectorRoutePlanner', - 'org.apache.http.impl.conn.SystemDefaultRoutePlanner', - 'org.apache.http.impl.conn.tsccm.ThreadSafeClientConnManager', - 'org.apache.http.message.BasicHttpResponse', - 'org.apache.http.params.BasicHttpParams', 'org.apache.http.params.HttpConnectionParams', 'org.apache.http.params.HttpParams', 'org.apache.http.params.HttpProtocolParams', diff --git a/plugins/repository-gcs/licenses/protobuf-java-3.11.3.jar.sha1 b/plugins/repository-gcs/licenses/protobuf-java-3.11.3.jar.sha1 deleted file mode 100644 index 371f423c3751e..0000000000000 --- a/plugins/repository-gcs/licenses/protobuf-java-3.11.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -df12be70b968e32442821a2cfdc3cede5a42dec5 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/protobuf-java-3.19.3.jar.sha1 b/plugins/repository-gcs/licenses/protobuf-java-3.19.3.jar.sha1 new file mode 100644 index 0000000000000..655ecd1f1c1c9 --- /dev/null +++ b/plugins/repository-gcs/licenses/protobuf-java-3.19.3.jar.sha1 @@ -0,0 +1 @@ +4b57f1b1b9e281231c3fcfc039ce3021e29ff570 \ No newline at end 
of file diff --git a/plugins/repository-gcs/licenses/protobuf-java-util-3.11.3.jar.sha1 b/plugins/repository-gcs/licenses/protobuf-java-util-3.11.3.jar.sha1 deleted file mode 100644 index 8f8d3cf3c9e49..0000000000000 --- a/plugins/repository-gcs/licenses/protobuf-java-util-3.11.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fd4ba2dfeb1b010eb20ca27e65fbfb74fbbdcdb9 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/protobuf-java-util-3.19.3.jar.sha1 b/plugins/repository-gcs/licenses/protobuf-java-util-3.19.3.jar.sha1 new file mode 100644 index 0000000000000..9ba36d444c541 --- /dev/null +++ b/plugins/repository-gcs/licenses/protobuf-java-util-3.19.3.jar.sha1 @@ -0,0 +1 @@ +3e6812cbbb7e6faffa7b56438740dec510e1fc1a \ No newline at end of file diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageClientSettings.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageClientSettings.java index d15b00712dea4..e8700570d2801 100644 --- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageClientSettings.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageClientSettings.java @@ -36,17 +36,23 @@ import org.opensearch.common.Strings; import org.opensearch.common.settings.SecureSetting; +import org.opensearch.common.settings.SecureString; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.common.settings.SettingsException; import org.opensearch.common.unit.TimeValue; import java.io.IOException; import java.io.InputStream; import java.io.UncheckedIOException; +import java.net.InetAddress; +import java.net.Proxy; import java.net.URI; +import java.net.UnknownHostException; import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.Locale; import java.util.Map; import java.util.function.Function; @@ -114,6 +120,54 @@ public class GoogleCloudStorageClientSettings { key -> new Setting<>(key, "repository-gcs", Function.identity(), Setting.Property.NodeScope, Setting.Property.Deprecated) ); + /** Proxy type */ + static final Setting.AffixSetting<Proxy.Type> PROXY_TYPE_SETTING = Setting.affixKeySetting( + PREFIX, + "proxy.type", + (key) -> new Setting<>( + key, + Proxy.Type.DIRECT.name(), + s -> Proxy.Type.valueOf(s.toUpperCase(Locale.ROOT)), + Setting.Property.NodeScope + ) + ); + + /** The host of a proxy to connect */ + static final Setting.AffixSetting<String> PROXY_HOST_SETTING = Setting.affixKeySetting( + PREFIX, + "proxy.host", + key -> Setting.simpleString(key, Setting.Property.NodeScope), + () -> PROXY_TYPE_SETTING + ); + + /** The port of a proxy to connect */ + static final Setting.AffixSetting<Integer> PROXY_PORT_SETTING = Setting.affixKeySetting( + PREFIX, + "proxy.port", + key -> Setting.intSetting(key, 0, 0, (1 << 16) - 1, Setting.Property.NodeScope), + () -> PROXY_TYPE_SETTING, + () -> PROXY_HOST_SETTING + ); + + /** The username of a proxy to connect */ + static final Setting.AffixSetting<SecureString> PROXY_USERNAME_SETTING = Setting.affixKeySetting( + PREFIX, + "proxy.username", + key -> SecureSetting.secureString(key, null), + () -> PROXY_TYPE_SETTING, + () -> PROXY_HOST_SETTING + ); + + /** The password of a proxy to connect */ + static final Setting.AffixSetting<SecureString> PROXY_PASSWORD_SETTING = Setting.affixKeySetting( + PREFIX, + "proxy.password", + key -> SecureSetting.secureString(key, null), + () -> PROXY_TYPE_SETTING, + () -> PROXY_HOST_SETTING, + ()
-> PROXY_USERNAME_SETTING + ); + /** The credentials used by the client to connect to the Storage endpoint. */ private final ServiceAccountCredentials credential; @@ -135,6 +189,9 @@ public class GoogleCloudStorageClientSettings { /** The token server URI. This leases access tokens in the oauth flow. */ private final URI tokenUri; + /** The GCS SDK Proxy settings. */ + private final ProxySettings proxySettings; + GoogleCloudStorageClientSettings( final ServiceAccountCredentials credential, final String endpoint, @@ -142,7 +199,8 @@ final TimeValue connectTimeout, final TimeValue readTimeout, final String applicationName, - final URI tokenUri + final URI tokenUri, + final ProxySettings proxySettings ) { this.credential = credential; this.endpoint = endpoint; @@ -151,6 +209,7 @@ this.readTimeout = readTimeout; this.applicationName = applicationName; this.tokenUri = tokenUri; + this.proxySettings = proxySettings; } public ServiceAccountCredentials getCredential() { @@ -181,6 +240,10 @@ public URI getTokenUri() { return tokenUri; } + public ProxySettings getProxySettings() { + return proxySettings; + } + public static Map<String, GoogleCloudStorageClientSettings> load(final Settings settings) { final Map<String, GoogleCloudStorageClientSettings> clients = new HashMap<>(); for (final String clientName : settings.getGroups(PREFIX).keySet()) { @@ -202,10 +265,39 @@ static GoogleCloudStorageClientSettings getClientSettings(final Settings setting getConfigValue(settings, clientName, CONNECT_TIMEOUT_SETTING), getConfigValue(settings, clientName, READ_TIMEOUT_SETTING), getConfigValue(settings, clientName, APPLICATION_NAME_SETTING), - getConfigValue(settings, clientName, TOKEN_URI_SETTING) + getConfigValue(settings, clientName, TOKEN_URI_SETTING), + validateAndCreateProxySettings(settings, clientName) ); } + static ProxySettings validateAndCreateProxySettings(final Settings settings, final String clientName) { + final Proxy.Type proxyType = getConfigValue(settings, clientName, PROXY_TYPE_SETTING); + final String proxyHost = getConfigValue(settings, clientName, PROXY_HOST_SETTING); + final int proxyPort = getConfigValue(settings, clientName, PROXY_PORT_SETTING); + final SecureString proxyUserName = getConfigValue(settings, clientName, PROXY_USERNAME_SETTING); + final SecureString proxyPassword = getConfigValue(settings, clientName, PROXY_PASSWORD_SETTING); + // Validate proxy settings + if (proxyType == Proxy.Type.DIRECT + && (proxyPort != 0 || Strings.hasText(proxyHost) || Strings.hasText(proxyUserName) || Strings.hasText(proxyPassword))) { + throw new SettingsException( + "Google Cloud Storage proxy port or host or username or password have been set but proxy type is not defined." + ); + } + if (proxyType != Proxy.Type.DIRECT && (proxyPort == 0 || Strings.isEmpty(proxyHost))) { + throw new SettingsException("Google Cloud Storage proxy type has been set but proxy host or port is not defined."); + } + if (proxyType == Proxy.Type.DIRECT) { + return ProxySettings.NO_PROXY_SETTINGS; + } + + try { + final InetAddress proxyHostAddress = InetAddress.getByName(proxyHost); + return new ProxySettings(proxyType, proxyHostAddress, proxyPort, proxyUserName.toString(), proxyPassword.toString()); + } catch (final UnknownHostException e) { + throw new SettingsException("Google Cloud Storage proxy host is unknown.", e); + } + } + /** * Loads the service account file corresponding to a given client name. If no * file is defined for the client, a {@code null} credential is returned.
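For illustration, a minimal sketch of how the proxy settings above resolve end to end, built only from APIs that appear in this patch; the client name "default" and the literal host, port, and credential values are placeholders, and MockSecureSettings is the test-scope stand-in for the keystore used elsewhere in this diff:

    import org.opensearch.common.settings.MockSecureSettings;
    import org.opensearch.common.settings.Settings;

    // Username/password are secure settings; MockSecureSettings stands in for the keystore in tests.
    final MockSecureSettings secureSettings = new MockSecureSettings();
    secureSettings.setString("gcs.client.default.proxy.username", "user");
    secureSettings.setString("gcs.client.default.proxy.password", "secret");

    final Settings settings = Settings.builder()
        .put("gcs.client.default.proxy.type", "http") // parsed case-insensitively into java.net.Proxy.Type
        .put("gcs.client.default.proxy.host", "127.0.0.1")
        .put("gcs.client.default.proxy.port", 8080) // intSetting bounds: 0..65535
        .setSecureSettings(secureSettings)
        .build();

    // load(...) runs validateAndCreateProxySettings per client: any proxy.* key with the default
    // DIRECT type, or a non-DIRECT type without host and port, throws SettingsException.
    final GoogleCloudStorageClientSettings client = GoogleCloudStorageClientSettings.load(settings).get("default");
    assert client.getProxySettings().isAuthenticated(); // true: both username and password are set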
diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStoragePlugin.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStoragePlugin.java index 7d51a6196e4c8..4908b26649b1b 100644 --- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStoragePlugin.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStoragePlugin.java @@ -92,7 +92,12 @@ public List<Setting<?>> getSettings() { GoogleCloudStorageClientSettings.CONNECT_TIMEOUT_SETTING, GoogleCloudStorageClientSettings.READ_TIMEOUT_SETTING, GoogleCloudStorageClientSettings.APPLICATION_NAME_SETTING, - GoogleCloudStorageClientSettings.TOKEN_URI_SETTING + GoogleCloudStorageClientSettings.TOKEN_URI_SETTING, + GoogleCloudStorageClientSettings.PROXY_TYPE_SETTING, + GoogleCloudStorageClientSettings.PROXY_HOST_SETTING, + GoogleCloudStorageClientSettings.PROXY_PORT_SETTING, + GoogleCloudStorageClientSettings.PROXY_USERNAME_SETTING, + GoogleCloudStorageClientSettings.PROXY_PASSWORD_SETTING ); } diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageService.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageService.java index 8208dcfe597ff..f4b501327d52c 100644 --- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageService.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageService.java @@ -50,6 +50,9 @@ import org.opensearch.common.unit.TimeValue; import java.io.IOException; +import java.net.Authenticator; +import java.net.PasswordAuthentication; +import java.net.Proxy; import java.net.URI; import java.util.Map; @@ -142,13 +145,7 @@ synchronized void closeRepositoryClient(String repositoryName) { */ private Storage createClient(GoogleCloudStorageClientSettings clientSettings, GoogleCloudStorageOperationsStats stats) throws IOException { - final HttpTransport httpTransport = SocketAccess.doPrivilegedIOException(() -> { - final NetHttpTransport.Builder builder = new NetHttpTransport.Builder(); - // requires java.lang.RuntimePermission "setFactory" - // Pin the TLS trust certificates. - builder.trustCertificates(GoogleUtils.getCertificateTrustStore()); - return builder.build(); - }); + final HttpTransport httpTransport = createHttpTransport(clientSettings); final GoogleCloudStorageHttpStatsCollector httpStatsCollector = new GoogleCloudStorageHttpStatsCollector(stats); @@ -175,6 +172,28 @@ public HttpRequestInitializer getHttpRequestInitializer(ServiceOptions ser return storageOptions.getService(); } + private HttpTransport createHttpTransport(final GoogleCloudStorageClientSettings clientSettings) throws IOException { + return SocketAccess.doPrivilegedIOException(() -> { + final NetHttpTransport.Builder builder = new NetHttpTransport.Builder(); + // requires java.lang.RuntimePermission "setFactory" + // Pin the TLS trust certificates.
+ builder.trustCertificates(GoogleUtils.getCertificateTrustStore()); + final ProxySettings proxySettings = clientSettings.getProxySettings(); + if (proxySettings != ProxySettings.NO_PROXY_SETTINGS) { + if (proxySettings.isAuthenticated()) { + Authenticator.setDefault(new Authenticator() { + @Override + protected PasswordAuthentication getPasswordAuthentication() { + return new PasswordAuthentication(proxySettings.getUsername(), proxySettings.getPassword().toCharArray()); + } + }); + } + builder.setProxy(new Proxy(proxySettings.getType(), proxySettings.getAddress())); + } + return builder.build(); + }); + } + StorageOptions createStorageOptions( final GoogleCloudStorageClientSettings clientSettings, final HttpTransportOptions httpTransportOptions diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/ProxySettings.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/ProxySettings.java new file mode 100644 index 0000000000000..ddc6446d2c8c5 --- /dev/null +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/ProxySettings.java @@ -0,0 +1,80 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.repositories.gcs; + +import org.opensearch.common.Strings; + +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.Proxy; +import java.util.Objects; + +public class ProxySettings { + + public static final ProxySettings NO_PROXY_SETTINGS = new ProxySettings(Proxy.Type.DIRECT, null, -1, null, null); + + private final Proxy.Type type; + + private final InetAddress host; + + private final String username; + + private final String password; + + private final int port; + + public ProxySettings(final Proxy.Type type, final InetAddress host, final int port, final String username, final String password) { + this.type = type; + this.host = host; + this.port = port; + this.username = username; + this.password = password; + } + + public Proxy.Type getType() { + return this.type; + } + + public InetSocketAddress getAddress() { + return new InetSocketAddress(host, port); + } + + public String getUsername() { + return this.username; + } + + public String getPassword() { + return this.password; + } + + public boolean isAuthenticated() { + return Strings.isNullOrEmpty(username) == false && Strings.isNullOrEmpty(password) == false; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final ProxySettings that = (ProxySettings) o; + return port == that.port + && type == that.type + && Objects.equals(host, that.host) + && Objects.equals(username, that.username) + && Objects.equals(password, that.password); + } + + @Override + public int hashCode() { + return Objects.hash(type, host, username, password, port); + } +} diff --git a/plugins/repository-gcs/src/main/plugin-metadata/plugin-security.policy b/plugins/repository-gcs/src/main/plugin-metadata/plugin-security.policy index a6e2299f52f33..48af969b04dc3 100644 --- a/plugins/repository-gcs/src/main/plugin-metadata/plugin-security.policy +++ b/plugins/repository-gcs/src/main/plugin-metadata/plugin-security.policy @@ -40,4 +40,7 @@ grant { // gcs client opens socket connections for to access repository permission java.net.SocketPermission "*", "connect"; + + // gcs client set 
Authenticator for proxy username/password + permission java.net.NetPermission "setDefaultAuthenticator"; }; diff --git a/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageClientSettingsTests.java b/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageClientSettingsTests.java index 8dbf6b0ff2873..abf63e5525d4d 100644 --- a/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageClientSettingsTests.java +++ b/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageClientSettingsTests.java @@ -38,9 +38,13 @@ import org.opensearch.common.settings.MockSecureSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.common.settings.SettingsException; import org.opensearch.common.unit.TimeValue; import org.opensearch.test.OpenSearchTestCase; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.Proxy; import java.net.URI; import java.nio.charset.StandardCharsets; import java.security.KeyPair; @@ -92,6 +96,7 @@ public void testLoad() throws Exception { assertEquals(expectedClientSettings.getConnectTimeout(), actualClientSettings.getConnectTimeout()); assertEquals(expectedClientSettings.getReadTimeout(), actualClientSettings.getReadTimeout()); assertEquals(expectedClientSettings.getApplicationName(), actualClientSettings.getApplicationName()); + assertEquals(ProxySettings.NO_PROXY_SETTINGS, actualClientSettings.getProxySettings()); } if (deprecationWarnings.isEmpty() == false) { @@ -118,11 +123,131 @@ public void testProjectIdDefaultsToCredentials() throws Exception { CONNECT_TIMEOUT_SETTING.getDefault(Settings.EMPTY), READ_TIMEOUT_SETTING.getDefault(Settings.EMPTY), APPLICATION_NAME_SETTING.getDefault(Settings.EMPTY), - new URI("") + new URI(""), + new ProxySettings(Proxy.Type.DIRECT, null, 0, null, null) ); assertEquals(credential.getProjectId(), googleCloudStorageClientSettings.getProjectId()); } + public void testHttpProxySettings() throws Exception { + final int port = randomIntBetween(10, 1080); + final String userName = randomAlphaOfLength(10); + final String password = randomAlphaOfLength(10); + final GoogleCloudStorageClientSettings gcsWithHttpProxyWithoutUserPwd = proxyGoogleCloudStorageClientSettings( + new ProxySettings(Proxy.Type.HTTP, InetAddress.getByName("127.0.0.10"), port, null, null) + ); + + assertEquals(Proxy.Type.HTTP, gcsWithHttpProxyWithoutUserPwd.getProxySettings().getType()); + assertEquals( + new InetSocketAddress(InetAddress.getByName("127.0.0.10"), port), + gcsWithHttpProxyWithoutUserPwd.getProxySettings().getAddress() + ); + assertNull(gcsWithHttpProxyWithoutUserPwd.getProxySettings().getUsername()); + assertNull(gcsWithHttpProxyWithoutUserPwd.getProxySettings().getPassword()); + assertFalse(gcsWithHttpProxyWithoutUserPwd.getProxySettings().isAuthenticated()); + + final GoogleCloudStorageClientSettings gcsWithHttpProxyWithUserPwd = proxyGoogleCloudStorageClientSettings( + new ProxySettings(Proxy.Type.HTTP, InetAddress.getByName("127.0.0.10"), port, userName, password) + ); + + assertEquals(Proxy.Type.HTTP, gcsWithHttpProxyWithoutUserPwd.getProxySettings().getType()); + assertEquals( + new InetSocketAddress(InetAddress.getByName("127.0.0.10"), port), + gcsWithHttpProxyWithUserPwd.getProxySettings().getAddress() + ); + assertTrue(gcsWithHttpProxyWithUserPwd.getProxySettings().isAuthenticated()); + assertEquals(userName, 
gcsWithHttpProxyWithUserPwd.getProxySettings().getUsername()); + assertEquals(password, gcsWithHttpProxyWithUserPwd.getProxySettings().getPassword()); + } + + public void testSocksProxySettings() throws Exception { + final int port = randomIntBetween(10, 1080); + final String userName = randomAlphaOfLength(10); + final String password = randomAlphaOfLength(10); + final GoogleCloudStorageClientSettings gcsWithHttpProxyWithoutUserPwd = proxyGoogleCloudStorageClientSettings( + new ProxySettings(Proxy.Type.SOCKS, InetAddress.getByName("127.0.0.10"), port, null, null) + ); + + assertEquals(Proxy.Type.SOCKS, gcsWithHttpProxyWithoutUserPwd.getProxySettings().getType()); + assertEquals( + new InetSocketAddress(InetAddress.getByName("127.0.0.10"), port), + gcsWithHttpProxyWithoutUserPwd.getProxySettings().getAddress() + ); + assertFalse(gcsWithHttpProxyWithoutUserPwd.getProxySettings().isAuthenticated()); + assertNull(gcsWithHttpProxyWithoutUserPwd.getProxySettings().getUsername()); + assertNull(gcsWithHttpProxyWithoutUserPwd.getProxySettings().getPassword()); + + final GoogleCloudStorageClientSettings gcsWithHttpProxyWithUserPwd = proxyGoogleCloudStorageClientSettings( + new ProxySettings(Proxy.Type.SOCKS, InetAddress.getByName("127.0.0.10"), port, userName, password) + ); + + assertEquals(Proxy.Type.SOCKS, gcsWithHttpProxyWithoutUserPwd.getProxySettings().getType()); + assertEquals( + new InetSocketAddress(InetAddress.getByName("127.0.0.10"), port), + gcsWithHttpProxyWithUserPwd.getProxySettings().getAddress() + ); + assertTrue(gcsWithHttpProxyWithUserPwd.getProxySettings().isAuthenticated()); + assertEquals(userName, gcsWithHttpProxyWithUserPwd.getProxySettings().getUsername()); + assertEquals(password, gcsWithHttpProxyWithUserPwd.getProxySettings().getPassword()); + } + + public void testProxyWrongHost() { + final Settings settings = Settings.builder() + .put("gcs.client.default.proxy.type", randomFrom("socks", "http")) + .put("gcs.client.default.proxy.host", "thisisnotavalidhostorwehavebeensuperunlucky") + .put("gcs.client.default.proxy.port", 8080) + .build(); + final SettingsException e = expectThrows(SettingsException.class, () -> GoogleCloudStorageClientSettings.load(settings)); + assertEquals("Google Cloud Storage proxy host is unknown.", e.getMessage()); + } + + public void testProxyTypeNotSet() { + final Settings hostPortSettings = Settings.builder() + .put("gcs.client.default.proxy.host", "127.0.0.1") + .put("gcs.client.default.proxy.port", 8080) + .build(); + + SettingsException e = expectThrows(SettingsException.class, () -> GoogleCloudStorageClientSettings.load(hostPortSettings)); + assertEquals( + "Google Cloud Storage proxy port or host or username or password have been set but proxy type is not defined.", + e.getMessage() + ); + + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("gcs.client.default.proxy.username", "aaaa"); + secureSettings.setString("gcs.client.default.proxy.password", "bbbb"); + final Settings usernamePasswordSettings = Settings.builder().setSecureSettings(secureSettings).build(); + + e = expectThrows(SettingsException.class, () -> GoogleCloudStorageClientSettings.load(usernamePasswordSettings)); + assertEquals( + "Google Cloud Storage proxy port or host or username or password have been set but proxy type is not defined.", + e.getMessage() + ); + } + + public void testProxyHostNotSet() { + final Settings settings = Settings.builder() + .put("gcs.client.default.proxy.port", 8080) + 
.put("gcs.client.default.proxy.type", randomFrom("socks", "http")) + .build(); + final SettingsException e = expectThrows(SettingsException.class, () -> GoogleCloudStorageClientSettings.load(settings)); + assertEquals("Google Cloud Storage proxy type has been set but proxy host or port is not defined.", e.getMessage()); + } + + private GoogleCloudStorageClientSettings proxyGoogleCloudStorageClientSettings(final ProxySettings proxySettings) throws Exception { + final String clientName = randomAlphaOfLength(5); + return new GoogleCloudStorageClientSettings( + randomCredential(clientName).v1(), + ENDPOINT_SETTING.getDefault(Settings.EMPTY), + PROJECT_ID_SETTING.getDefault(Settings.EMPTY), + CONNECT_TIMEOUT_SETTING.getDefault(Settings.EMPTY), + READ_TIMEOUT_SETTING.getDefault(Settings.EMPTY), + APPLICATION_NAME_SETTING.getDefault(Settings.EMPTY), + new URI(""), + proxySettings + ); + } + /** Generates a given number of GoogleCloudStorageClientSettings along with the Settings to build them from **/ private Tuple, Settings> randomClients( final int nbClients, @@ -216,7 +341,8 @@ private static GoogleCloudStorageClientSettings randomClient( connectTimeout, readTimeout, applicationName, - new URI("") + new URI(""), + new ProxySettings(Proxy.Type.DIRECT, null, 0, null, null) ); } diff --git a/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageServiceTests.java b/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageServiceTests.java index 7792a5f51c459..c5a3a26be082f 100644 --- a/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageServiceTests.java +++ b/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageServiceTests.java @@ -35,7 +35,7 @@ import com.google.auth.Credentials; import com.google.cloud.http.HttpTransportOptions; import com.google.cloud.storage.Storage; - +import org.hamcrest.Matchers; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.settings.MockSecureSettings; import org.opensearch.common.settings.Setting; @@ -43,7 +43,6 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.test.OpenSearchTestCase; -import org.hamcrest.Matchers; import java.security.KeyPair; import java.security.KeyPairGenerator; @@ -51,9 +50,9 @@ import java.util.Locale; import java.util.UUID; -import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; public class GoogleCloudStorageServiceTests extends OpenSearchTestCase { diff --git a/plugins/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml b/plugins/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml index dfd0ecc5788b1..f087a004efdf2 100644 --- a/plugins/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml +++ b/plugins/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml @@ -48,17 +48,14 @@ setup: body: - index: _index: docs - _type: doc _id: 1 - snapshot: one - index: _index: docs - _type: doc _id: 2 - snapshot: one - index: _index: docs - _type: doc _id: 3 - snapshot: one @@ -96,22 +93,18 @@ setup: body: - index: _index: docs - 
_type: doc _id: 4 - snapshot: two - index: _index: docs - _type: doc _id: 5 - snapshot: two - index: _index: docs - _type: doc _id: 6 - snapshot: two - index: _index: docs - _type: doc _id: 7 - snapshot: two diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 9688835d0853f..6d2966faa59cf 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -67,14 +67,14 @@ dependencies { api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" api 'com.google.code.gson:gson:2.8.9' runtimeOnly 'com.google.guava:guava:30.1.1-jre' - api 'com.google.protobuf:protobuf-java:2.5.0' + api 'com.google.protobuf:protobuf-java:3.19.3' api 'commons-logging:commons-logging:1.1.3' api 'commons-cli:commons-cli:1.2' api "commons-codec:commons-codec:${versions.commonscodec}" api 'commons-collections:commons-collections:3.2.2' api 'org.apache.commons:commons-compress:1.21' api 'org.apache.commons:commons-configuration2:2.7' - api 'commons-io:commons-io:2.7' + api 'commons-io:commons-io:2.11.0' api 'org.apache.commons:commons-lang3:3.7' implementation 'com.google.re2j:re2j:1.1' api 'javax.servlet:servlet-api:2.5' @@ -83,7 +83,7 @@ dependencies { api 'net.minidev:json-smart:2.4.7' api 'org.apache.zookeeper:zookeeper:3.7.0' api "io.netty:netty-all:${versions.netty}" - implementation 'com.fasterxml.woodstox:woodstox-core:6.1.1' + implementation 'com.fasterxml.woodstox:woodstox-core:6.2.8' implementation 'org.codehaus.woodstox:stax2-api:4.2.1' hdfsFixture project(':test:fixtures:hdfs-fixture') @@ -113,6 +113,19 @@ tasks.named("dependencyLicenses").configure { mapping from: /hadoop-.*/, to: 'hadoop' } +thirdPartyAudit { + ignoreViolations( + // uses internal java api: sun.misc.Unsafe + 'com.google.protobuf.MessageSchema', + 'com.google.protobuf.UnsafeUtil', + 'com.google.protobuf.UnsafeUtil$1', + 'com.google.protobuf.UnsafeUtil$Android32MemoryAccessor', + 'com.google.protobuf.UnsafeUtil$Android64MemoryAccessor', + 'com.google.protobuf.UnsafeUtil$JvmMemoryAccessor', + 'com.google.protobuf.UnsafeUtil$MemoryAccessor' + ) +} + tasks.named("integTest").configure { it.dependsOn(project.tasks.named("bundlePlugin")) } @@ -235,7 +248,7 @@ for (String integTestTaskName : ['integTestHa', 'integTestSecure', 'integTestSec ) } } - + if (BuildParams.runtimeJavaVersion > JavaVersion.VERSION_1_8) { jvmArgs += ["--add-opens", "java.security.jgss/sun.security.krb5=ALL-UNNAMED"] } diff --git a/plugins/repository-hdfs/licenses/commons-io-2.11.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-io-2.11.0.jar.sha1 new file mode 100644 index 0000000000000..8adec30bade49 --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-io-2.11.0.jar.sha1 @@ -0,0 +1 @@ +a2503f302b11ebde7ebc3df41daebe0e4eea3689 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-io-2.7.jar.sha1 b/plugins/repository-hdfs/licenses/commons-io-2.7.jar.sha1 deleted file mode 100644 index bbb1b15dd1e1e..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-io-2.7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f2bd4ba11c4162733c13cc90ca7c7ea09967102 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/jackson-databind-2.12.5.jar.sha1 b/plugins/repository-hdfs/licenses/jackson-databind-2.12.5.jar.sha1 deleted file mode 100644 index ca1bd46bc3cd3..0000000000000 --- a/plugins/repository-hdfs/licenses/jackson-databind-2.12.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b064cf057f23d3d35390328c5030847efeffedde \ No newline at end of file diff --git 
a/plugins/repository-hdfs/licenses/jackson-databind-2.12.6.jar.sha1 b/plugins/repository-hdfs/licenses/jackson-databind-2.12.6.jar.sha1 new file mode 100644 index 0000000000000..f74842887d31b --- /dev/null +++ b/plugins/repository-hdfs/licenses/jackson-databind-2.12.6.jar.sha1 @@ -0,0 +1 @@ +fac216b606c1086e36acea6e572ee61572ad1670 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.72.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.72.Final.jar.sha1 deleted file mode 100644 index dc2119726f690..0000000000000 --- a/plugins/repository-hdfs/licenses/netty-all-4.1.72.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -86a69bf2f38d9f9f05c528e158f1532d3c6d625e \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.73.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.73.Final.jar.sha1 new file mode 100644 index 0000000000000..52d6f22e73013 --- /dev/null +++ b/plugins/repository-hdfs/licenses/netty-all-4.1.73.Final.jar.sha1 @@ -0,0 +1 @@ +75c5a0ddb28adcc9e4991c75678d4a85dfe4a0b3 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/protobuf-java-2.5.0.jar.sha1 b/plugins/repository-hdfs/licenses/protobuf-java-2.5.0.jar.sha1 deleted file mode 100644 index 71f918819e2b6..0000000000000 --- a/plugins/repository-hdfs/licenses/protobuf-java-2.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a10732c76bfacdbd633a7eb0f7968b1059a65dfa \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/protobuf-java-3.19.3.jar.sha1 b/plugins/repository-hdfs/licenses/protobuf-java-3.19.3.jar.sha1 new file mode 100644 index 0000000000000..655ecd1f1c1c9 --- /dev/null +++ b/plugins/repository-hdfs/licenses/protobuf-java-3.19.3.jar.sha1 @@ -0,0 +1 @@ +4b57f1b1b9e281231c3fcfc039ce3021e29ff570 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/woodstox-core-6.1.1.jar.sha1 b/plugins/repository-hdfs/licenses/woodstox-core-6.1.1.jar.sha1 deleted file mode 100644 index f2ad1c80882d3..0000000000000 --- a/plugins/repository-hdfs/licenses/woodstox-core-6.1.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -989bb31963ed1758b95c7c4381a91592a9a8df61 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/woodstox-core-6.2.8.jar.sha1 b/plugins/repository-hdfs/licenses/woodstox-core-6.2.8.jar.sha1 new file mode 100644 index 0000000000000..ae65cdebf26de --- /dev/null +++ b/plugins/repository-hdfs/licenses/woodstox-core-6.2.8.jar.sha1 @@ -0,0 +1 @@ +670748292899c53b1963730d9eb7f8ab71314e90 \ No newline at end of file diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 1ebd6c5c50ffe..21ad7b6dd54c1 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -65,7 +65,7 @@ dependencies { // HACK: javax.xml.bind was removed from default modules in java 9, so we pull the api in here, // and whitelist this hack in JarHell - api 'javax.xml.bind:jaxb-api:2.2.2' + api 'javax.xml.bind:jaxb-api:2.3.1' testImplementation project(':test:fixtures:s3-fixture') } diff --git a/plugins/repository-s3/licenses/jackson-annotations-2.12.5.jar.sha1 b/plugins/repository-s3/licenses/jackson-annotations-2.12.5.jar.sha1 deleted file mode 100644 index 797bcf2b161d4..0000000000000 --- a/plugins/repository-s3/licenses/jackson-annotations-2.12.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -52d929d5bb21d0186fe24c09624cc3ee4bafc3b3 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-annotations-2.12.6.jar.sha1 
b/plugins/repository-s3/licenses/jackson-annotations-2.12.6.jar.sha1 new file mode 100644 index 0000000000000..48ee3bf53c630 --- /dev/null +++ b/plugins/repository-s3/licenses/jackson-annotations-2.12.6.jar.sha1 @@ -0,0 +1 @@ +9487231edd6b0b1f14692c9cba9e0462809215d1 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-databind-2.12.5.jar.sha1 b/plugins/repository-s3/licenses/jackson-databind-2.12.5.jar.sha1 deleted file mode 100644 index ca1bd46bc3cd3..0000000000000 --- a/plugins/repository-s3/licenses/jackson-databind-2.12.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b064cf057f23d3d35390328c5030847efeffedde \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-databind-2.12.6.jar.sha1 b/plugins/repository-s3/licenses/jackson-databind-2.12.6.jar.sha1 new file mode 100644 index 0000000000000..f74842887d31b --- /dev/null +++ b/plugins/repository-s3/licenses/jackson-databind-2.12.6.jar.sha1 @@ -0,0 +1 @@ +fac216b606c1086e36acea6e572ee61572ad1670 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jaxb-api-2.2.2.jar.sha1 b/plugins/repository-s3/licenses/jaxb-api-2.2.2.jar.sha1 deleted file mode 100644 index a37e187238933..0000000000000 --- a/plugins/repository-s3/licenses/jaxb-api-2.2.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -aeb3021ca93dde265796d82015beecdcff95bf09 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jaxb-api-2.3.1.jar.sha1 b/plugins/repository-s3/licenses/jaxb-api-2.3.1.jar.sha1 new file mode 100644 index 0000000000000..f4434214e1eec --- /dev/null +++ b/plugins/repository-s3/licenses/jaxb-api-2.3.1.jar.sha1 @@ -0,0 +1 @@ +8531ad5ac454cc2deb9d4d32c40c4d7451939b5d \ No newline at end of file diff --git a/plugins/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml b/plugins/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml index 0ec3d272ee02d..a0c2d2e593a47 100644 --- a/plugins/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml +++ b/plugins/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml @@ -131,17 +131,14 @@ setup: body: - index: _index: docs - _type: doc _id: 1 - snapshot: one - index: _index: docs - _type: doc _id: 2 - snapshot: one - index: _index: docs - _type: doc _id: 3 - snapshot: one diff --git a/plugins/store-smb/src/yamlRestTest/resources/rest-api-spec/test/store_smb/15_index_creation.yml b/plugins/store-smb/src/yamlRestTest/resources/rest-api-spec/test/store_smb/15_index_creation.yml index 09e59c7fc9d9a..fbbdcb8f153e0 100644 --- a/plugins/store-smb/src/yamlRestTest/resources/rest-api-spec/test/store_smb/15_index_creation.yml +++ b/plugins/store-smb/src/yamlRestTest/resources/rest-api-spec/test/store_smb/15_index_creation.yml @@ -19,7 +19,6 @@ id: 1 - match: { _index: smb-test } - - match: { _type: _doc } - match: { _id: "1"} - match: { _version: 1} - match: { _source: { foo: bar }} diff --git a/plugins/transport-nio/build.gradle b/plugins/transport-nio/build.gradle index 6e309e109247b..88355cdf22728 100644 --- a/plugins/transport-nio/build.gradle +++ b/plugins/transport-nio/build.gradle @@ -82,10 +82,7 @@ thirdPartyAudit { // from io.netty.handler.ssl.util.BouncyCastleSelfSignedCertGenerator (netty) 'org.bouncycastle.cert.X509v3CertificateBuilder', 'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter', - 
'org.bouncycastle.cert.jcajce.JcaX509v3CertificateBuilder', - 'org.bouncycastle.jce.provider.BouncyCastleProvider', 'org.bouncycastle.operator.jcajce.JcaContentSignerBuilder', - 'org.bouncycastle.asn1.x500.X500Name', // from io.netty.handler.ssl.JettyNpnSslEngine (netty) 'org.eclipse.jetty.npn.NextProtoNego$ClientProvider', diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.72.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.72.Final.jar.sha1 deleted file mode 100644 index f1398e52d8c74..0000000000000 --- a/plugins/transport-nio/licenses/netty-buffer-4.1.72.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f306eec3f79541f9b8af9c471a0d5b63b7996272 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.73.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.73.Final.jar.sha1 new file mode 100644 index 0000000000000..e5833785ebb7e --- /dev/null +++ b/plugins/transport-nio/licenses/netty-buffer-4.1.73.Final.jar.sha1 @@ -0,0 +1 @@ +244a569c9aae973f6f485ac9801d79c1eca36daa \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.72.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.72.Final.jar.sha1 deleted file mode 100644 index f70b5c0909d7c..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-4.1.72.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -613c4019d687db4e9a5532564e442f83c4474ed7 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.73.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.73.Final.jar.sha1 new file mode 100644 index 0000000000000..dcdc1e4e58afe --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-4.1.73.Final.jar.sha1 @@ -0,0 +1 @@ +9496a30a349863a4c6fa10d5c36b4f3b495d3a31 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.72.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.72.Final.jar.sha1 deleted file mode 100644 index 8c7611afca886..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-http-4.1.72.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a8f062d67303a5e4b2bc2ad48fb4fd8c99108e45 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.73.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.73.Final.jar.sha1 new file mode 100644 index 0000000000000..374cfb98614d5 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.73.Final.jar.sha1 @@ -0,0 +1 @@ +1ceeac4429b9bd517dc05e376a144bbe6b6bd038 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.72.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.72.Final.jar.sha1 deleted file mode 100644 index bfdf4a5cf8585..0000000000000 --- a/plugins/transport-nio/licenses/netty-common-4.1.72.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a55bac9c3af5f59828207b551a96ac19bbfc341e \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.73.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.73.Final.jar.sha1 new file mode 100644 index 0000000000000..e80a6e2569d81 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-common-4.1.73.Final.jar.sha1 @@ -0,0 +1 @@ +27731b58d741b6faa6a00fa3285e7a55cc47be01 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.72.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.72.Final.jar.sha1 deleted file mode 100644 index d6cc1771a2964..0000000000000 
--- a/plugins/transport-nio/licenses/netty-handler-4.1.72.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9feee089fee606c64be90c0332db9aef1f7d8e46 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.73.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.73.Final.jar.sha1 new file mode 100644 index 0000000000000..0e227997874bf --- /dev/null +++ b/plugins/transport-nio/licenses/netty-handler-4.1.73.Final.jar.sha1 @@ -0,0 +1 @@ +1a2231c0074f88254865c3769a4b5842939ea04d \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.72.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.72.Final.jar.sha1 deleted file mode 100644 index d08a6f6e7e42d..0000000000000 --- a/plugins/transport-nio/licenses/netty-resolver-4.1.72.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4ff458458ea32ed1156086820b624a815fcbf2c0 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.73.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.73.Final.jar.sha1 new file mode 100644 index 0000000000000..ba24531724fb5 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-resolver-4.1.73.Final.jar.sha1 @@ -0,0 +1 @@ +bfe83710f0c1739019613e81a06101020ca65def \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.72.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.72.Final.jar.sha1 deleted file mode 100644 index 603f145303012..0000000000000 --- a/plugins/transport-nio/licenses/netty-transport-4.1.72.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -99138b436a584879355aca8fe3c64b46227d5d79 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.73.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.73.Final.jar.sha1 new file mode 100644 index 0000000000000..6a8647497f210 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-transport-4.1.73.Final.jar.sha1 @@ -0,0 +1 @@ +abb155ddff196ccedfe85b810d4b9375ef85fcfa \ No newline at end of file diff --git a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/HttpReadWriteHandler.java b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/HttpReadWriteHandler.java index 1d705bce64852..561695c06effe 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/HttpReadWriteHandler.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/HttpReadWriteHandler.java @@ -199,8 +199,8 @@ private static boolean assertMessageTypes(Object message) { + ". Found type: " + message.getClass() + "."; - assert ((HttpPipelinedResponse) message) - .getDelegateRequest() instanceof NioHttpResponse : "This channel only pipelined responses with a delegate of type: " + assert ((HttpPipelinedResponse) message).getDelegateRequest() instanceof NioHttpResponse + : "This channel only pipelined responses with a delegate of type: " + NioHttpResponse.class + ". 
Found type: " + ((HttpPipelinedResponse) message).getDelegateRequest().getClass() diff --git a/qa/evil-tests/src/test/java/org/opensearch/cluster/routing/EvilSystemPropertyTests.java b/qa/evil-tests/src/test/java/org/opensearch/cluster/routing/EvilSystemPropertyTests.java deleted file mode 100644 index a53683e38f8a9..0000000000000 --- a/qa/evil-tests/src/test/java/org/opensearch/cluster/routing/EvilSystemPropertyTests.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.cluster.routing; - -import org.opensearch.common.SuppressForbidden; -import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.settings.Settings; -import org.opensearch.test.OpenSearchTestCase; - -import static org.hamcrest.Matchers.equalTo; - -public class EvilSystemPropertyTests extends OpenSearchTestCase { - - @SuppressForbidden(reason = "manipulates system properties for testing") - public void testDisableSearchAllocationAwareness() { - Settings indexSettings = Settings.builder() - .put("cluster.routing.allocation.awareness.attributes", "test") - .build(); - OperationRouting routing = new OperationRouting(indexSettings, - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); - assertWarnings(OperationRouting.IGNORE_AWARENESS_ATTRIBUTES_DEPRECATION_MESSAGE); - assertThat(routing.getAwarenessAttributes().size(), equalTo(1)); - assertThat(routing.getAwarenessAttributes().get(0), equalTo("test")); - System.setProperty("opensearch.search.ignore_awareness_attributes", "true"); - try { - routing = new OperationRouting(indexSettings, - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); - assertTrue(routing.getAwarenessAttributes().isEmpty()); - } finally { - System.clearProperty("opensearch.search.ignore_awareness_attributes"); - } - - } -} diff --git a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java index 3c6232569f3f1..629e325427162 100644 --- a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java @@ -49,10 +49,6 @@ import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.common.xcontent.support.XContentMapValues; import org.opensearch.index.IndexSettings; -import 
org.opensearch.rest.action.document.RestBulkAction; -import org.opensearch.rest.action.document.RestIndexAction; -import org.opensearch.rest.action.document.RestUpdateAction; -import org.opensearch.rest.action.search.RestExplainAction; import org.opensearch.test.NotEqualMessageBuilder; import org.opensearch.test.XContentTestUtils; import org.opensearch.test.rest.OpenSearchRestTestCase; @@ -97,6 +93,7 @@ * with {@code tests.is_old_cluster} set to {@code false}. */ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase { + private String index; private String type; @@ -163,6 +160,7 @@ public void testSearch() throws Exception { count, true, true, + randomBoolean(), i -> JsonXContent.contentBuilder().startObject() .field("string", randomAlphaOfLength(10)) .field("int", randomInt(100)) @@ -182,7 +180,7 @@ public void testSearch() throws Exception { assertBasicSearchWorks(count); assertAllSearchWorks(count); assertBasicAggregationWorks(); - assertRealtimeGetWorks(type); + assertRealtimeGetWorks(); assertStoredBinaryFields(count); } @@ -198,9 +196,6 @@ public void testNewReplicasWork() throws Exception { } { mappingsAndSettings.startObject("mappings"); - if (isRunningAgainstAncientCluster()) { - mappingsAndSettings.startObject(type); - } mappingsAndSettings.startObject("properties"); { mappingsAndSettings.startObject("field"); @@ -208,21 +203,17 @@ public void testNewReplicasWork() throws Exception { mappingsAndSettings.endObject(); } mappingsAndSettings.endObject(); - if (isRunningAgainstAncientCluster()) { - mappingsAndSettings.endObject(); - } mappingsAndSettings.endObject(); } mappingsAndSettings.endObject(); Request createIndex = new Request("PUT", "/" + index); createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); - createIndex.setOptions(allowTypesRemovalWarnings()); client().performRequest(createIndex); int numDocs = randomIntBetween(2000, 3000); indexRandomDocuments( - numDocs, true, false, i -> JsonXContent.contentBuilder().startObject().field("field", "value").endObject()); + numDocs, true, false, randomBoolean(), i -> JsonXContent.contentBuilder().startObject().field("field", "value").endObject()); logger.info("Refreshing [{}]", index); client().performRequest(new Request("POST", "/" + index + "/_refresh")); } else { @@ -304,9 +295,6 @@ public void testShrink() throws IOException { { mappingsAndSettings.startObject("mappings"); { - if (isRunningAgainstAncientCluster()) { - mappingsAndSettings.startObject(type); - } mappingsAndSettings.startObject("properties"); { mappingsAndSettings.startObject("field"); @@ -316,30 +304,23 @@ public void testShrink() throws IOException { mappingsAndSettings.endObject(); } mappingsAndSettings.endObject(); - if (isRunningAgainstAncientCluster()) { - mappingsAndSettings.endObject(); - } } mappingsAndSettings.endObject(); - if (isRunningAgainstAncientCluster() == false) { - // the default number of shards is now one so we have to set the number of shards to be more than one explicitly - mappingsAndSettings.startObject("settings"); - { - mappingsAndSettings.field("index.number_of_shards", 5); - } - mappingsAndSettings.endObject(); + mappingsAndSettings.startObject("settings"); + { + mappingsAndSettings.field("index.number_of_shards", 5); } + mappingsAndSettings.endObject(); } mappingsAndSettings.endObject(); Request createIndex = new Request("PUT", "/" + index); createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); - createIndex.setOptions(allowTypesRemovalWarnings()); client().performRequest(createIndex); 
numDocs = randomIntBetween(512, 1024); indexRandomDocuments( - numDocs, true, true, i -> JsonXContent.contentBuilder().startObject().field("field", "value").endObject()); + numDocs, true, true, randomBoolean(), i -> JsonXContent.contentBuilder().startObject().field("field", "value").endObject()); ensureGreen(index); // wait for source index to be available on both nodes before starting shrink @@ -387,9 +368,6 @@ public void testShrinkAfterUpgrade() throws IOException { { mappingsAndSettings.startObject("mappings"); { - if (isRunningAgainstAncientCluster()) { - mappingsAndSettings.startObject(type); - } mappingsAndSettings.startObject("properties"); { mappingsAndSettings.startObject("field"); @@ -399,23 +377,17 @@ public void testShrinkAfterUpgrade() throws IOException { mappingsAndSettings.endObject(); } mappingsAndSettings.endObject(); - if (isRunningAgainstAncientCluster()) { - mappingsAndSettings.endObject(); - } } mappingsAndSettings.endObject(); - if (isRunningAgainstAncientCluster() == false) { - // the default number of shards is now one so we have to set the number of shards to be more than one explicitly - mappingsAndSettings.startObject("settings"); - mappingsAndSettings.field("index.number_of_shards", 5); - mappingsAndSettings.endObject(); - } + // the default number of shards is now one so we have to set the number of shards to be more than one explicitly + mappingsAndSettings.startObject("settings"); + mappingsAndSettings.field("index.number_of_shards", 5); + mappingsAndSettings.endObject(); } mappingsAndSettings.endObject(); Request createIndex = new Request("PUT", "/" + index); createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); - createIndex.setOptions(allowTypesRemovalWarnings()); client().performRequest(createIndex); numDocs = randomIntBetween(512, 1024); @@ -423,6 +395,7 @@ public void testShrinkAfterUpgrade() throws IOException { numDocs, true, true, + randomBoolean(), i -> JsonXContent.contentBuilder().startObject().field("field", "value").endObject() ); } else { @@ -491,15 +464,13 @@ public void testRollover() throws IOException { bulk.append("{\"index\":{}}\n"); bulk.append("{\"test\":\"test\"}\n"); } - Request bulkRequest = new Request("POST", "/" + index + "_write/" + type + "/_bulk"); + Request bulkRequest = new Request("POST", "/" + index + "_write/_bulk"); bulkRequest.setJsonEntity(bulk.toString()); bulkRequest.addParameter("refresh", ""); - bulkRequest.setOptions(expectWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE)); assertThat(EntityUtils.toString(client().performRequest(bulkRequest).getEntity()), containsString("\"errors\":false")); if (isRunningAgainstOldCluster()) { Request rolloverRequest = new Request("POST", "/" + index + "_write/_rollover"); - rolloverRequest.setOptions(allowTypesRemovalWarnings()); rolloverRequest.setJsonEntity("{" + " \"conditions\": {" + " \"max_docs\": 5" @@ -569,12 +540,10 @@ void assertAllSearchWorks(int count) throws IOException { // the 'string' field has a boost of 4 in the mappings so it should get a payload boost String stringValue = (String) XContentMapValues.extractValue("_source.string", bestHit); assertNotNull(stringValue); - String type = (String) bestHit.get("_type"); String id = (String) bestHit.get("_id"); - Request explainRequest = new Request("GET", "/" + index + "/" + type + "/" + id + "/_explain"); + Request explainRequest = new Request("GET", "/" + index + "/_explain" + "/" + id); explainRequest.setJsonEntity("{ \"query\": { \"match_all\" : {} }}"); - 
explainRequest.setOptions(expectWarnings(RestExplainAction.TYPES_DEPRECATION_MESSAGE)); String explanation = toStr(client().performRequest(explainRequest)); assertFalse("Could not find payload boost in explanation\n" + explanation, explanation.contains("payloadBoost")); @@ -614,7 +583,7 @@ void assertBasicAggregationWorks() throws IOException { assertTotalHits(termsCount, boolTerms); } - void assertRealtimeGetWorks(final String typeName) throws IOException { + void assertRealtimeGetWorks() throws IOException { Request disableAutoRefresh = new Request("PUT", "/" + index + "/_settings"); disableAutoRefresh.setJsonEntity("{ \"index\": { \"refresh_interval\" : -1 }}"); client().performRequest(disableAutoRefresh); @@ -625,12 +594,11 @@ void assertRealtimeGetWorks(final String typeName) throws IOException { Map<?, ?> hit = (Map<?, ?>) ((List<?>) (XContentMapValues.extractValue("hits.hits", searchResponse))).get(0); String docId = (String) hit.get("_id"); - Request updateRequest = new Request("POST", "/" + index + "/" + typeName + "/" + docId + "/_update"); - updateRequest.setOptions(expectWarnings(RestUpdateAction.TYPES_DEPRECATION_MESSAGE)); + Request updateRequest = new Request("POST", "/" + index + "/_update/" + docId); updateRequest.setJsonEntity("{ \"doc\" : { \"foo\": \"bar\"}}"); client().performRequest(updateRequest); - Request getRequest = new Request("GET", "/" + index + "/" + typeName + "/" + docId); + Request getRequest = new Request("GET", "/" + index + "/" + type + "/" + docId); Map<String, Object> getRsp = entityAsMap(client().performRequest(getRequest)); Map<?, ?> source = (Map<?, ?>) getRsp.get("_source"); assertTrue("doc does not contain 'foo' key: " + source, source.containsKey("foo")); @@ -696,7 +664,6 @@ public void testEmptyShard() throws IOException { // before timing out .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms") .put(SETTING_ALLOCATION_MAX_RETRY.getKey(), "0"); // fail faster - settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); if (randomBoolean()) { settings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), "-1"); } @@ -719,8 +686,13 @@ public void testRecovery() throws Exception { * an index without a translog so we randomize whether * or not we have one. */ shouldHaveTranslog = randomBoolean(); - - indexRandomDocuments(count, true, true, i -> jsonBuilder().startObject().field("field", "value").endObject()); + Settings.Builder settings = Settings.builder(); + if (minimumNodeVersion().before(Version.V_2_0_0) && randomBoolean()) { + settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); + } + final String mappings = randomBoolean() ?
"\"_source\": { \"enabled\": false}" : null; + createIndex(index, settings.build(), mappings); + indexRandomDocuments(count, true, true, true, i -> jsonBuilder().startObject().field("field", "value").endObject()); // make sure all recoveries are done ensureGreen(index); @@ -731,28 +703,26 @@ public void testRecovery() throws Exception { flushRequest.addParameter("wait_if_ongoing", "true"); assertOK(client().performRequest(flushRequest)); - if (randomBoolean()) { - performSyncedFlush(index, randomBoolean()); - } if (shouldHaveTranslog) { // Update a few documents so we are sure to have a translog indexRandomDocuments( - count / 10, - false, // flushing here would invalidate the whole thing - false, - i -> jsonBuilder().startObject().field("field", "value").endObject() + count / 10, + false, // flushing here would invalidate the whole thing + false, + true, + i -> jsonBuilder().startObject().field("field", "value").endObject() ); } - saveInfoDocument("should_have_translog", Boolean.toString(shouldHaveTranslog)); + saveInfoDocument(index + "_should_have_translog", Boolean.toString(shouldHaveTranslog)); } else { count = countOfIndexedRandomDocuments(); - shouldHaveTranslog = Booleans.parseBoolean(loadInfoDocument("should_have_translog")); + shouldHaveTranslog = Booleans.parseBoolean(loadInfoDocument(index + "_should_have_translog")); } // Count the documents in the index to make sure we have as many as we put there Request countRequest = new Request("GET", "/" + index + "/_search"); countRequest.addParameter("size", "0"); - refresh(); + refreshAllIndices(); Map<String, Object> countResponse = entityAsMap(client().performRequest(countRequest)); assertTotalHits(count, countResponse); @@ -785,6 +755,7 @@ public void testRecovery() throws Exception { String currentLuceneVersion = Version.CURRENT.luceneVersion.toString(); String bwcLuceneVersion = getOldClusterVersion().luceneVersion.toString(); + String minCompatibleBWCVersion = Version.CURRENT.minimumCompatibilityVersion().luceneVersion.toString(); if (shouldHaveTranslog && false == currentLuceneVersion.equals(bwcLuceneVersion)) { int numCurrentVersion = 0; int numBwcVersion = 0; @@ -803,6 +774,10 @@ public void testRecovery() throws Exception { numCurrentVersion++; } else if (bwcLuceneVersion.equals(version)) { numBwcVersion++; + } else if (minCompatibleBWCVersion.equals(version) && minCompatibleBWCVersion.equals(bwcLuceneVersion) == false) { + // Our upgrade path from 7.non-last always goes through 7.last, which depending on timing can create 7.last + // index segment. We ignore those.
+ continue; } else { fail("expected version to be one of [" + currentLuceneVersion + "," + bwcLuceneVersion + "] but was " + line); } @@ -828,7 +803,12 @@ public void testSnapshotRestore() throws IOException { if (isRunningAgainstOldCluster()) { // Create the index count = between(200, 300); - indexRandomDocuments(count, true, true, i -> jsonBuilder().startObject().field("field", "value").endObject()); + Settings.Builder settings = Settings.builder(); + if (minimumNodeVersion().before(Version.V_2_0_0) && randomBoolean()) { + settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); + } + createIndex(index, settings.build()); + indexRandomDocuments(count, true, true, randomBoolean(), i -> jsonBuilder().startObject().field("field", "value").endObject()); } else { count = countOfIndexedRandomDocuments(); } @@ -856,9 +836,6 @@ public void testSnapshotRestore() throws IOException { } templateBuilder.endObject(); templateBuilder.startObject("mappings"); { - if (isRunningAgainstAncientCluster()) { - templateBuilder.startObject(type); - } { templateBuilder.startObject("_source"); { @@ -866,9 +843,6 @@ public void testSnapshotRestore() throws IOException { } templateBuilder.endObject(); } - if (isRunningAgainstAncientCluster()) { - templateBuilder.endObject(); - } } templateBuilder.endObject(); templateBuilder.startObject("aliases"); { @@ -887,7 +861,6 @@ public void testSnapshotRestore() throws IOException { templateBuilder.endObject().endObject(); Request createTemplateRequest = new Request("PUT", "/_template/test_template"); createTemplateRequest.setJsonEntity(Strings.toString(templateBuilder)); - createTemplateRequest.setOptions(allowTypesRemovalWarnings()); client().performRequest(createTemplateRequest); @@ -978,13 +951,10 @@ public void testSoftDeletes() throws Exception { int numDocs = between(10, 100); for (int i = 0; i < numDocs; i++) { String doc = Strings.toString(JsonXContent.contentBuilder().startObject().field("field", "v1").endObject()); - Request request = new Request("POST", "/" + index + "/" + type + "/" + i); - if (isRunningAgainstAncientCluster() == false) { - request.setOptions(expectWarnings(RestIndexAction.TYPES_DEPRECATION_MESSAGE)); - } + Request request = new Request("POST", "/" + index + "/_doc/" + i); request.setJsonEntity(doc); client().performRequest(request); - refresh(); + refreshAllIndices(); } client().performRequest(new Request("POST", "/" + index + "/_flush")); int liveDocs = numDocs; @@ -992,19 +962,19 @@ public void testSoftDeletes() throws Exception { for (int i = 0; i < numDocs; i++) { if (randomBoolean()) { String doc = Strings.toString(JsonXContent.contentBuilder().startObject().field("field", "v2").endObject()); - Request request = new Request("POST", "/" + index + "/" + type + "/" + i); + Request request = new Request("POST", "/" + index + "/_doc/" + i); request.setJsonEntity(doc); client().performRequest(request); } else if (randomBoolean()) { - client().performRequest(new Request("DELETE", "/" + index + "/" + type + "/" + i)); + client().performRequest(new Request("DELETE", "/" + index + "/_doc/" + i)); liveDocs--; } } - refresh(); + refreshAllIndices(); assertTotalHits(liveDocs, entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search")))); - saveInfoDocument("doc_count", Integer.toString(liveDocs)); + saveInfoDocument(index + "_doc_count", Integer.toString(liveDocs)); } else { - int liveDocs = Integer.parseInt(loadInfoDocument("doc_count")); + int liveDocs = Integer.parseInt(loadInfoDocument(index + 
"_doc_count")); assertTotalHits(liveDocs, entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search")))); } } @@ -1139,10 +1109,9 @@ private void checkSnapshot(final String snapshotName, final int count, final Ver bulk.append("{\"index\":{\"_id\":\"").append(count + i).append("\"}}\n"); bulk.append("{\"test\":\"test\"}\n"); } - Request writeToRestoredRequest = new Request("POST", "/restored_" + index + "/" + type + "/_bulk"); + Request writeToRestoredRequest = new Request("POST", "/restored_" + index + "/_bulk"); writeToRestoredRequest.addParameter("refresh", "true"); writeToRestoredRequest.setJsonEntity(bulk.toString()); - writeToRestoredRequest.setOptions(expectWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE)); assertThat(EntityUtils.toString(client().performRequest(writeToRestoredRequest).getEntity()), containsString("\"errors\":false")); // And count to make sure the add worked @@ -1150,7 +1119,7 @@ private void checkSnapshot(final String snapshotName, final int count, final Ver Request countAfterWriteRequest = new Request("GET", "/restored_" + index + "/_search"); countAfterWriteRequest.addParameter("size", "0"); Map countAfterResponse = entityAsMap(client().performRequest(countRequest)); - assertTotalHits(count+extras, countAfterResponse); + assertTotalHits(count + extras, countAfterResponse); // Clean up the index for the next iteration client().performRequest(new Request("DELETE", "/restored_*")); @@ -1160,24 +1129,17 @@ private void checkSnapshot(final String snapshotName, final int count, final Ver clusterSettingsRequest.addParameter("flat_settings", "true"); Map clusterSettingsResponse = entityAsMap(client().performRequest(clusterSettingsRequest)); @SuppressWarnings("unchecked") final Map persistentSettings = - (Map)clusterSettingsResponse.get("persistent"); + (Map) clusterSettingsResponse.get("persistent"); assertThat(persistentSettings.get("cluster.routing.allocation.exclude.test_attr"), equalTo(getOldClusterVersion().toString())); // Check that the template was restored successfully Request getTemplateRequest = new Request("GET", "/_template/test_template"); - getTemplateRequest.setOptions(allowTypesRemovalWarnings()); Map getTemplateResponse = entityAsMap(client().performRequest(getTemplateRequest)); Map expectedTemplate = new HashMap<>(); expectedTemplate.put("index_patterns", singletonList("evil_*")); expectedTemplate.put("settings", singletonMap("index", singletonMap("number_of_shards", "1"))); - // We don't have the type in the response starting with 7.0, but we won't have it on old cluster after upgrade - // either so look at the response to figure out the correct assertions - if (isTypeInTemplateResponse(getTemplateResponse)) { - expectedTemplate.put("mappings", singletonMap(type, singletonMap("_source", singletonMap("enabled", true)))); - } else { - expectedTemplate.put("mappings", singletonMap("_source", singletonMap("enabled", true))); - } + expectedTemplate.put("mappings", singletonMap("_source", singletonMap("enabled", true))); expectedTemplate.put("order", 0); Map aliases = new HashMap<>(); @@ -1193,32 +1155,23 @@ private void checkSnapshot(final String snapshotName, final int count, final Ver } } - @SuppressWarnings("unchecked") - private boolean isTypeInTemplateResponse(Map getTemplateResponse) { - return ( (Map) ( - (Map) getTemplateResponse.getOrDefault("test_template", emptyMap()) - ).get("mappings")).get("_source") == null; - } - // TODO tests for upgrades after shrink. We've had trouble with shrink in the past. 
private void indexRandomDocuments( - final int count, - final boolean flushAllowed, - final boolean saveInfo, - final CheckedFunction docSupplier) - throws IOException { + final int count, + final boolean flushAllowed, + final boolean saveInfo, + final boolean specifyId, + final CheckedFunction docSupplier + ) throws IOException { logger.info("Indexing {} random documents", count); for (int i = 0; i < count; i++) { logger.debug("Indexing document [{}]", i); - Request createDocument = new Request("POST", "/" + index + "/" + type + "/" + i); - if (isRunningAgainstAncientCluster() == false) { - createDocument.setOptions(expectWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE)); - } + Request createDocument = new Request("POST", "/" + index + "/_doc/" + (specifyId ? i : "")); createDocument.setJsonEntity(Strings.toString(docSupplier.apply(i))); client().performRequest(createDocument); if (rarely()) { - refresh(); + refreshAllIndices(); } if (flushAllowed && rarely()) { logger.debug("Flushing [{}]", index); @@ -1226,7 +1179,7 @@ private void indexRandomDocuments( } } if (saveInfo) { - saveInfoDocument("count", Integer.toString(count)); + saveInfoDocument(index + "_count", Integer.toString(count)); } } @@ -1237,25 +1190,22 @@ private void indexDocument(String id) throws IOException { } private int countOfIndexedRandomDocuments() throws IOException { - return Integer.parseInt(loadInfoDocument("count")); + return Integer.parseInt(loadInfoDocument(index + "_count")); } - private void saveInfoDocument(String type, String value) throws IOException { + private void saveInfoDocument(String id, String value) throws IOException { XContentBuilder infoDoc = JsonXContent.contentBuilder().startObject(); infoDoc.field("value", value); infoDoc.endObject(); // Only create the first version so we know how many documents are created when the index is first created - Request request = new Request("PUT", "/info/" + this.type + "/" + index + "_" + type); + Request request = new Request("PUT", "/info/" + type + "/" + id); request.addParameter("op_type", "create"); request.setJsonEntity(Strings.toString(infoDoc)); - if (isRunningAgainstAncientCluster() == false) { - request.setOptions(expectWarnings(RestIndexAction.TYPES_DEPRECATION_MESSAGE)); - } client().performRequest(request); } - private String loadInfoDocument(String type) throws IOException { - Request request = new Request("GET", "/info/" + this.type + "/" + index + "_" + type); + private String loadInfoDocument(String id) throws IOException { + Request request = new Request("GET", "/info/_doc/" + id); request.addParameter("filter_path", "_source"); String doc = toStr(client().performRequest(request)); Matcher m = Pattern.compile("\"value\":\"(.+)\"").matcher(doc); @@ -1263,10 +1213,6 @@ private String loadInfoDocument(String type) throws IOException { return m.group(1); } - private Object randomLenientBoolean() { - return randomFrom(new Object[] {"off", "no", "0", 0, "false", false, "on", "yes", "1", 1, "true", true}); - } - private void refresh() throws IOException { logger.debug("Refreshing [{}]", index); client().performRequest(new Request("POST", "/" + index + "/_refresh")); @@ -1337,7 +1283,9 @@ public void testOperationBasedRecovery() throws Exception { final Settings.Builder settings = Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1); - settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); + if (minimumNodeVersion().before(Version.V_2_0_0)) { + 
settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); + } createIndex(index, settings.build()); ensureGreen(index); int committedDocs = randomIntBetween(100, 200); @@ -1392,7 +1340,9 @@ public void testRecoveryWithTranslogRetentionDisabled() throws Exception { final Settings.Builder settings = Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1); - settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); + if (minimumNodeVersion().before(Version.V_2_0_0)) { + settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); + } if (randomBoolean()) { settings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), "-1"); } @@ -1415,7 +1365,7 @@ public void testRecoveryWithTranslogRetentionDisabled() throws Exception { if (randomBoolean()) { flush(index, randomBoolean()); } else if (randomBoolean()) { - performSyncedFlush(index, randomBoolean()); + syncedFlush(index, randomBoolean()); } saveInfoDocument("doc_count", Integer.toString(numDocs)); } @@ -1424,6 +1374,72 @@ public void testRecoveryWithTranslogRetentionDisabled() throws Exception { assertTotalHits(numDocs, entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search")))); } + public void testResize() throws Exception { + int numDocs; + if (isRunningAgainstOldCluster()) { + final Settings.Builder settings = Settings.builder() + .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 3) + .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1); + if (minimumNodeVersion().before(Version.V_2_0_0) && randomBoolean()) { + settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false); + } + final String mappings = randomBoolean() ? 
"\"_source\": { \"enabled\": false}" : null; + createIndex(index, settings.build(), mappings); + numDocs = randomIntBetween(10, 1000); + for (int i = 0; i < numDocs; i++) { + indexDocument(Integer.toString(i)); + if (rarely()) { + flush(index, randomBoolean()); + } + } + saveInfoDocument("num_doc_" + index, Integer.toString(numDocs)); + ensureGreen(index); + } else { + ensureGreen(index); + numDocs = Integer.parseInt(loadInfoDocument("num_doc_" + index)); + int moreDocs = randomIntBetween(0, 100); + for (int i = 0; i < moreDocs; i++) { + indexDocument(Integer.toString(numDocs + i)); + if (rarely()) { + flush(index, randomBoolean()); + } + } + Request updateSettingsRequest = new Request("PUT", "/" + index + "/_settings"); + updateSettingsRequest.setJsonEntity("{\"settings\": {\"index.blocks.write\": true}}"); + client().performRequest(updateSettingsRequest); + { + final String target = index + "_shrunken"; + Request shrinkRequest = new Request("PUT", "/" + index + "/_shrink/" + target); + Settings.Builder settings = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1); + if (randomBoolean()) { + settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true); + } + shrinkRequest.setJsonEntity("{\"settings\":" + Strings.toString(settings.build()) + "}"); + client().performRequest(shrinkRequest); + ensureGreenLongWait(target); + assertNumHits(target, numDocs + moreDocs, 1); + } + { + final String target = index + "_split"; + Settings.Builder settings = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 6); + if (randomBoolean()) { + settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true); + } + Request splitRequest = new Request("PUT", "/" + index + "/_split/" + target); + splitRequest.setJsonEntity("{\"settings\":" + Strings.toString(settings.build()) + "}"); + client().performRequest(splitRequest); + ensureGreenLongWait(target); + assertNumHits(target, numDocs + moreDocs, 6); + } + { + final String target = index + "_cloned"; + client().performRequest(new Request("PUT", "/" + index + "/_clone/" + target)); + ensureGreenLongWait(target); + assertNumHits(target, numDocs + moreDocs, 3); + } + } + } + @SuppressWarnings("unchecked") public void testSystemIndexMetadataIsUpgraded() throws Exception { final String systemIndexWarning = "this request accesses system indices: [.tasks], but in a future major version, direct " + @@ -1436,11 +1452,7 @@ public void testSystemIndexMetadataIsUpgraded() throws Exception { Request bulk = new Request("POST", "/_bulk"); bulk.addParameter("refresh", "true"); - bulk.setJsonEntity("{\"index\": {\"_index\": \"test_index_old\", \"_type\" : \"" + type + "\"}}\n" + - "{\"f1\": \"v1\", \"f2\": \"v2\"}\n"); - if (isRunningAgainstAncientCluster() == false) { - bulk.setOptions(expectWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE)); - } + bulk.setJsonEntity("{\"index\": {\"_index\": \"test_index_old\"}}\n" + "{\"f1\": \"v1\", \"f2\": \"v2\"}\n"); client().performRequest(bulk); // start a async reindex job @@ -1529,16 +1541,17 @@ public void testSystemIndexMetadataIsUpgraded() throws Exception { } public void testEnableSoftDeletesOnRestore() throws Exception { + assumeTrue("soft deletes must be enabled on 2.0+", getOldClusterVersion().before(Version.V_2_0_0)); final String snapshot = "snapshot-" + index; if (isRunningAgainstOldCluster()) { final Settings.Builder settings = Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1); - 
settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); + settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false); createIndex(index, settings.build()); ensureGreen(index); int numDocs = randomIntBetween(0, 100); - indexRandomDocuments(numDocs, true, true, i -> jsonBuilder().startObject().field("field", "value").endObject()); + indexRandomDocuments(numDocs, true, true, randomBoolean(), i -> jsonBuilder().startObject().field("field", "value").endObject()); // create repo XContentBuilder repoConfig = JsonXContent.contentBuilder().startObject(); { @@ -1592,7 +1605,7 @@ public void testForbidDisableSoftDeletesOnRestore() throws Exception { createIndex(index, settings.build()); ensureGreen(index); int numDocs = randomIntBetween(0, 100); - indexRandomDocuments(numDocs, true, true, i -> jsonBuilder().startObject().field("field", "value").endObject()); + indexRandomDocuments(numDocs, true, true, randomBoolean(), i -> jsonBuilder().startObject().field("field", "value").endObject()); // create repo XContentBuilder repoConfig = JsonXContent.contentBuilder().startObject(); { diff --git a/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java index 8e21998b50525..b133a6462a525 100644 --- a/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java +++ b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java @@ -35,25 +35,29 @@ import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.client.Request; +import org.opensearch.client.RequestOptions; import org.opensearch.client.Response; +import org.opensearch.client.ResponseException; import org.opensearch.client.RestClient; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Strings; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.common.xcontent.support.XContentMapValues; import org.opensearch.index.seqno.SeqNoStats; -import org.opensearch.rest.action.document.RestGetAction; -import org.opensearch.rest.action.document.RestIndexAction; +import org.opensearch.rest.RestStatus; import org.opensearch.test.rest.OpenSearchRestTestCase; import org.opensearch.test.rest.yaml.ObjectPath; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.stream.Collectors; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; public class IndexingIT extends OpenSearchRestTestCase { @@ -61,9 +65,8 @@ public class IndexingIT extends OpenSearchRestTestCase { private int indexDocs(String index, final int idStart, final int numDocs) throws IOException { for (int i = 0; i < numDocs; i++) { final int id = idStart + i; - Request request = new Request("PUT", index + "/doc/" + id); + Request request = new Request("PUT", index + "/_doc/" + id); request.setJsonEntity("{\"test\": \"test_" + randomAlphaOfLength(2) + "\"}"); - request.setOptions(expectWarnings(RestIndexAction.TYPES_DEPRECATION_MESSAGE)); assertOK(client().performRequest(request)); } return numDocs; @@ -295,6 +298,59 @@ public void testUpdateSnapshotStatus() throws Exception { request.setJsonEntity("{\"indices\": \"" + index + "\"}"); } + public void testSyncedFlushTransition() throws Exception { + Nodes nodes = buildNodeAndVersions(); + assumeTrue("bwc version is on 1.x or 
Legacy 7.x", nodes.getBWCVersion().before(Version.V_2_0_0)); + assumeFalse("no new node found", nodes.getNewNodes().isEmpty()); + assumeFalse("no bwc node found", nodes.getBWCNodes().isEmpty()); + // Allocate shards to new nodes then verify synced flush requests processed by old nodes/new nodes + String newNodes = nodes.getNewNodes().stream().map(Node::getNodeName).collect(Collectors.joining(",")); + int numShards = randomIntBetween(1, 10); + int numOfReplicas = randomIntBetween(0, nodes.getNewNodes().size() - 1); + int totalShards = numShards * (numOfReplicas + 1); + final String index = "test_synced_flush"; + createIndex(index, Settings.builder() + .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numOfReplicas) + .put("index.routing.allocation.include._name", newNodes).build()); + ensureGreen(index); + indexDocs(index, randomIntBetween(0, 100), between(1, 100)); + try (RestClient oldNodeClient = buildClient(restClientSettings(), + nodes.getBWCNodes().stream().map(Node::getPublishAddress).toArray(HttpHost[]::new))) { + Request request = new Request("POST", index + "/_flush/synced"); + assertBusy(() -> { + ResponseException responseException = expectThrows(ResponseException.class, () -> oldNodeClient.performRequest(request)); + assertThat(responseException.getResponse().getStatusLine().getStatusCode(), equalTo(RestStatus.CONFLICT.getStatus())); + assertThat(responseException.getResponse().getWarnings(), + contains("Synced flush is deprecated and will be removed in 8.0. Use flush at _/flush or /{index}/_flush instead.")); + Map result = ObjectPath.createFromResponse(responseException.getResponse()).evaluate("_shards"); + assertThat(result.get("total"), equalTo(totalShards)); + assertThat(result.get("successful"), equalTo(0)); + assertThat(result.get("failed"), equalTo(totalShards)); + }); + Map stats = entityAsMap(client().performRequest(new Request("GET", index + "/_stats?level=shards"))); + assertThat(XContentMapValues.extractValue("indices." + index + ".total.translog.uncommitted_operations", stats), equalTo(0)); + } + indexDocs(index, randomIntBetween(0, 100), between(1, 100)); + try (RestClient newNodeClient = buildClient(restClientSettings(), + nodes.getNewNodes().stream().map(Node::getPublishAddress).toArray(HttpHost[]::new))) { + Request request = new Request("POST", index + "/_flush/synced"); + List warningMsg = Arrays.asList("Synced flush was removed and a normal flush was performed instead. " + + "This transition will be removed in a future version."); + RequestOptions.Builder requestOptionsBuilder = RequestOptions.DEFAULT.toBuilder(); + requestOptionsBuilder.setWarningsHandler(warnings -> warnings.equals(warningMsg) == false); + request.setOptions(requestOptionsBuilder); + assertBusy(() -> { + Map result = ObjectPath.createFromResponse(newNodeClient.performRequest(request)).evaluate("_shards"); + assertThat(result.get("total"), equalTo(totalShards)); + assertThat(result.get("successful"), equalTo(totalShards)); + assertThat(result.get("failed"), equalTo(0)); + }); + Map stats = entityAsMap(client().performRequest(new Request("GET", index + "/_stats?level=shards"))); + assertThat(XContentMapValues.extractValue("indices." 
+ index + ".total.translog.uncommitted_operations", stats), equalTo(0)); + } + } + private void assertCount(final String index, final String preference, final int expectedCount) throws IOException { Request request = new Request("GET", index + "/_count"); request.addParameter("preference", preference); @@ -305,9 +361,8 @@ private void assertCount(final String index, final String preference, final int } private void assertVersion(final String index, final int docId, final String preference, final int expectedVersion) throws IOException { - Request request = new Request("GET", index + "/doc/" + docId); + Request request = new Request("GET", index + "/_doc/" + docId); request.addParameter("preference", preference); - request.setOptions(expectWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE)); final Response response = client().performRequest(request); assertOK(response); diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yml index 35d9c02e7e362..f8a31c5ec9214 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yml @@ -96,10 +96,6 @@ --- "skip_unavailable is returned as part of _remote/info response": - - skip: - version: " - 6.0.99" - reason: "skip_unavailable is only returned from 6.1.0 on" - - do: cluster.get_settings: include_defaults: true diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/60_tophits.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/60_tophits.yml index 9d94e7d5abb3f..cc75ce692e6bf 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/60_tophits.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/60_tophits.yml @@ -24,7 +24,7 @@ teardown: bulk: refresh: true body: - - '{"index": {"_index": "single_doc_index", "_type": "test_type"}}' + - '{"index": {"_index": "single_doc_index"}}' - '{"f1": "local_cluster", "sort_field": 0}' - do: search: diff --git a/qa/os/src/test/java/org/opensearch/packaging/test/ArchiveTests.java b/qa/os/src/test/java/org/opensearch/packaging/test/ArchiveTests.java index e5464e8ee8d28..898ea12b6a6c3 100644 --- a/qa/os/src/test/java/org/opensearch/packaging/test/ArchiveTests.java +++ b/qa/os/src/test/java/org/opensearch/packaging/test/ArchiveTests.java @@ -85,6 +85,7 @@ public void test20PluginsListWithNoPlugins() throws Exception { public void test30MissingBundledJdk() throws Exception { final Installation.Executables bin = installation.executables(); sh.getEnv().remove("JAVA_HOME"); + sh.getEnv().remove("OPENSEARCH_JAVA_HOME"); final Path relocatedJdk = installation.bundledJdk.getParent().resolve("jdk.relocated"); @@ -105,6 +106,7 @@ public void test30MissingBundledJdk() throws Exception { public void test31BadJavaHome() throws Exception { final Installation.Executables bin = installation.executables(); + sh.getEnv().remove("OPENSEARCH_JAVA_HOME"); sh.getEnv().put("JAVA_HOME", "doesnotexist"); // ask for opensearch version to quickly exit if java is actually found (ie test failure) @@ -114,11 +116,23 @@ public void test31BadJavaHome() throws Exception { } + public void test31BadOpensearchJavaHome() throws Exception { + final Installation.Executables bin = installation.executables(); + sh.getEnv().put("OPENSEARCH_JAVA_HOME", "doesnotexist"); + + 
// ask for opensearch version to quickly exit if java is actually found (ie test failure) + final Result runResult = sh.runIgnoreExitCode(bin.opensearch.toString() + " -V"); + assertThat(runResult.exitCode, is(1)); + assertThat(runResult.stderr, containsString("could not find java in OPENSEARCH_JAVA_HOME")); + + } + public void test32SpecialCharactersInJdkPath() throws Exception { final Installation.Executables bin = installation.executables(); assumeTrue("Only run this test when we know where the JDK is.", distribution().hasJdk); final Path relocatedJdk = installation.bundledJdk.getParent().resolve("a (special) path"); + sh.getEnv().remove("OPENSEARCH_JAVA_HOME"); sh.getEnv().put("JAVA_HOME", relocatedJdk.toString()); try { @@ -154,6 +168,8 @@ public void test50StartAndStop() throws Exception { } public void test51JavaHomeOverride() throws Exception { + sh.getEnv().remove("OPENSEARCH_JAVA_HOME"); + Platforms.onLinux(() -> { String systemJavaHome1 = sh.run("echo $SYSTEM_JAVA_HOME").stdout.trim(); sh.getEnv().put("JAVA_HOME", systemJavaHome1); @@ -171,8 +187,29 @@ public void test51JavaHomeOverride() throws Exception { assertThat(FileUtils.slurpAllLogs(installation.logs, "opensearch.log", "*.log.gz"), containsString(systemJavaHome1)); } - public void test52BundledJdkRemoved() throws Exception { + public void test51OpensearchJavaHomeOverride() throws Exception { + Platforms.onLinux(() -> { + String systemJavaHome1 = sh.run("echo $SYSTEM_JAVA_HOME").stdout.trim(); + sh.getEnv().put("OPENSEARCH_JAVA_HOME", systemJavaHome1); + sh.getEnv().put("JAVA_HOME", "doesnotexist"); + }); + Platforms.onWindows(() -> { + final String systemJavaHome1 = sh.run("$Env:SYSTEM_JAVA_HOME").stdout.trim(); + sh.getEnv().put("OPENSEARCH_JAVA_HOME", systemJavaHome1); + sh.getEnv().put("JAVA_HOME", "doesnotexist"); + }); + + startOpenSearch(); + ServerUtils.runOpenSearchTests(); + stopOpenSearch(); + + String systemJavaHome1 = sh.getEnv().get("OPENSEARCH_JAVA_HOME"); + assertThat(FileUtils.slurpAllLogs(installation.logs, "opensearch.log", "*.log.gz"), containsString(systemJavaHome1)); + } + + public void test52JavaHomeBundledJdkRemoved() throws Exception { assumeThat(distribution().hasJdk, is(true)); + sh.getEnv().remove("OPENSEARCH_JAVA_HOME"); Path relocatedJdk = installation.bundledJdk.getParent().resolve("jdk.relocated"); try { @@ -197,7 +234,37 @@ public void test52BundledJdkRemoved() throws Exception { } } + public void test52OpensearchJavaHomeBundledJdkRemoved() throws Exception { + assumeThat(distribution().hasJdk, is(true)); + + Path relocatedJdk = installation.bundledJdk.getParent().resolve("jdk.relocated"); + try { + mv(installation.bundledJdk, relocatedJdk); + Platforms.onLinux(() -> { + String systemJavaHome1 = sh.run("echo $SYSTEM_JAVA_HOME").stdout.trim(); + sh.getEnv().put("OPENSEARCH_JAVA_HOME", systemJavaHome1); + sh.getEnv().put("JAVA_HOME", "doesnotexist"); + }); + Platforms.onWindows(() -> { + final String systemJavaHome1 = sh.run("$Env:SYSTEM_JAVA_HOME").stdout.trim(); + sh.getEnv().put("OPENSEARCH_JAVA_HOME", systemJavaHome1); + sh.getEnv().put("JAVA_HOME", "doesnotexist"); + }); + + startOpenSearch(); + ServerUtils.runOpenSearchTests(); + stopOpenSearch(); + + String systemJavaHome1 = sh.getEnv().get("OPENSEARCH_JAVA_HOME"); + assertThat(FileUtils.slurpAllLogs(installation.logs, "opensearch.log", "*.log.gz"), containsString(systemJavaHome1)); + } finally { + mv(relocatedJdk, installation.bundledJdk); + } + } + public void test53JavaHomeWithSpecialCharacters() throws Exception { + 
sh.getEnv().remove("OPENSEARCH_JAVA_HOME"); + Platforms.onWindows(() -> { String javaPath = "C:\\Program Files (x86)\\java"; try { @@ -250,6 +317,7 @@ public void test54ForceBundledJdkEmptyJavaHome() throws Exception { // cleanup from previous test rm(installation.config("opensearch.keystore")); + sh.getEnv().put("OPENSEARCH_JAVA_HOME", ""); sh.getEnv().put("JAVA_HOME", ""); startOpenSearch(); diff --git a/qa/os/src/test/java/org/opensearch/packaging/test/PackagingTestCase.java b/qa/os/src/test/java/org/opensearch/packaging/test/PackagingTestCase.java index 3294ffb3bbe45..259ae6e766c8e 100644 --- a/qa/os/src/test/java/org/opensearch/packaging/test/PackagingTestCase.java +++ b/qa/os/src/test/java/org/opensearch/packaging/test/PackagingTestCase.java @@ -33,6 +33,7 @@ package org.opensearch.packaging.test; import com.carrotsearch.randomizedtesting.JUnit3MethodProvider; +import com.carrotsearch.randomizedtesting.RandomizedContext; import com.carrotsearch.randomizedtesting.RandomizedRunner; import com.carrotsearch.randomizedtesting.annotations.TestCaseOrdering; import com.carrotsearch.randomizedtesting.annotations.TestGroup; @@ -182,11 +183,19 @@ public void setup() throws Exception { sh.reset(); if (distribution().hasJdk == false) { - Platforms.onLinux(() -> sh.getEnv().put("JAVA_HOME", systemJavaHome)); - Platforms.onWindows(() -> sh.getEnv().put("JAVA_HOME", systemJavaHome)); + // Randomly switch between JAVA_HOME and OPENSEARCH_JAVA_HOME + final String javaHomeEnv = randomBoolean() ? "JAVA_HOME" : "OPENSEARCH_JAVA_HOME"; + logger.info("Using " + javaHomeEnv); + + Platforms.onLinux(() -> sh.getEnv().put(javaHomeEnv, systemJavaHome)); + Platforms.onWindows(() -> sh.getEnv().put(javaHomeEnv, systemJavaHome)); } } + private boolean randomBoolean() { + return RandomizedContext.current().getRandom().nextBoolean(); + } + @After public void teardown() throws Exception { if (installation != null && failed == false) { diff --git a/qa/os/src/test/java/org/opensearch/packaging/test/WindowsServiceTests.java b/qa/os/src/test/java/org/opensearch/packaging/test/WindowsServiceTests.java index 50540f3ac5233..57ea853e735a9 100644 --- a/qa/os/src/test/java/org/opensearch/packaging/test/WindowsServiceTests.java +++ b/qa/os/src/test/java/org/opensearch/packaging/test/WindowsServiceTests.java @@ -149,12 +149,20 @@ public void test13InstallMissingBundledJdk() throws IOException { } public void test14InstallBadJavaHome() throws IOException { + sh.getEnv().put("OPENSEARCH_JAVA_HOME", ""); sh.getEnv().put("JAVA_HOME", "doesnotexist"); Result result = sh.runIgnoreExitCode(serviceScript + " install"); assertThat(result.exitCode, equalTo(1)); assertThat(result.stderr, containsString("could not find java in JAVA_HOME")); } + public void test14InstallBadOpensearchJavaHome() throws IOException { + sh.getEnv().put("OPENSEARCH_JAVA_HOME", "doesnotexist"); + Result result = sh.runIgnoreExitCode(serviceScript + " install"); + assertThat(result.exitCode, equalTo(1)); + assertThat(result.stderr, containsString("could not find java in OPENSEARCH_JAVA_HOME")); + } + public void test15RemoveNotInstalled() { Result result = assertFailure(serviceScript + " remove", 1); assertThat(result.stdout, containsString("Failed removing '" + DEFAULT_ID + "' service")); @@ -163,6 +171,7 @@ public void test15RemoveNotInstalled() { public void test16InstallSpecialCharactersInJdkPath() throws IOException { assumeTrue("Only run this test when we know where the JDK is.", distribution().hasJdk); final Path relocatedJdk = 
installation.bundledJdk.getParent().resolve("a (special) jdk"); + sh.getEnv().put("OPENSEARCH_JAVA_HOME", ""); sh.getEnv().put("JAVA_HOME", relocatedJdk.toString()); try { @@ -248,6 +257,7 @@ public void test32StopNotStarted() throws IOException { public void test33JavaChanged() throws Exception { final Path relocatedJdk = installation.bundledJdk.getParent().resolve("jdk.relocated"); + sh.getEnv().put("OPENSEARCH_JAVA_HOME", ""); try { mv(installation.bundledJdk, relocatedJdk); @@ -261,6 +271,22 @@ public void test33JavaChanged() throws Exception { } } + public void test33OpensearchJavaChanged() throws Exception { + final Path relocatedJdk = installation.bundledJdk.getParent().resolve("jdk.relocated"); + sh.getEnv().put("JAVA_HOME", ""); + + try { + mv(installation.bundledJdk, relocatedJdk); + sh.getEnv().put("OPENSEARCH_JAVA_HOME", relocatedJdk.toString()); + assertCommand(serviceScript + " install"); + sh.getEnv().remove("OPENSEARCH_JAVA_HOME"); + assertCommand(serviceScript + " start"); + assertStartedAndStop(); + } finally { + mv(relocatedJdk, installation.bundledJdk); + } + } + public void test60Manager() throws IOException { Path serviceMgr = installation.bin("opensearch-service-mgr.exe"); Path tmpServiceMgr = serviceMgr.getParent().resolve(serviceMgr.getFileName() + ".tmp"); diff --git a/qa/os/src/test/java/org/opensearch/packaging/util/Cleanup.java b/qa/os/src/test/java/org/opensearch/packaging/util/Cleanup.java index 6bd7f07320350..d18c0d8d7cca1 100644 --- a/qa/os/src/test/java/org/opensearch/packaging/util/Cleanup.java +++ b/qa/os/src/test/java/org/opensearch/packaging/util/Cleanup.java @@ -75,16 +75,14 @@ public static void cleanEverything() throws Exception { sh.runIgnoreExitCode("ps aux | grep -i 'org.opensearch.bootstrap.OpenSearch' | awk {'print $2'} | xargs kill -9"); }); - Platforms.onWindows( - () -> { - // the view of processes returned by Get-Process doesn't expose command line arguments, so we use WMI here - sh.runIgnoreExitCode( - "Get-WmiObject Win32_Process | " - + "Where-Object { $_.CommandLine -Match 'org.opensearch.bootstrap.OpenSearch' } | " - + "ForEach-Object { $_.Terminate() }" - ); - } - ); + Platforms.onWindows(() -> { + // the view of processes returned by Get-Process doesn't expose command line arguments, so we use WMI here + sh.runIgnoreExitCode( + "Get-WmiObject Win32_Process | " + + "Where-Object { $_.CommandLine -Match 'org.opensearch.bootstrap.OpenSearch' } | " + + "ForEach-Object { $_.Terminate() }" + ); + }); Platforms.onLinux(Cleanup::purgePackagesLinux); diff --git a/qa/os/src/test/java/org/opensearch/packaging/util/ServerUtils.java b/qa/os/src/test/java/org/opensearch/packaging/util/ServerUtils.java index dcc6829eb4143..d92feec21daaf 100644 --- a/qa/os/src/test/java/org/opensearch/packaging/util/ServerUtils.java +++ b/qa/os/src/test/java/org/opensearch/packaging/util/ServerUtils.java @@ -198,12 +198,12 @@ public static void waitForOpenSearch(String status, String index, Installation i public static void runOpenSearchTests() throws Exception { makeRequest( - Request.Post("http://localhost:9200/library/book/1?refresh=true&pretty") + Request.Post("http://localhost:9200/library/_doc/1?refresh=true&pretty") .bodyString("{ \"title\": \"Book #1\", \"pages\": 123 }", ContentType.APPLICATION_JSON) ); makeRequest( - Request.Post("http://localhost:9200/library/book/2?refresh=true&pretty") + Request.Post("http://localhost:9200/library/_doc/2?refresh=true&pretty") .bodyString("{ \"title\": \"Book #2\", \"pages\": 456 }", ContentType.APPLICATION_JSON) ); diff 
--git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java index 79745b1cc2f95..f34e5f7bc121a 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java @@ -205,12 +205,11 @@ public void testAutoIdWithOpTypeCreate() throws IOException { private void bulk(String index, String valueSuffix, int count) throws IOException { StringBuilder b = new StringBuilder(); for (int i = 0; i < count; i++) { - b.append("{\"index\": {\"_index\": \"").append(index).append("\", \"_type\": \"_doc\"}}\n"); + b.append("{\"index\": {\"_index\": \"").append(index).append("\"}}\n"); b.append("{\"f1\": \"v").append(i).append(valueSuffix).append("\", \"f2\": ").append(i).append("}\n"); } Request bulk = new Request("POST", "/_bulk"); bulk.addParameter("refresh", "true"); - bulk.setOptions(expectWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE)); bulk.setJsonEntity(b.toString()); client().performRequest(bulk); } diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java index 5507a5a221473..687fd1743c3d3 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java @@ -48,9 +48,6 @@ import org.opensearch.common.xcontent.support.XContentMapValues; import org.opensearch.index.IndexSettings; import org.opensearch.rest.RestStatus; -import org.opensearch.rest.action.document.RestGetAction; -import org.opensearch.rest.action.document.RestIndexAction; -import org.opensearch.rest.action.document.RestUpdateAction; import org.opensearch.test.rest.yaml.ObjectPath; import org.hamcrest.Matcher; import org.hamcrest.Matchers; @@ -67,7 +64,7 @@ import java.util.concurrent.TimeUnit; import java.util.function.Predicate; -import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiOfLength; +import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiLettersOfLength; import static org.opensearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; import static org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING; import static org.opensearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY; @@ -123,9 +120,8 @@ public void testHistoryUUIDIsGenerated() throws Exception { private int indexDocs(String index, final int idStart, final int numDocs) throws IOException { for (int i = 0; i < numDocs; i++) { final int id = idStart + i; - Request indexDoc = new Request("PUT", index + "/test/" + id); - indexDoc.setJsonEntity("{\"test\": \"test_" + randomAsciiOfLength(2) + "\"}"); - indexDoc.setOptions(expectWarnings(RestIndexAction.TYPES_DEPRECATION_MESSAGE)); + Request indexDoc = new Request("PUT", index + "/_doc/" + id); + indexDoc.setJsonEntity("{\"test\": \"test_" + randomAsciiLettersOfLength(2) + "\"}"); client().performRequest(indexDoc); } return numDocs; @@ -322,13 +318,13 @@ public void testRelocationWithConcurrentIndexing() throws Exception { throw new IllegalStateException("unknown type " + CLUSTER_TYPE); } if (randomBoolean()) { - performSyncedFlush(index, randomBoolean()); + syncedFlush(index, randomBoolean()); ensureGlobalCheckpointSynced(index); } } public void testRecovery() throws 
Exception { - final String index = "recover_with_soft_deletes"; + final String index = "test_recovery"; if (CLUSTER_TYPE == ClusterType.OLD) { Settings.Builder settings = Settings.builder() .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) @@ -339,7 +335,7 @@ public void testRecovery() throws Exception { // before timing out .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms") .put(SETTING_ALLOCATION_MAX_RETRY.getKey(), "0"); // fail faster - if (randomBoolean()) { + if (minimumNodeVersion().before(Version.V_2_0_0) && randomBoolean()) { settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); } createIndex(index, settings.build()); @@ -360,6 +356,9 @@ public void testRecovery() throws Exception { } } } + if (randomBoolean()) { + syncedFlush(index, randomBoolean()); + } ensureGreen(index); } @@ -370,8 +369,10 @@ public void testRetentionLeasesEstablishedWhenPromotingPrimary() throws Exceptio .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), between(1, 5)) .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), between(1, 2)) // triggers nontrivial promotion .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms") - .put(SETTING_ALLOCATION_MAX_RETRY.getKey(), "0") // fail faster - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); + .put(SETTING_ALLOCATION_MAX_RETRY.getKey(), "0"); // fail faster + if (minimumNodeVersion().before(Version.V_2_0_0) && randomBoolean()) { + settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); + } createIndex(index, settings.build()); int numDocs = randomInt(10); indexDocs(index, 0, numDocs); @@ -391,8 +392,10 @@ public void testRetentionLeasesEstablishedWhenRelocatingPrimary() throws Excepti .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), between(1, 5)) .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), between(0, 1)) .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms") - .put(SETTING_ALLOCATION_MAX_RETRY.getKey(), "0") // fail faster - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); + .put(SETTING_ALLOCATION_MAX_RETRY.getKey(), "0"); // fail faster + if (minimumNodeVersion().before(Version.V_2_0_0) && randomBoolean()) { + settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); + } createIndex(index, settings.build()); int numDocs = randomInt(10); indexDocs(index, 0, numDocs); @@ -652,8 +655,7 @@ public void testUpdateDoc() throws Exception { final int times = randomIntBetween(0, 2); for (int i = 0; i < times; i++) { long value = randomNonNegativeLong(); - Request update = new Request("POST", index + "/test/" + docId + "/_update"); - update.setOptions(expectWarnings(RestUpdateAction.TYPES_DEPRECATION_MESSAGE)); + Request update = new Request("POST", index + "/_update/" + docId); update.setJsonEntity("{\"doc\": {\"updated_field\": " + value + "}}"); client().performRequest(update); updates.put(docId, value); @@ -661,13 +663,12 @@ public void testUpdateDoc() throws Exception { } client().performRequest(new Request("POST", index + "/_refresh")); for (int docId : updates.keySet()) { - Request get = new Request("GET", index + "/test/" + docId); - get.setOptions(expectWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE)); + Request get = new Request("GET", index + "/_doc/" + docId); Map doc = entityAsMap(client().performRequest(get)); assertThat(XContentMapValues.extractValue("_source.updated_field", doc), equalTo(updates.get(docId))); } if (randomBoolean()) { - 
performSyncedFlush(index, randomBoolean()); + syncedFlush(index, randomBoolean()); ensureGlobalCheckpointSynced(index); } } @@ -713,10 +714,13 @@ private void assertNoopRecoveries(String indexName, Predicate targetNode public void testOperationBasedRecovery() throws Exception { final String index = "test_operation_based_recovery"; if (CLUSTER_TYPE == ClusterType.OLD) { - createIndex(index, Settings.builder() + final Settings.Builder settings = Settings.builder() .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2) - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()).build()); + .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2); + if (minimumNodeVersion().before(Version.V_2_0_0) && randomBoolean()) { + settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); + } + createIndex(index, settings.build()); ensureGreen(index); indexDocs(index, 0, randomIntBetween(100, 200)); flush(index, randomBoolean()); @@ -791,7 +795,7 @@ public void testSoftDeletesDisabledWarning() throws Exception { if (CLUSTER_TYPE == ClusterType.OLD) { boolean softDeletesEnabled = true; Settings.Builder settings = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1); - if (randomBoolean()) { + if (minimumNodeVersion().before(Version.V_2_0_0) && randomBoolean()) { softDeletesEnabled = randomBoolean(); settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), softDeletesEnabled); } diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/20_date_range.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/20_date_range.yml index 83df474d70d89..89992eeba616f 100644 --- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/20_date_range.yml +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/20_date_range.yml @@ -80,9 +80,6 @@ --- "Create index with java style index in 6": - - skip: - version: " - 6.7.99, 7.0.0 -" - reason: java.time patterns are allowed since 6.8 - do: indices.create: index: java_for_range diff --git a/qa/smoke-test-ingest-disabled/src/test/resources/rest-api-spec/test/ingest_mustache/10_ingest_disabled.yml b/qa/smoke-test-ingest-disabled/src/test/resources/rest-api-spec/test/ingest_mustache/10_ingest_disabled.yml index 5a3f64151f4ed..7a0cdcbef0786 100644 --- a/qa/smoke-test-ingest-disabled/src/test/resources/rest-api-spec/test/ingest_mustache/10_ingest_disabled.yml +++ b/qa/smoke-test-ingest-disabled/src/test/resources/rest-api-spec/test/ingest_mustache/10_ingest_disabled.yml @@ -54,7 +54,6 @@ "docs": [ { "_index": "index", - "_type": "type", "_id": "id", "_source": { "foo": "bar" @@ -74,7 +73,6 @@ catch: /There are no ingest nodes in this cluster, unable to forward request to an ingest node./ index: index: test - type: test id: 1 pipeline: "my_pipeline_1" body: { @@ -92,12 +90,10 @@ body: - index: _index: test_index - _type: test_type _id: test_id - f1: v1 - index: _index: test_index - _type: test_type _id: test_id2 - f1: v2 @@ -109,12 +105,10 @@ body: - index: _index: test_index - _type: test_type _id: test_id - f1: v1 - index: _index: test_index - _type: test_type _id: test_id2 pipeline: my_pipeline_1 - f1: v2 diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/opensearch/ingest/IngestDocumentMustacheIT.java b/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/opensearch/ingest/IngestDocumentMustacheIT.java index 
5b2468b6304b1..83643f3217720 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/opensearch/ingest/IngestDocumentMustacheIT.java +++ b/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/opensearch/ingest/IngestDocumentMustacheIT.java @@ -46,7 +46,7 @@ public class IngestDocumentMustacheIT extends AbstractScriptTestCase { public void testAccessMetadataViaTemplate() { Map document = new HashMap<>(); document.put("foo", "bar"); - IngestDocument ingestDocument = new IngestDocument("index", "type", "id", null, null, null, document); + IngestDocument ingestDocument = new IngestDocument("index", "id", null, null, null, document); ingestDocument.setFieldValue(compile("field1"), ValueSource.wrap("1 {{foo}}", scriptService)); assertThat(ingestDocument.getFieldValue("field1", String.class), equalTo("1 bar")); @@ -61,7 +61,7 @@ public void testAccessMapMetadataViaTemplate() { innerObject.put("baz", "hello baz"); innerObject.put("qux", Collections.singletonMap("fubar", "hello qux and fubar")); document.put("foo", innerObject); - IngestDocument ingestDocument = new IngestDocument("index", "type", "id", null, null, null, document); + IngestDocument ingestDocument = new IngestDocument("index", "id", null, null, null, document); ingestDocument.setFieldValue(compile("field1"), ValueSource.wrap("1 {{foo.bar}} {{foo.baz}} {{foo.qux.fubar}}", scriptService)); assertThat(ingestDocument.getFieldValue("field1", String.class), equalTo("1 hello bar hello baz hello qux and fubar")); @@ -80,7 +80,7 @@ public void testAccessListMetadataViaTemplate() { list.add(value); list.add(null); document.put("list2", list); - IngestDocument ingestDocument = new IngestDocument("index", "type", "id", null, null, null, document); + IngestDocument ingestDocument = new IngestDocument("index", "id", null, null, null, document); ingestDocument.setFieldValue(compile("field1"), ValueSource.wrap("1 {{list1.0}} {{list2.0}}", scriptService)); assertThat(ingestDocument.getFieldValue("field1", String.class), equalTo("1 foo {field=value}")); } @@ -90,7 +90,7 @@ public void testAccessIngestMetadataViaTemplate() { Map ingestMap = new HashMap<>(); ingestMap.put("timestamp", "bogus_timestamp"); document.put("_ingest", ingestMap); - IngestDocument ingestDocument = new IngestDocument("index", "type", "id", null, null, null, document); + IngestDocument ingestDocument = new IngestDocument("index", "id", null, null, null, document); ingestDocument.setFieldValue(compile("ingest_timestamp"), ValueSource.wrap("{{_ingest.timestamp}} and {{_source._ingest.timestamp}}", scriptService)); assertThat(ingestDocument.getFieldValue("ingest_timestamp", String.class), diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/opensearch/ingest/ValueSourceMustacheIT.java b/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/opensearch/ingest/ValueSourceMustacheIT.java index 83641cca6156a..2804c73032f1b 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/opensearch/ingest/ValueSourceMustacheIT.java +++ b/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/opensearch/ingest/ValueSourceMustacheIT.java @@ -77,7 +77,7 @@ public void testValueSourceWithTemplates() { } public void testAccessSourceViaTemplate() { - IngestDocument ingestDocument = new IngestDocument("marvel", "type", "id", null, null, null, new HashMap<>()); + IngestDocument ingestDocument = new IngestDocument("marvel", "id", null, null, null, new HashMap<>()); assertThat(ingestDocument.hasField("marvel"), is(false)); 
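The ingest hunks here drop the type argument from the `IngestDocument` constructor, leaving six parameters; judging by the calls in these tests, they are index, id, routing, version, version type, and the source map. A minimal usage sketch under that reading (names and values are illustrative):

```java
import java.util.HashMap;
import java.util.Map;

Map<String, Object> source = new HashMap<>();
source.put("foo", "bar");

// Typeless constructor, as in the updated tests: nulls for routing, version, and version type.
IngestDocument ingestDocument = new IngestDocument("my-index", "1", null, null, null, source);

ingestDocument.setFieldValue("field1", "value1");
assertThat(ingestDocument.getFieldValue("field1", String.class), equalTo("value1"));
```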
ingestDocument.setFieldValue(compile("{{_index}}"), ValueSource.wrap("{{_index}}", scriptService)); assertThat(ingestDocument.getFieldValue("marvel", String.class), equalTo("marvel")); diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/10_pipeline_with_mustache_templates.yml b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/10_pipeline_with_mustache_templates.yml index 5ffaebdcaef36..e6a2a3d52e116 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/10_pipeline_with_mustache_templates.yml +++ b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/10_pipeline_with_mustache_templates.yml @@ -14,13 +14,13 @@ { "set" : { "field" : "index_type_id", - "value": "{{_index}}/{{_type}}/{{_id}}" + "value": "{{_index}}/{{_id}}" } }, { "append" : { "field" : "metadata", - "value": ["{{_index}}", "{{_type}}", "{{_id}}"] + "value": ["{{_index}}", "{{_id}}"] } } ] @@ -30,7 +30,6 @@ - do: index: index: test - type: test id: 1 pipeline: "my_pipeline_1" body: {} @@ -38,11 +37,10 @@ - do: get: index: test - type: test id: 1 - length: { _source: 2 } - - match: { _source.index_type_id: "test/test/1" } - - match: { _source.metadata: ["test", "test", "1"] } + - match: { _source.index_type_id: "test/1" } + - match: { _source.metadata: ["test", "1"] } --- "Test templating": @@ -110,7 +108,6 @@ - do: index: index: test - type: test id: 1 pipeline: "my_pipeline_1" body: { @@ -123,7 +120,6 @@ - do: get: index: test - type: test id: 1 - length: { _source: 5 } - match: { _source.field1: "1" } @@ -135,7 +131,6 @@ - do: index: index: test - type: test id: 1 pipeline: "my_pipeline_2" body: { @@ -145,7 +140,6 @@ - do: get: index: test - type: test id: 1 - length: { _source: 2 } - match: { _source.field1: "field2" } @@ -154,7 +148,6 @@ - do: index: index: test - type: test id: 1 pipeline: "my_pipeline_3" body: { @@ -165,7 +158,6 @@ - do: get: index: test - type: test id: 1 - length: { _source: 1 } - match: { _source.field_to_remove: "field2" } @@ -204,7 +196,6 @@ - do: index: index: test - type: test id: 1 pipeline: "my_handled_pipeline" body: { @@ -214,7 +205,6 @@ - do: get: index: test - type: test id: 1 - length: { _source: 2 } - match: { _source.do_nothing: "foo" } @@ -246,7 +236,6 @@ - do: index: index: test - type: test id: 1 pipeline: "_id" body: { @@ -268,7 +257,6 @@ - do: get: index: test - type: test id: 1 - length: { _source: 2 } - match: { _source.values_flat: ["foo_bar", "foo_baz"] } @@ -307,7 +295,6 @@ "docs": [ { "_index": "index", - "_type": "type", "_id": "id", "_source": { "foo": "bar" diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/20_combine_processors.yml b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/20_combine_processors.yml index c121d542c86b1..27f7f804ead1c 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/20_combine_processors.yml +++ b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/20_combine_processors.yml @@ -1,10 +1,5 @@ --- "Test with date processor": - - skip: - version: " - 6.9.99" - reason: pre-7.0.0 requires the 8 prefix for Java time formats, so would treat the format in this test as a Joda time format - features: "warnings" - - do: ingest.put_pipeline: id: "_id" @@ -48,7 +43,6 @@ - do: index: index: test - type: test id: 1 pipeline: 
"_id" body: { @@ -58,7 +52,6 @@ - do: get: index: test - type: test id: 1 - length: { _source: 13 } - match: { _source.request: "/presentations/logstash-scale11x/images/ahhh___rage_face_by_samusmmx-d5g5zap.png" } @@ -137,7 +130,6 @@ - do: index: index: test - type: test id: 1 pipeline: "_id" body: { @@ -178,7 +170,6 @@ - do: get: index: test - type: test id: 1 - length: { _source: 11 } - is_false: _source.friends.0.id diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/30_update_by_query_with_ingest.yml b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/30_update_by_query_with_ingest.yml index 18929c47a4027..5eedae174eaa9 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/30_update_by_query_with_ingest.yml +++ b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/30_update_by_query_with_ingest.yml @@ -18,7 +18,6 @@ - do: index: index: twitter - type: _doc id: 1 body: { "user": "foobar" } - do: diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/40_reindex_with_ingest.yml b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/40_reindex_with_ingest.yml index e34ca43a62282..eb1c0d2eef7c5 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/40_reindex_with_ingest.yml +++ b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/40_reindex_with_ingest.yml @@ -18,7 +18,6 @@ - do: index: index: twitter - type: tweet id: 1 body: { "user": "foobar" } - do: diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/50_script_processor_using_painless.yml b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/50_script_processor_using_painless.yml index cce388c2ff737..eaf6b24030a06 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/50_script_processor_using_painless.yml +++ b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/50_script_processor_using_painless.yml @@ -22,7 +22,6 @@ - do: index: index: test - type: test id: 1 pipeline: "my_pipeline" body: { bytes_in: 1234, bytes_out: 4321 } @@ -30,7 +29,6 @@ - do: get: index: test - type: test id: 1 - match: { _source.bytes_in: 1234 } - match: { _source.bytes_out: 4321 } @@ -72,7 +70,6 @@ - do: index: index: test - type: test id: 1 pipeline: "my_pipeline" body: { bytes_in: 1234, bytes_out: 4321 } @@ -80,7 +77,6 @@ - do: get: index: test - type: test id: 1 - match: { _source.bytes_in: 1234 } - match: { _source.bytes_out: 4321 } diff --git a/qa/translog-policy/src/test/java/org/opensearch/upgrades/TranslogPolicyIT.java b/qa/translog-policy/src/test/java/org/opensearch/upgrades/TranslogPolicyIT.java index 72400a5705162..5ae9944429d21 100644 --- a/qa/translog-policy/src/test/java/org/opensearch/upgrades/TranslogPolicyIT.java +++ b/qa/translog-policy/src/test/java/org/opensearch/upgrades/TranslogPolicyIT.java @@ -141,7 +141,7 @@ public void testRecoverReplica() throws Exception { if (randomBoolean()) { flush(index, randomBoolean()); } else if (randomBoolean()) { - performSyncedFlush(index, randomBoolean()); + syncedFlush(index, randomBoolean()); } } ensureGreen(index); diff --git a/release-notes/opensearch.release-notes-1.2.4.md 
b/release-notes/opensearch.release-notes-1.2.4.md new file mode 100644 index 0000000000000..dc2852a102c44 --- /dev/null +++ b/release-notes/opensearch.release-notes-1.2.4.md @@ -0,0 +1,76 @@ +## 2022-01-18 Version 1.2.4 + +* __Update FIPS API libraries of Bouncy Castle (#1853) (#1888)__ + + [Tianli Feng](mailto:ftl94@live.com) - Thu, 13 Jan 2022 10:48:38 -0500 + + HEAD -> refs/heads/1.2, tag: refs/tags/1.2.4, refs/remotes/upstream/1.2 + * Update bc-fips to 1.0.2.1 + Signed-off-by: Tianli Feng <ftl94@live.com> + + * Update bcpg-fips to 1.0.5.1 + Signed-off-by: Tianli Feng <ftl94@live.com> + + * Update bctls-fips to 1.0.12.2 + Signed-off-by: Tianli Feng <ftl94@live.com> + + * Use the unified bouncycastle version for bcpkix-jdk15on in HDFS testing + fixture + Signed-off-by: Tianli Feng <ftl94@live.com> + +* __[Backport 1.2] Replace JCenter with Maven Central. (#1057) and update plugin repository order. (#1894)__ + + [Marc Handalian](mailto:handalm@amazon.com) - Wed, 12 Jan 2022 15:18:22 -0800 + + + * Replace JCenter with Maven Central. (#1057) + On February 3 2021, JFrog + [announced](https://jfrog.com/blog/into-the-sunset-bintray-jcenter-gocenter-and-chartcenter/) + the shutdown of JCenter. Later on April 27 2021, an update was provided that + the repository will only be read only and new package and versions are no + longer accepted on JCenter. This means we should no longer use JCenter for our + central artifacts repository. + This change replaces JCenter with Maven Central as per the Gradle + recommendation - https://blog.gradle.org/jcenter-shutdown + Signed-off-by: Rabi Panda <adnapibar@gmail.com> + + * Define plugin repositories order in settings.gradle. + Signed-off-by: Marc Handalian <handalm@amazon.com> + Co-authored-by: Rabi Panda <adnapibar@gmail.com> + +* __Updating Netty to 4.1.72.Final (#1831) (#1890)__ + + [Sarat Vemulapalli](mailto:vemulapallisarat@gmail.com) - Wed, 12 Jan 2022 08:29:25 -0800 + + + Signed-off-by: Sarat Vemulapalli <vemulapallisarat@gmail.com> + +* __Upgrading bouncycastle to 1.70 (#1832) (#1889)__ + + [Sarat Vemulapalli](mailto:vemulapallisarat@gmail.com) - Tue, 11 Jan 2022 17:20:29 -0800 + + + Signed-off-by: Sarat Vemulapalli <vemulapallisarat@gmail.com> + +* __RestIntegTestTask fails because of missing log4j-core dependency (#1815) (#1819)__ + + [Andriy Redko](mailto:andriy.redko@aiven.io) - Tue, 28 Dec 2021 17:47:13 -0500 + + + Signed-off-by: Andriy Redko <andriy.redko@aiven.io> + +* __Update to log4j 2.17.1 (#1820) (#1823)__ + + [Andriy Redko](mailto:andriy.redko@aiven.io) - Tue, 28 Dec 2021 17:46:53 -0500 + + + Signed-off-by: Andriy Redko <andriy.redko@aiven.io> + +* __Prepare for next development iteration, 1.2.4.
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/count.json b/rest-api-spec/src/main/resources/rest-api-spec/api/count.json index 9f6461b16d3eb..8cdb3db7c12cd 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/count.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/count.json @@ -27,27 +27,6 @@ "description":"A comma-separated list of indices to restrict the results" } } - }, - { - "path":"/{index}/{type}/_count", - "methods":[ - "POST", - "GET" - ], - "parts":{ - "index":{ - "type":"list", - "description":"A comma-separated list of indices to restrict the results" - }, - "type": { - "type" : "list", - "description" : "A comma-separated list of types to restrict the results" - } - }, - "deprecated": { - "version" : "7.0.0", - "description" : "Specifying types in urls has been deprecated" - } } ] }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/exists.json b/rest-api-spec/src/main/resources/rest-api-spec/api/exists.json index 09042376a256b..fd221b474a070 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/exists.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/exists.json @@ -22,31 +22,6 @@ "description":"The name of the index" } } - }, - { - "path":"/{index}/{type}/{id}", - "methods":[ - "HEAD" - ], - "parts":{ - "id":{ - "type":"string", - "description":"The document ID" - }, - "index":{ - "type":"string", - "description":"The name of the index" - }, - "type":{ - "type":"string", - "description":"The type of the document (use `_all` to fetch the first document matching the ID across all types)", - "deprecated":true - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"Specifying types in urls has been deprecated" - } } ] }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/get.json index 0c8d62d6d1d34..2ce77f17aff10 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/get.json @@ -22,31 +22,6 @@ "description":"The name of the index" } } - }, - { - "path":"/{index}/{type}/{id}", - "methods":[ - "GET" - ], - "parts":{ - "id":{ - "type":"string", - "description":"The document ID" - }, - "index":{ - "type":"string", - "description":"The name of the index" - }, - "type":{ - "type":"string", - "description":"The type of the document (use `_all` to fetch the first document matching the ID across all types)", - "deprecated":true - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"Specifying types in urls has been deprecated" - } } ] }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/get_source.json b/rest-api-spec/src/main/resources/rest-api-spec/api/get_source.json index e5336059d3924..ad79678388590 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/get_source.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/get_source.json @@ -22,31 +22,6 @@ "description":"The name of the index" } } - }, - { - "path":"/{index}/{type}/{id}/_source", - "methods":[ - "GET" - ], - "parts":{ - "id":{ - "type":"string", - "description":"The document ID" - }, - "index":{ - "type":"string", - "description":"The name of the index" - }, - "type":{ - "type":"string", - "description":"The type of the document; deprecated and optional starting with 7.0", - "deprecated":true -
}, - "deprecated":{ - "version":"7.0.0", - "description":"Specifying types in urls has been deprecated" - } } ] }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/index.json b/rest-api-spec/src/main/resources/rest-api-spec/api/index.json index 37f3cc9f9f82b..b4865403331b0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/index.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/index.json @@ -35,53 +35,6 @@ "description":"The name of the index" } } - }, - { - "path":"/{index}/{type}", - "methods":[ - "POST" - ], - "parts":{ - "index":{ - "type":"string", - "description":"The name of the index" - }, - "type":{ - "type":"string", - "description":"The type of the document", - "deprecated":true - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"Specifying types in urls has been deprecated" - } - }, - { - "path":"/{index}/{type}/{id}", - "methods":[ - "PUT", - "POST" - ], - "parts":{ - "id":{ - "type":"string", - "description":"Document ID" - }, - "index":{ - "type":"string", - "description":"The name of the index" - }, - "type":{ - "type":"string", - "description":"The type of the document", - "deprecated":true - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"Specifying types in urls has been deprecated" - } } ] }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.flush_synced.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.flush_synced.json deleted file mode 100644 index 134ba93395b40..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.flush_synced.json +++ /dev/null @@ -1,62 +0,0 @@ -{ - "indices.flush_synced":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-synced-flush-api.html", - "description":"Performs a synced flush operation on one or more indices. Synced flush is deprecated and will be removed in 8.0. Use flush instead" - }, - "stability":"stable", - "url":{ - "paths":[ - { - "path":"/_flush/synced", - "methods":[ - "POST", - "GET" - ], - "deprecated":{ - "version":"7.6.0", - "description":"Synced flush is deprecated and will be removed in 8.0. Use flush instead." - } - }, - { - "path":"/{index}/_flush/synced", - "methods":[ - "POST", - "GET" - ], - "parts":{ - "index":{ - "type":"list", - "description":"A comma-separated list of index names; use `_all` or empty string for all indices" - } - }, - "deprecated":{ - "version":"7.6.0", - "description":"Synced flush is deprecated and will be removed in 8.0. Use flush instead." - } - } - ] - }, - "params":{ - "ignore_unavailable":{ - "type":"boolean", - "description":"Whether specified concrete indices should be ignored when unavailable (missing or closed)" - }, - "allow_no_indices":{ - "type":"boolean", - "description":"Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)" - }, - "expand_wildcards":{ - "type":"enum", - "options":[ - "open", - "closed", - "none", - "all" - ], - "default":"open", - "description":"Whether to expand wildcard expression to concrete indices that are open, closed or both." 
- } - } - } -} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_field_mapping.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_field_mapping.json index 15cc48a582cc4..ee96dfcc21ccd 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_field_mapping.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_field_mapping.json @@ -34,52 +34,6 @@ "description":"A comma-separated list of fields" } } - }, - { - "path":"/_mapping/{type}/field/{fields}", - "methods":[ - "GET" - ], - "parts":{ - "type":{ - "type":"list", - "description":"A comma-separated list of document types", - "deprecated":true - }, - "fields":{ - "type":"list", - "description":"A comma-separated list of fields" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"Specifying types in urls has been deprecated" - } - }, - { - "path":"/{index}/_mapping/{type}/field/{fields}", - "methods":[ - "GET" - ], - "parts":{ - "index":{ - "type":"list", - "description":"A comma-separated list of index names" - }, - "type":{ - "type":"list", - "description":"A comma-separated list of document types", - "deprecated":true - }, - "fields":{ - "type":"list", - "description":"A comma-separated list of fields" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"Specifying types in urls has been deprecated" - } } ] }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_mapping.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_mapping.json index 1d9e795c6ed5d..24fd668069697 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_mapping.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_mapping.json @@ -24,52 +24,10 @@ "description":"A comma-separated list of index names" } } - }, - { - "path":"/_mapping/{type}", - "methods":[ - "GET" - ], - "parts":{ - "type":{ - "type":"list", - "description":"A comma-separated list of document types", - "deprecated":true - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"Specifying types in urls has been deprecated" - } - }, - { - "path":"/{index}/_mapping/{type}", - "methods":[ - "GET" - ], - "parts":{ - "index":{ - "type":"list", - "description":"A comma-separated list of index names" - }, - "type":{ - "type":"list", - "description":"A comma-separated list of document types", - "deprecated":true - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"Specifying types in urls has been deprecated" - } } ] }, "params":{ - "include_type_name":{ - "type":"boolean", - "description":"Whether to add the type name to the response (default: false)" - }, "ignore_unavailable":{ "type":"boolean", "description":"Whether specified concrete indices should be ignored when unavailable (missing or closed)" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_mapping.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_mapping.json index f23380ac2f1ac..451cbccd8d329 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_mapping.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_mapping.json @@ -19,155 +19,10 @@ "description":"A comma-separated list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices." 
} } - }, - { - "path":"/{index}/{type}/_mapping", - "methods":[ - "PUT", - "POST" - ], - "parts":{ - "index":{ - "type":"list", - "description":"A comma-separated list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices." - }, - "type":{ - "type":"string", - "description":"The name of the document type", - "deprecated":true - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"Specifying types in urls has been deprecated" - } - }, - { - "path":"/{index}/_mapping/{type}", - "methods":[ - "PUT", - "POST" - ], - "parts":{ - "index":{ - "type":"list", - "description":"A comma-separated list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices." - }, - "type":{ - "type":"string", - "description":"The name of the document type", - "deprecated":true - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"Specifying types in urls has been deprecated" - } - }, - { - "path":"/{index}/{type}/_mappings", - "methods":[ - "PUT", - "POST" - ], - "parts":{ - "index":{ - "type":"list", - "description":"A comma-separated list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices." - }, - "type":{ - "type":"string", - "description":"The name of the document type", - "deprecated":true - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"Specifying types in urls has been deprecated" - } - }, - { - "path":"/{index}/_mappings/{type}", - "methods":[ - "PUT", - "POST" - ], - "parts":{ - "index":{ - "type":"list", - "description":"A comma-separated list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices." - }, - "type":{ - "type":"string", - "description":"The name of the document type", - "deprecated":true - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"Specifying types in urls has been deprecated" - } - }, - { - "path":"/_mappings/{type}", - "methods":[ - "PUT", - "POST" - ], - "parts":{ - "type":{ - "type":"string", - "description":"The name of the document type", - "deprecated":true - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"Specifying types in urls has been deprecated" - } - }, - { - "path":"/{index}/_mappings", - "methods":[ - "PUT", - "POST" - ], - "parts":{ - "index":{ - "type":"list", - "description":"A comma-separated list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices." - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"The plural mappings is accepted but only /_mapping is documented" - } - }, - { - "path":"/_mapping/{type}", - "methods":[ - "PUT", - "POST" - ], - "parts":{ - "type":{ - "type":"string", - "description":"The name of the document type", - "deprecated":true - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"Specifying types in urls has been deprecated" - } } ] }, "params":{ - "include_type_name":{ - "type":"boolean", - "description":"Whether a type should be expected in the body of the mappings." 
- }, "timeout":{ "type":"time", "description":"Explicit operation timeout" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/mget.json b/rest-api-spec/src/main/resources/rest-api-spec/api/mget.json index f1d35aee7d62f..e0b58139ed684 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/mget.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/mget.json @@ -26,28 +26,6 @@ "description":"The name of the index" } } - }, - { - "path":"/{index}/{type}/_mget", - "methods":[ - "GET", - "POST" - ], - "parts":{ - "index":{ - "type":"string", - "description":"The name of the index" - }, - "type":{ - "type":"string", - "description":"The type of the document", - "deprecated":true - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"Specifying types in urls has been deprecated" - } } ] }, @@ -86,7 +64,7 @@ } }, "body":{ - "description":"Document identifiers; can be either `docs` (containing full document information) or `ids` (when index and type is provided in the URL.", + "description":"Document identifiers; can be either `docs` (containing full document information) or `ids` (when index is provided in the URL.", "required":true } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/10_basic.yml index 59612af74616c..8c8c6d50abf41 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/10_basic.yml @@ -1,10 +1,5 @@ --- "Array of objects": - - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: bulk: refresh: true @@ -28,11 +23,6 @@ --- "Empty _id": - - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: bulk: refresh: true @@ -107,12 +97,8 @@ --- "empty action": - - skip: - version: " - 6.99.99" - features: headers - reason: types are required in requests before 7.0.0 - + features: headers - do: catch: /Malformed action\/metadata line \[3\], expected FIELD_NAME but found \[END_OBJECT\]/ headers: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/11_basic_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/11_basic_with_types.yml deleted file mode 100644 index 6bebed7bc1dd0..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/11_basic_with_types.yml +++ /dev/null @@ -1,120 +0,0 @@ ---- -"Array of objects": - - do: - bulk: - refresh: true - body: - - index: - _index: test_index - _type: test_type - _id: test_id - - f1: v1 - f2: 42 - - index: - _index: test_index - _type: test_type - _id: test_id2 - - f1: v2 - f2: 47 - - - do: - count: - index: test_index - - - match: {count: 2} - ---- -"Empty _id": - - do: - bulk: - refresh: true - body: - - index: - _index: test - _type: type - _id: '' - - f: 1 - - index: - _index: test - _type: type - _id: id - - f: 2 - - index: - _index: test - _type: type - - f: 3 - - match: { errors: true } - - match: { items.0.index.status: 400 } - - match: { items.0.index.error.type: illegal_argument_exception } - - match: { items.0.index.error.reason: if _id is specified it must not be empty } - - match: { items.1.index.result: created } - - match: { items.2.index.result: created } - - - do: - count: - index: test - - - match: { count: 2 } - ---- -"Empty _id with op_type create": - - skip: - version: " - 7.4.99" - reason: "auto id + op type create only supported since 7.5" - - - do: - bulk: - refresh: true - 
body: - - index: - _index: test - _type: type - _id: '' - - f: 1 - - index: - _index: test - _type: type - _id: id - - f: 2 - - index: - _index: test - _type: type - - f: 3 - - create: - _index: test - _type: type - - f: 4 - - index: - _index: test - _type: type - op_type: create - - f: 5 - - match: { errors: true } - - match: { items.0.index.status: 400 } - - match: { items.0.index.error.type: illegal_argument_exception } - - match: { items.0.index.error.reason: if _id is specified it must not be empty } - - match: { items.1.index.result: created } - - match: { items.2.index.result: created } - - match: { items.3.create.result: created } - - match: { items.4.create.result: created } - - - do: - count: - index: test - - - match: { count: 4 } - ---- -"empty action": - - skip: - features: headers - - - do: - catch: /Malformed action\/metadata line \[3\], expected FIELD_NAME but found \[END_OBJECT\]/ - headers: - Content-Type: application/json - bulk: - body: | - {"index": {"_index": "test_index", "_type": "test_type", "_id": "test_id"}} - {"f1": "v1", "f2": 42} - {} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/20_list_of_strings.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/20_list_of_strings.yml index b23517f6a8f25..3d956dce54289 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/20_list_of_strings.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/20_list_of_strings.yml @@ -1,9 +1,5 @@ --- "List of strings": - - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - do: bulk: refresh: true diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/21_list_of_strings_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/21_list_of_strings_with_types.yml deleted file mode 100644 index def91f4280722..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/21_list_of_strings_with_types.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -"List of strings": - - do: - bulk: - refresh: true - body: - - '{"index": {"_index": "test_index", "_type": "test_type", "_id": "test_id"}}' - - '{"f1": "v1", "f2": 42}' - - '{"index": {"_index": "test_index", "_type": "test_type", "_id": "test_id2"}}' - - '{"f1": "v2", "f2": 47}' - - - do: - count: - index: test_index - - - match: {count: 2} - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/30_big_string.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/30_big_string.yml index 38706d133e44b..8b6467eeed975 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/30_big_string.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/30_big_string.yml @@ -1,9 +1,5 @@ --- "One big string": - - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - do: bulk: refresh: true diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/31_big_string_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/31_big_string_with_types.yml deleted file mode 100644 index 1d117253c9b01..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/31_big_string_with_types.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -"One big string": - - do: - bulk: - refresh: true - body: | - {"index": {"_index": "test_index", "_type": "test_type", "_id": "test_id"}} - {"f1": "v1", "f2": 42} - {"index": {"_index": "test_index", "_type": "test_type", "_id": "test_id2"}} - {"f1": "v2", "f2": 47} - - - do: - count: - 
index: test_index - - - match: {count: 2} - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/40_source.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/40_source.yml index 5e783d60d3d46..e29e84740ee5c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/40_source.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/40_source.yml @@ -1,9 +1,5 @@ --- "Source filtering": - - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - do: index: refresh: true diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/41_source_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/41_source_with_types.yml deleted file mode 100644 index 3c8a86c13bdac..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/41_source_with_types.yml +++ /dev/null @@ -1,76 +0,0 @@ ---- -"Source filtering": - - do: - index: - refresh: true - index: test_index - type: test_type - id: test_id_1 - body: { "foo": "bar", "bar": "foo" } - - - do: - index: - refresh: true - index: test_index - type: test_type - id: test_id_2 - body: { "foo": "qux", "bar": "pux" } - - - do: - index: - refresh: true - index: test_index - type: test_type - id: test_id_3 - body: { "foo": "corge", "bar": "forge" } - - - - do: - bulk: - refresh: true - body: | - { "update": { "_index": "test_index", "_type": "test_type", "_id": "test_id_1", "_source": true } } - { "doc": { "foo": "baz" } } - { "update": { "_index": "test_index", "_type": "test_type", "_id": "test_id_2" } } - { "_source": true, "doc": { "foo": "quux" } } - - - match: { items.0.update.get._source.foo: baz } - - match: { items.1.update.get._source.foo: quux } - - - do: - bulk: - index: test_index - type: test_type - _source: true - body: | - { "update": { "_id": "test_id_3" } } - { "doc": { "foo": "garply" } } - - - match: { items.0.update.get._source.foo: garply } - - - do: - bulk: - refresh: true - body: | - { "update": { "_index": "test_index", "_type": "test_type", "_id": "test_id_1", "_source": {"includes": "bar"} } } - { "doc": { "foo": "baz" } } - { "update": { "_index": "test_index", "_type": "test_type", "_id": "test_id_2" } } - { "_source": {"includes": "foo"}, "doc": { "foo": "quux" } } - - - match: { items.0.update.get._source.bar: foo } - - is_false: items.0.update.get._source.foo - - match: { items.1.update.get._source.foo: quux } - - is_false: items.1.update.get._source.bar - - - do: - bulk: - index: test_index - type: test_type - _source_includes: foo - body: | - { "update": { "_id": "test_id_3" } } - { "doc": { "foo": "garply" } } - - - match: { items.0.update.get._source.foo: garply } - - is_false: items.0.update.get._source.bar - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/50_refresh.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/50_refresh.yml index 77098779c0c4f..34fc94691c21a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/50_refresh.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/50_refresh.yml @@ -1,9 +1,5 @@ --- "refresh=true immediately makes changes are visible in search": - - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - do: bulk: refresh: true @@ -20,10 +16,6 @@ --- "refresh=empty string immediately makes changes are visible in search": - - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - do: bulk: refresh: "" @@ -41,10 +33,6 @@ --- 
"refresh=wait_for waits until changes are visible in search": - - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - do: bulk: refresh: wait_for diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/51_refresh_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/51_refresh_with_types.yml deleted file mode 100644 index 6326b9464caa0..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/51_refresh_with_types.yml +++ /dev/null @@ -1,48 +0,0 @@ ---- -"refresh=true immediately makes changes are visible in search": - - do: - bulk: - refresh: true - body: | - {"index": {"_index": "bulk_50_refresh_1", "_type": "test_type", "_id": "bulk_50_refresh_id1"}} - {"f1": "v1", "f2": 42} - {"index": {"_index": "bulk_50_refresh_1", "_type": "test_type", "_id": "bulk_50_refresh_id2"}} - {"f1": "v2", "f2": 47} - - - do: - count: - index: bulk_50_refresh_1 - - match: {count: 2} - ---- -"refresh=empty string immediately makes changes are visible in search": - - do: - bulk: - refresh: "" - body: | - {"index": {"_index": "bulk_50_refresh_2", "_type": "test_type", "_id": "bulk_50_refresh_id3"}} - {"f1": "v1", "f2": 42} - {"index": {"_index": "bulk_50_refresh_2", "_type": "test_type", "_id": "bulk_50_refresh_id4"}} - {"f1": "v2", "f2": 47} - - - do: - count: - index: bulk_50_refresh_2 - - match: {count: 2} - - ---- -"refresh=wait_for waits until changes are visible in search": - - do: - bulk: - refresh: wait_for - body: | - {"index": {"_index": "bulk_50_refresh_3", "_type": "test_type", "_id": "bulk_50_refresh_id5"}} - {"f1": "v1", "f2": 42} - {"index": {"_index": "bulk_50_refresh_3", "_type": "test_type", "_id": "bulk_50_refresh_id6"}} - {"f1": "v2", "f2": 47} - - - do: - count: - index: bulk_50_refresh_3 - - match: {count: 2} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/60_deprecated.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/60_deprecated.yml index 1401fcc086208..8c8a840eb3f47 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/60_deprecated.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/60_deprecated.yml @@ -1,12 +1,6 @@ --- "Deprecated parameters should fail in Bulk query": - - - skip: - version: " - 6.99.99" - reason: some parameters are removed starting from 7.0, their equivalents without underscore are used instead - features: "warnings" - - do: catch: bad_request bulk: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/70_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/70_mix_typeless_typeful.yml deleted file mode 100644 index cad0891b21e52..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/70_mix_typeless_typeful.yml +++ /dev/null @@ -1,35 +0,0 @@ ---- -"bulk without types on an index that has types": - - - skip: - version: " - 6.99.99" - reason: Typeless APIs were introduced in 7.0.0 - - - do: - indices.create: # not using include_type_name: false on purpose - include_type_name: true - index: index - body: - mappings: - not_doc: - properties: - foo: - type: "keyword" - - do: - bulk: - refresh: true - body: - - index: - _index: index - _id: 0 - - foo: bar - - index: - _index: index - _id: 1 - - foo: bar - - - do: - count: - index: index - - - match: {count: 2} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/80_cas.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/80_cas.yml index 902621cfba578..87d3d237d42cb 
100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/80_cas.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/80_cas.yml @@ -1,10 +1,5 @@ --- "Compare And Swap Sequence Numbers": - - - skip: - version: " - 6.99.99" - reason: typeless API are add in 7.0.0 - - do: index: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/81_cas_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/81_cas_with_types.yml deleted file mode 100644 index 101316e7bf504..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/81_cas_with_types.yml +++ /dev/null @@ -1,45 +0,0 @@ ---- -"Compare And Swap Sequence Numbers": - - - skip: - version: " - 6.6.99" - reason: cas operations with sequence numbers was added in 6.7 - - - do: - index: - index: test_1 - type: _doc - id: 1 - body: { foo: bar } - - match: { _version: 1} - - set: { _seq_no: seqno } - - set: { _primary_term: primary_term } - - - do: - bulk: - body: - - index: - _index: test_1 - _type: _doc - _id: 1 - if_seq_no: 10000 - if_primary_term: $primary_term - - foo: bar2 - - - match: { errors: true } - - match: { items.0.index.status: 409 } - - match: { items.0.index.error.type: version_conflict_engine_exception } - - - do: - bulk: - body: - - index: - _index: test_1 - _type: _doc - _id: 1 - if_seq_no: $seqno - if_primary_term: $primary_term - - foo: bar2 - - - match: { errors: false} - - match: { items.0.index.status: 200 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.health/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.health/10_basic.yml index aa6c96202eaf4..db74e51cc2f91 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.health/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.health/10_basic.yml @@ -94,10 +94,6 @@ --- "cluster health basic test, one index with wait for no initializing shards": - - skip: - version: " - 6.1.99" - reason: "wait_for_no_initializing_shards is introduced in 6.2.0" - - do: indices.create: index: test_index diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.stats/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.stats/10_basic.yml index a40f4803ab0b8..a0432fa7aa558 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.stats/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.stats/10_basic.yml @@ -75,10 +75,6 @@ --- "get cluster stats returns discovery types": - - skip: - version: " - 6.99.99" - reason: "discovery types are added for v7.0.0" - - do: cluster.stats: {} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/count/11_basic_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/count/11_basic_with_types.yml deleted file mode 100644 index 48cfc610b435e..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/count/11_basic_with_types.yml +++ /dev/null @@ -1,66 +0,0 @@ -setup: - - do: - indices.create: - index: test - - do: - index: - index: test - type: test - id: 1 - body: { foo: bar } - - - do: - indices.refresh: - index: [test] - ---- -"count with body": - - do: - count: - index: test - type: test - body: - query: - match: - foo: bar - - - match: {count : 1} - - - do: - count: - index: test - body: - query: - match: - foo: test - - - match: {count : 0} - ---- -"count with empty body": -# empty body should default to match_all query - - do: - count: - index: test - type: 
test - body: { } - - - match: {count : 1} - - - do: - count: - index: test - type: test - - - match: {count : 1} - ---- -"count body without query element": - - do: - catch: bad_request - count: - index: test - type: test - body: - match: - foo: bar diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/11_with_id_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/11_with_id_with_types.yml deleted file mode 100644 index 1e58c38c7b589..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/11_with_id_with_types.yml +++ /dev/null @@ -1,33 +0,0 @@ ---- -"Create with ID": - - do: - create: - index: test_1 - type: test - id: 1 - body: { foo: bar } - - - match: { _index: test_1 } - - match: { _type: test } - - match: { _id: "1"} - - match: { _version: 1} - - - do: - get: - index: test_1 - type: test - id: 1 - - - match: { _index: test_1 } - - match: { _type: test } - - match: { _id: "1"} - - match: { _version: 1} - - match: { _source: { foo: bar }} - - - do: - catch: conflict - create: - index: test_1 - type: test - id: 1 - body: { foo: bar } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/15_without_id_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/15_without_id_with_types.yml deleted file mode 100644 index ab9932819381f..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/15_without_id_with_types.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -"Create without ID": - - do: - catch: param - create: - index: test_1 - type: test - body: { foo: bar } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/36_external_version_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/36_external_version_with_types.yml deleted file mode 100644 index cb8c041d7102c..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/36_external_version_with_types.yml +++ /dev/null @@ -1,30 +0,0 @@ ---- -"External version": - - - do: - catch: bad_request - create: - index: test - type: test - id: 1 - body: { foo: bar } - version_type: external - version: 0 - - - match: { status: 400 } - - match: { error.type: action_request_validation_exception } - - match: { error.reason: "Validation Failed: 1: create operations only support internal versioning. use index instead;" } - - - do: - catch: bad_request - create: - index: test - type: test - id: 2 - body: { foo: bar } - version_type: external - version: 5 - - - match: { status: 400 } - - match: { error.type: action_request_validation_exception } - - match: { error.reason: "Validation Failed: 1: create operations only support internal versioning. 
use index instead;" } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/41_routing_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/41_routing_with_types.yml deleted file mode 100644 index 752489f722c9e..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/41_routing_with_types.yml +++ /dev/null @@ -1,43 +0,0 @@ ---- -"Routing": - - - do: - indices.create: - index: test_1 - body: - settings: - index: - number_of_shards: 5 - number_of_routing_shards: 5 - number_of_replicas: 0 - - - do: - cluster.health: - wait_for_status: green - - - do: - create: - index: test_1 - type: test - id: 1 - routing: 5 - body: { foo: bar } - - - do: - get: - index: test_1 - type: test - id: 1 - routing: 5 - stored_fields: [_routing] - - - match: { _id: "1"} - - match: { _routing: "5"} - - - do: - catch: missing - get: - index: test_1 - type: test - id: 1 - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/61_refresh_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/61_refresh_with_types.yml deleted file mode 100644 index e24bdf4260340..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/61_refresh_with_types.yml +++ /dev/null @@ -1,82 +0,0 @@ ---- -"Refresh": - - - do: - indices.create: - index: test_1 - body: - settings: - index.refresh_interval: -1 - number_of_replicas: 0 - - do: - create: - index: test_1 - type: test - id: 1 - body: { foo: bar } - - - do: - search: - rest_total_hits_as_int: true - index: test_1 - body: - query: { term: { _id: 1 }} - - - match: { hits.total: 0 } - - - do: - create: - index: test_1 - type: test - id: 2 - refresh: true - body: { foo: bar } - - is_true: forced_refresh - - - do: - search: - rest_total_hits_as_int: true - index: test_1 - body: - query: { term: { _id: 2 }} - - - match: { hits.total: 1 } - ---- -"When refresh url parameter is an empty string that means \"refresh immediately\"": - - do: - create: - index: test_1 - type: test - id: 1 - refresh: "" - body: { foo: bar } - - is_true: forced_refresh - - - do: - search: - rest_total_hits_as_int: true - index: test_1 - body: - query: { term: { _id: 1 }} - - - match: { hits.total: 1 } - ---- -"refresh=wait_for waits until changes are visible in search": - - do: - index: - index: create_60_refresh_1 - type: test - id: create_60_refresh_id1 - body: { foo: bar } - refresh: wait_for - - is_false: forced_refresh - - - do: - search: - rest_total_hits_as_int: true - index: create_60_refresh_1 - body: - query: { term: { _id: create_60_refresh_id1 }} - - match: { hits.total: 1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/70_nested.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/70_nested.yml index e6d2413f16788..6b4e7ccc48ca6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/70_nested.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/70_nested.yml @@ -1,8 +1,5 @@ --- setup: - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - do: indices.create: index: test_1 @@ -16,9 +13,6 @@ setup: --- "Indexing a doc with No. nested objects less or equal to index.mapping.nested_objects.limit should succeed": - - skip: - version: " - 6.99.99" - reason: index.mapping.nested_objects setting has been added in 7.0.0 - do: create: index: test_1 @@ -29,9 +23,6 @@ setup: --- "Indexing a doc with No. 
nested objects more than index.mapping.nested_objects.limit should fail": - - skip: - version: " - 6.99.99" - reason: index.mapping.nested_objects setting has been added in 7.0.0 - do: catch: /The number of nested documents has exceeded the allowed limit of \[2\]. This limit can be set by changing the \[index.mapping.nested_objects.limit\] index level setting\./ create: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/71_nested_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/71_nested_with_types.yml deleted file mode 100644 index 755aaca448b0b..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/71_nested_with_types.yml +++ /dev/null @@ -1,42 +0,0 @@ ---- -setup: - - do: - indices.create: - include_type_name: true - index: test_1 - body: - settings: - index.mapping.nested_objects.limit: 2 - mappings: - test_type: - properties: - nested1: - type: nested - ---- -"Indexing a doc with No. nested objects less or equal to index.mapping.nested_objects.limit should succeed": - - skip: - version: " - 6.99.99" - reason: index.mapping.nested_objects setting has been added in 7.0.0 - - do: - create: - index: test_1 - type: test_type - id: 1 - body: - "nested1" : [ { "foo": "bar" }, { "foo": "bar2" } ] - - match: { _version: 1} - ---- -"Indexing a doc with No. nested objects more than index.mapping.nested_objects.limit should fail": - - skip: - version: " - 6.99.99" - reason: index.mapping.nested_objects setting has been added in 7.0.0 - - do: - catch: /The number of nested documents has exceeded the allowed limit of \[2\]. This limit can be set by changing the \[index.mapping.nested_objects.limit\] index level setting\./ - create: - index: test_1 - type: test_type - id: 1 - body: - "nested1" : [ { "foo": "bar" }, { "foo": "bar2" }, { "foo": "bar3" } ] diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/11_shard_header.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/11_shard_header.yml index 3fc10bc8db12d..6a2f852b221c2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/11_shard_header.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/11_shard_header.yml @@ -29,7 +29,6 @@ id: 1 - match: { _index: foobar } - - match: { _type: _doc } - match: { _id: "1"} - match: { _version: 2} - match: { _shards.total: 1} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/13_basic_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/13_basic_with_types.yml deleted file mode 100644 index a3671d5ac24b0..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/13_basic_with_types.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- -"Basic": - - - do: - index: - index: test_1 - type: test - id: 1 - body: { foo: bar } - - - match: { _version: 1 } - - - do: - delete: - index: test_1 - type: test - id: 1 - - - match: { _version: 2 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/14_shard_header_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/14_shard_header_with_types.yml deleted file mode 100644 index d1bb4c0df347d..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/14_shard_header_with_types.yml +++ /dev/null @@ -1,36 +0,0 @@ ---- -"Delete check shard header": - - - do: - indices.create: - index: foobar - body: - settings: - number_of_shards: "1" - number_of_replicas: "0" - - - do: - cluster.health: - wait_for_status: green - - - do: - 
index: - index: foobar - type: baz - id: 1 - body: { foo: bar } - - - do: - delete: - index: foobar - type: baz - id: 1 - - - match: { _index: foobar } - - match: { _type: baz } - - match: { _id: "1"} - - match: { _version: 2} - - match: { _shards.total: 1} - - match: { _shards.successful: 1} - - match: { _shards.failed: 0} - - is_false: _shards.pending diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/15_result_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/15_result_with_types.yml deleted file mode 100644 index d01e88be8ad0b..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/15_result_with_types.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -"Delete result field": - - - do: - index: - index: test_1 - type: test - id: 1 - body: { foo: bar } - - - do: - delete: - index: test_1 - type: test - id: 1 - - - match: { result: deleted } - - - do: - catch: missing - delete: - index: test_1 - type: test - id: 1 - - - match: { result: not_found } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/21_cas_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/21_cas_with_types.yml deleted file mode 100644 index ef352a9bad6b1..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/21_cas_with_types.yml +++ /dev/null @@ -1,30 +0,0 @@ ---- -"Internal version": - - - do: - index: - index: test_1 - type: test - id: 1 - body: { foo: bar } - - - match: { _seq_no: 0 } - - - do: - catch: conflict - delete: - index: test_1 - type: test - id: 1 - if_seq_no: 2 - if_primary_term: 1 - - - do: - delete: - index: test_1 - type: test - id: 1 - if_seq_no: 0 - if_primary_term: 1 - - - match: { _seq_no: 1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/27_external_version_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/27_external_version_with_types.yml deleted file mode 100644 index 453d64d85bbc1..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/27_external_version_with_types.yml +++ /dev/null @@ -1,32 +0,0 @@ ---- -"External version": - - - do: - index: - index: test_1 - type: test - id: 1 - body: { foo: bar } - version_type: external - version: 5 - - - match: { _version: 5} - - - do: - catch: conflict - delete: - index: test_1 - type: test - id: 1 - version_type: external - version: 4 - - - do: - delete: - index: test_1 - type: test - id: 1 - version_type: external - version: 6 - - - match: { _version: 6} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/28_external_gte_version_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/28_external_gte_version_with_types.yml deleted file mode 100644 index 70f78c17faa63..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/28_external_gte_version_with_types.yml +++ /dev/null @@ -1,53 +0,0 @@ ---- -"External GTE version": - - - do: - index: - index: test_1 - type: test - id: 1 - body: { foo: bar } - version_type: external_gte - version: 5 - - - match: { _version: 5} - - - do: - catch: conflict - delete: - index: test_1 - type: test - id: 1 - version_type: external_gte - version: 4 - - - do: - delete: - index: test_1 - type: test - id: 1 - version_type: external_gte - version: 6 - - - match: { _version: 6} - - - do: - index: - index: test_1 - type: test - id: 1 - body: { foo: bar } - version_type: external_gte - version: 6 - - - match: { _version: 6} - - - do: - delete: - index: test_1 - 
type: test - id: 1 - version_type: external_gte - version: 6 - - - match: { _version: 6} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/31_routing_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/31_routing_with_types.yml deleted file mode 100644 index 6f67b3a03f401..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/31_routing_with_types.yml +++ /dev/null @@ -1,32 +0,0 @@ ---- -"Routing": - - - do: - indices.create: - index: test_1 - body: - settings: - number_of_shards: 5 - - do: - index: - index: test_1 - type: test - id: 1 - routing: 5 - body: { foo: bar } - - - do: - catch: missing - delete: - index: test_1 - type: test - id: 1 - routing: 4 - - - do: - delete: - index: test_1 - type: test - id: 1 - routing: 5 - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/51_refresh_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/51_refresh_with_types.yml deleted file mode 100644 index a901c1033f7c0..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/51_refresh_with_types.yml +++ /dev/null @@ -1,148 +0,0 @@ ---- -"Refresh": - - - do: - indices.create: - index: test_1 - body: - settings: - refresh_interval: -1 - number_of_shards: 5 - number_of_routing_shards: 5 - number_of_replicas: 0 - - do: - cluster.health: - wait_for_status: green - - - do: - index: - index: test_1 - type: test - id: 1 - body: { foo: bar } - refresh: true - -# If you wonder why this document get 3 as an id instead of 2, it is because the -# current routing algorithm would route 1 and 2 to the same shard while we need -# them to be different for this test to pass - - do: - index: - index: test_1 - type: test - id: 3 - body: { foo: bar } - refresh: true - - is_true: forced_refresh - - - do: - search: - rest_total_hits_as_int: true - index: test_1 - body: - query: { terms: { _id: [1,3] }} - - - match: { hits.total: 2 } - - - do: - delete: - index: test_1 - type: test - id: 1 - - - do: - search: - rest_total_hits_as_int: true - index: test_1 - body: - query: { terms: { _id: [1,3] }} - - - match: { hits.total: 2 } - - - do: - delete: - index: test_1 - type: test - id: 3 - refresh: true - -# If a replica shard where doc 1 is located gets initialized at this point, doc 1 -# won't be found by the following search as the shard gets automatically refreshed -# right before getting started. This is why this test only works with 0 replicas. 
- - - do: - search: - rest_total_hits_as_int: true - index: test_1 - body: - query: { terms: { _id: [1,3] }} - - - match: { hits.total: 1 } - ---- -"When refresh url parameter is an empty string that means \"refresh immediately\"": - - do: - index: - index: test_1 - type: test - id: 1 - body: { foo: bar } - refresh: true - - is_true: forced_refresh - - - do: - search: - rest_total_hits_as_int: true - index: test_1 - body: - query: { term: { _id: 1 }} - - match: { hits.total: 1 } - - - do: - delete: - index: test_1 - type: test - id: 1 - refresh: "" - - - do: - search: - rest_total_hits_as_int: true - index: test_1 - body: - query: { term: { _id: 1 }} - - match: { hits.total: 0 } - ---- -"refresh=wait_for waits until changes are visible in search": - - do: - index: - index: delete_50_refresh_1 - type: test - id: delete_50_refresh_id1 - body: { foo: bar } - refresh: true - - is_true: forced_refresh - - - do: - search: - rest_total_hits_as_int: true - index: delete_50_refresh_1 - body: - query: { term: { _id: delete_50_refresh_id1 }} - - match: { hits.total: 1 } - - - do: - delete: - index: delete_50_refresh_1 - type: test - id: delete_50_refresh_id1 - refresh: wait_for - - is_false: forced_refresh - - - do: - search: - rest_total_hits_as_int: true - index: delete_50_refresh_1 - body: - query: { term: { _id: delete_50_refresh_id1 }} - - match: { hits.total: 0 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/61_missing_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/61_missing_with_types.yml deleted file mode 100644 index 9cfdb48ae20aa..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/61_missing_with_types.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- -"Missing document with catch": - - - do: - catch: missing - delete: - index: test_1 - type: test - id: 1 - ---- -"Missing document with ignore": - - - do: - delete: - index: test_1 - type: test - id: 1 - ignore: 404 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/70_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/70_mix_typeless_typeful.yml deleted file mode 100644 index e0f20795e41ca..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/70_mix_typeless_typeful.yml +++ /dev/null @@ -1,43 +0,0 @@ ---- -"DELETE with typeless API on an index that has types": - - - skip: - version: " - 6.99.99" - reason: Typeless APIs were introduced in 7.0.0 - - - do: - indices.create: # not using include_type_name: false on purpose - include_type_name: true - index: index - body: - mappings: - not_doc: - properties: - foo: - type: "keyword" - - - do: - index: - index: index - type: not_doc - id: 1 - body: { foo: bar } - - - do: - catch: bad_request - delete: - index: index - type: some_random_type - id: 1 - - - match: { error.root_cause.0.reason: "/Rejecting.mapping.update.to.\\[index\\].as.the.final.mapping.would.have.more.than.1.type.*/" } - - - do: - delete: - index: index - id: 1 - - - match: { _index: "index" } - - match: { _type: "_doc" } - - match: { _id: "1"} - - match: { _version: 2} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/exists/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/exists/10_basic.yml index 1ab90e3efa83f..84f5fa67590e6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/exists/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/exists/10_basic.yml @@ -1,9 +1,5 @@ --- "Basic": - - skip: - version: " - 
6.99.99" - reason: types are required in requests before 7.0.0 - - do: exists: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/exists/11_basic_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/exists/11_basic_with_types.yml deleted file mode 100644 index 7e4e26b6b1c1c..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/exists/11_basic_with_types.yml +++ /dev/null @@ -1,36 +0,0 @@ ---- -"Basic": - - - do: - exists: - index: test_1 - type: test - id: 1 - - - is_false: '' - - - do: - index: - index: test_1 - type: test - id: 1 - body: { "foo": "bar" } - - - is_true: '' - - - do: - exists: - index: test_1 - type: test - id: 1 - - - is_true: '' - - - do: - exists: - index: test_1 - type: test - id: 1 - version: 1 - - - is_true: '' diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/exists/41_routing_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/exists/41_routing_with_types.yml deleted file mode 100644 index 25315628d7ece..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/exists/41_routing_with_types.yml +++ /dev/null @@ -1,41 +0,0 @@ ---- -"Routing": - - - do: - indices.create: - index: test_1 - body: - settings: - index: - number_of_shards: 5 - number_of_routing_shards: 5 - number_of_replicas: 0 - - - do: - cluster.health: - wait_for_status: green - - - do: - index: - index: test_1 - type: test - id: 1 - routing: 5 - body: { foo: bar } - - - do: - exists: - index: test_1 - type: test - id: 1 - routing: 5 - - - is_true: '' - - - do: - exists: - index: test_1 - type: test - id: 1 - - - is_false: '' diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/exists/61_realtime_refresh_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/exists/61_realtime_refresh_with_types.yml deleted file mode 100644 index df8c697e4a1fb..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/exists/61_realtime_refresh_with_types.yml +++ /dev/null @@ -1,50 +0,0 @@ ---- -"Realtime Refresh": - - - do: - indices.create: - index: test_1 - body: - settings: - index: - refresh_interval: -1 - number_of_replicas: 0 - - - do: - cluster.health: - wait_for_status: green - - - do: - index: - index: test_1 - type: test - id: 1 - body: { foo: bar } - - - do: - exists: - index: test_1 - type: test - id: 1 - realtime: false - - - is_false: '' - - - do: - exists: - index: test_1 - type: test - id: 1 - realtime: true - - - is_true: '' - - - do: - exists: - index: test_1 - type: test - id: 1 - realtime: false - refresh: true - - - is_true: '' diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/exists/70_defaults.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/exists/70_defaults.yml index 6fabdd59820cf..24e296130e405 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/exists/70_defaults.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/exists/70_defaults.yml @@ -1,9 +1,5 @@ --- "Client-side default type": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: index: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/exists/71_defaults_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/exists/71_defaults_with_types.yml deleted file mode 100644 index 2db28f6634bd6..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/exists/71_defaults_with_types.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -"Client-side default type": - - 
- do: - index: - index: test_1 - type: test - id: 1 - body: { "foo": "bar" } - - - do: - exists: - index: test_1 - type: _all - id: 1 - - - is_true: '' diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/explain/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/explain/10_basic.yml index bfe8da8d91519..6933d28a8492e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/explain/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/explain/10_basic.yml @@ -1,8 +1,4 @@ setup: - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: indices.create: index: test_1 @@ -34,7 +30,6 @@ setup: - is_true: matched - match: { explanation.value: 1 } - match: { _index: test_1 } - - match: { _type: _doc } - match: { _id: id_1 } --- @@ -51,7 +46,6 @@ setup: - is_true: matched - match: { explanation.value: 1 } - match: { _index: test_1 } - - match: { _type: _doc } - match: { _id: id_1 } --- diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/explain/11_basic_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/explain/11_basic_with_types.yml deleted file mode 100644 index 5f211435ae976..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/explain/11_basic_with_types.yml +++ /dev/null @@ -1,66 +0,0 @@ -setup: - - do: - indices.create: - index: test_1 - body: - aliases: - alias_1: - "filter" : { "term" : { "foo" : "bar"} } - - - do: - index: - index: test_1 - type: test - id: id_1 - body: { foo: bar, title: howdy } - - - do: - indices.refresh: {} - ---- -"Basic explain": - - - do: - explain: - index: test_1 - type: test - id: id_1 - body: - query: - match_all: {} - - - is_true: matched - - match: { explanation.value: 1 } - - match: { _index: test_1 } - - match: { _type: test } - - match: { _id: id_1 } - ---- -"Basic explain with alias": - - - do: - explain: - index: alias_1 - type: test - id: id_1 - body: - query: - match_all: {} - - - is_true: matched - - match: { explanation.value: 1 } - - match: { _index: test_1 } - - match: { _type: test } - - match: { _id: id_1 } - ---- -"Explain body without query element": - - do: - catch: bad_request - explain: - index: test_1 - type: test - id: id_1 - body: - match_all: {} - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/explain/20_source_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/explain/20_source_filtering.yml index ad596f980807b..3d2f42d31f4df 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/explain/20_source_filtering.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/explain/20_source_filtering.yml @@ -1,9 +1,5 @@ --- "Source filtering": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: index: index: test_1 @@ -16,7 +12,6 @@ - do: explain: { index: test_1, id: 1, _source: false, body: { query: { match_all: {}} } } - match: { _index: test_1 } - - match: { _type: _doc } - match: { _id: "1" } - is_false: get._source diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/explain/21_source_filtering_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/explain/21_source_filtering_with_types.yml deleted file mode 100644 index e13edf7be5046..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/explain/21_source_filtering_with_types.yml +++ /dev/null @@ -1,44 +0,0 @@ ---- -"Source filtering": - - do: - index: - index: test_1 - type: test - id: 1 - body: { "include": { 
"field1": "v1", "field2": "v2" }, "count": 1 } - - do: - indices.refresh: - index: test_1 - - - do: - explain: { index: test_1, type: test, id: 1, _source: false, body: { query: { match_all: {}} } } - - match: { _index: test_1 } - - match: { _type: test } - - match: { _id: "1" } - - is_false: get._source - - - do: - explain: { index: test_1, type: test, id: 1, _source: true, body: { query: { match_all: {}} } } - - match: { get._source.include.field1: v1 } - - - do: - explain: { index: test_1, type: test, id: 1, _source: include.field1, body: { query: { match_all: {}} } } - - match: { get._source.include.field1: v1 } - - is_false: get._source.include.field2 - - - do: - explain: { index: test_1, type: test, id: 1, _source_includes: include.field1, body: { query: { match_all: {}} } } - - match: { get._source.include.field1: v1 } - - is_false: get._source.include.field2 - - - do: - explain: { index: test_1, type: test, id: 1, _source_includes: "include.field1,include.field2", body: { query: { match_all: {}} } } - - match: { get._source.include.field1: v1 } - - match: { get._source.include.field2: v2 } - - is_false: get._source.count - - - do: - explain: { index: test_1, type: test, id: 1, _source_includes: include, _source_excludes: "*.field2", body: { query: { match_all: {}} } } - - match: { get._source.include.field1: v1 } - - is_false: get._source.include.field2 - - is_false: get._source.count diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/explain/30_query_string.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/explain/30_query_string.yml index ac34d4c2495f2..5c9b391ded6b8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/explain/30_query_string.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/explain/30_query_string.yml @@ -1,9 +1,5 @@ --- "explain with query_string parameters": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: indices.create: index: test diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/explain/31_query_string_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/explain/31_query_string_with_types.yml deleted file mode 100644 index b6930688acf2d..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/explain/31_query_string_with_types.yml +++ /dev/null @@ -1,71 +0,0 @@ ---- -"explain with query_string parameters": - - do: - indices.create: - include_type_name: true - index: test - body: - mappings: - test: - properties: - number: - type: integer - - - do: - index: - index: test - type: test - id: 1 - body: { field: foo bar} - - - do: - indices.refresh: - index: [test] - - - do: - explain: - index: test - type: test - id: 1 - q: bar - df: field - - - is_true: matched - - - do: - explain: - index: test - type: test - id: 1 - q: field:foo field:xyz - - - is_true: matched - - - do: - explain: - index: test - type: test - id: 1 - q: field:foo field:xyz - default_operator: AND - - - is_false: matched - - - do: - explain: - index: test - type: test - id: 1 - q: field:BA* - - - is_true: matched - - - do: - explain: - index: test - type: test - id: 1 - q: number:foo - lenient: true - - - is_false: matched diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/explain/40_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/explain/40_mix_typeless_typeful.yml deleted file mode 100644 index 36fdbaa6b6f78..0000000000000 --- 
a/rest-api-spec/src/main/resources/rest-api-spec/test/explain/40_mix_typeless_typeful.yml +++ /dev/null @@ -1,57 +0,0 @@ ---- -"Explain with typeless API on an index that has types": - - - skip: - version: " - 6.99.99" - reason: Typeless APIs were introduced in 7.0.0 - - - do: - indices.create: # not using include_type_name: false on purpose - include_type_name: true - index: index - body: - mappings: - not_doc: - properties: - foo: - type: "keyword" - - - do: - index: - index: index - type: not_doc - id: 1 - body: { foo: bar } - - - do: - indices.refresh: {} - - - do: - catch: missing - explain: - index: index - type: some_random_type - id: 1 - body: - query: - match_all: {} - - - match: { _index: "index" } - - match: { _type: "some_random_type" } - - match: { _id: "1"} - - match: { matched: false} - - - do: - explain: - index: index - type: _doc #todo: make _explain typeless and remove this - id: 1 - body: - query: - match_all: {} - - - match: { _index: "index" } - - match: { _type: "_doc" } - - match: { _id: "1"} - - is_true: matched - - match: { explanation.value: 1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/field_caps/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/field_caps/10_basic.yml index d125efa73011c..f1ae5c89e52a5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/field_caps/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/field_caps/10_basic.yml @@ -149,10 +149,6 @@ setup: - is_false: fields.geo.keyword.on_aggregatable_indices --- "Get date_nanos field caps": - - skip: - version: " - 6.99.99" - reason: date_nanos field mapping type has been introduced in 7.0 - - do: indices.create: include_type_name: false @@ -204,10 +200,6 @@ setup: - is_false: fields.object\.nested2.keyword.non_searchable_indices --- "Get object and nested field caps": - - skip: - version: " - 6.99.99" - reason: object and nested fields are returned since 7.0 - - do: field_caps: index: 'test1,test2,test3' diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/100_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/100_mix_typeless_typeful.yml deleted file mode 100644 index d13229dbffbc6..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/100_mix_typeless_typeful.yml +++ /dev/null @@ -1,47 +0,0 @@ ---- -"GET with typeless API on an index that has types": - - - skip: - version: " - 6.99.99" - reason: Typeless APIs were introduced in 7.0.0 - - - do: - indices.create: # not using include_type_name: false on purpose - include_type_name: true - index: index - body: - mappings: - not_doc: - properties: - foo: - type: "keyword" - - - do: - index: - index: index - type: not_doc - id: 1 - body: { foo: bar } - - - do: - catch: missing - get: - index: index - type: some_random_type - id: 1 - - - match: { _index: "index" } - - match: { _type: "some_random_type" } - - match: { _id: "1"} - - match: { found: false} - - - do: - get: - index: index - id: 1 - - - match: { _index: "index" } - - match: { _type: "_doc" } - - match: { _id: "1"} - - match: { _version: 1} - - match: { _source: { foo: bar }} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/10_basic.yml index 9183c70c29bce..822e96e405583 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/10_basic.yml @@ -1,10 +1,5 @@ --- "Basic": - -
skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: index: index: test_1 @@ -17,6 +12,5 @@ id: 中文 - match: { _index: test_1 } - - match: { _type: _doc } - match: { _id: 中文 } - match: { _source: { foo: "Hello: 中文" } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/11_basic_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/11_basic_with_types.yml deleted file mode 100644 index 0689f714d6416..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/11_basic_with_types.yml +++ /dev/null @@ -1,31 +0,0 @@ ---- -"Basic": - - - do: - index: - index: test_1 - type: test - id: 中文 - body: { "foo": "Hello: 中文" } - - - do: - get: - index: test_1 - type: test - id: 中文 - - - match: { _index: test_1 } - - match: { _type: test } - - match: { _id: 中文 } - - match: { _source: { foo: "Hello: 中文" } } - - - do: - get: - index: test_1 - type: _all - id: 中文 - - - match: { _index: test_1 } - - match: { _type: test } - - match: { _id: 中文 } - - match: { _source: { foo: "Hello: 中文" } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/15_default_values.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/15_default_values.yml index 67065270665cf..921397b238f51 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/15_default_values.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/15_default_values.yml @@ -1,9 +1,5 @@ --- "Default values": - - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - do: index: index: test_1 @@ -16,7 +12,6 @@ id: 1 - match: { _index: test_1 } - - match: { _type: _doc } - match: { _id: '1' } - match: { _source: { foo: "bar" } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/16_default_values_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/16_default_values_with_types.yml deleted file mode 100644 index 5e08112253ef0..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/16_default_values_with_types.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- -"Default values": - - - do: - index: - index: test_1 - type: test - id: 1 - body: { "foo": "bar" } - - - do: - get: - index: test_1 - type: _all - id: 1 - - - match: { _index: test_1 } - - match: { _type: test } - - match: { _id: '1' } - - match: { _source: { foo: "bar" } } - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/20_stored_fields.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/20_stored_fields.yml index ab27842e4516e..23c7e5cbc90a6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/20_stored_fields.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/20_stored_fields.yml @@ -1,9 +1,5 @@ --- "Stored fields": - - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - do: indices.create: index: test_1 @@ -29,7 +25,6 @@ stored_fields: foo - match: { _index: test_1 } - - match: { _type: _doc } - match: { _id: '1' } - match: { fields.foo: [bar] } - is_false: _source diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/21_stored_fields_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/21_stored_fields_with_types.yml deleted file mode 100644 index d1862fc0340d8..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/21_stored_fields_with_types.yml +++ /dev/null @@ -1,60 +0,0 @@ ---- -"Stored fields": - - - do: - indices.create: - 
include_type_name: true - index: test_1 - body: - mappings: - test: - properties: - foo: - type: keyword - store: true - count: - type: integer - store: true - - - do: - index: - index: test_1 - type: test - id: 1 - body: { "foo": "bar", "count": 1 } - - do: - get: - index: test_1 - type: test - id: 1 - stored_fields: foo - - - match: { _index: test_1 } - - match: { _type: test } - - match: { _id: '1' } - - match: { fields.foo: [bar] } - - is_false: _source - - - do: - get: - index: test_1 - type: test - id: 1 - stored_fields: [foo, count] - - - match: { fields.foo: [bar] } - - match: { fields.count: [1] } - - is_false: _source - - - do: - get: - index: test_1 - type: test - id: 1 - stored_fields: [foo, count, _source] - - - match: { fields.foo: [bar] } - - match: { fields.count: [1] } - - match: { _source.foo: bar } - - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/41_routing_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/41_routing_with_types.yml deleted file mode 100644 index 276346cda4f98..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/41_routing_with_types.yml +++ /dev/null @@ -1,43 +0,0 @@ ---- -"Routing": - - - do: - indices.create: - index: test_1 - body: - settings: - index: - number_of_shards: 5 - number_of_routing_shards: 5 - number_of_replicas: 0 - - - do: - cluster.health: - wait_for_status: green - - - do: - index: - index: test_1 - type: test - id: 1 - routing: 5 - body: { foo: bar } - - - do: - get: - index: test_1 - type: test - id: 1 - routing: 5 - stored_fields: [_routing] - - - match: { _id: "1"} - - match: { _routing: "5"} - - - do: - catch: missing - get: - index: test_1 - type: test - id: 1 - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/50_with_headers.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/50_with_headers.yml index 38130cee59810..d79a3bd300da8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/50_with_headers.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/50_with_headers.yml @@ -18,7 +18,6 @@ id: 1 - match: {_index: "test_1"} - - match: { _type: _doc } - match: {_id: "1"} - match: {_version: 1} - match: {found: true} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/51_with_headers_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/51_with_headers_with_types.yml deleted file mode 100644 index b88dbaafc4fb2..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/51_with_headers_with_types.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -"REST test with headers": - - skip: - features: ["headers", "yaml"] - - - do: - index: - index: test_1 - type: test - id: 1 - body: { "body": "foo" } - - - do: - headers: - Accept: application/yaml - get: - index: test_1 - type: _all - id: 1 - - - match: {_index: "test_1"} - - match: {_type: "test"} - - match: {_id: "1"} - - match: {_version: 1} - - match: {found: true} - - match: { _source: { body: foo }} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/61_realtime_refresh_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/61_realtime_refresh_with_types.yml deleted file mode 100644 index 7d02b4667efe7..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/61_realtime_refresh_with_types.yml +++ /dev/null @@ -1,49 +0,0 @@ ---- -"Realtime Refresh": - - - do: - indices.create: - index: test_1 - body: - settings: - index: - refresh_interval: -1 - 
number_of_replicas: 0 - - - do: - cluster.health: - wait_for_status: green - - - do: - index: - index: test_1 - type: test - id: 1 - body: { foo: bar } - - - do: - catch: missing - get: - index: test_1 - type: test - id: 1 - realtime: false - - - do: - get: - index: test_1 - type: test - id: 1 - realtime: true - - - is_true: found - - - do: - get: - index: test_1 - type: test - id: 1 - realtime: false - refresh: true - - - is_true: found diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/70_source_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/70_source_filtering.yml index f4a5ba39be3b8..8ef3ad708fc18 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/70_source_filtering.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/70_source_filtering.yml @@ -1,9 +1,5 @@ --- "Source filtering": - - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - do: indices.create: index: test_1 @@ -23,7 +19,6 @@ get: { index: test_1, id: 1, _source: false } - match: { _index: test_1 } - - match: { _type: _doc } - match: { _id: "1" } - is_false: _source @@ -62,7 +57,6 @@ _source: true - match: { _index: test_1 } - - match: { _type: _doc } - match: { _id: "1" } - match: { fields.count: [1] } - match: { _source.include.field1: v1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/71_source_filtering_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/71_source_filtering_with_types.yml deleted file mode 100644 index 3ac493c629f20..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/71_source_filtering_with_types.yml +++ /dev/null @@ -1,69 +0,0 @@ ---- -"Source filtering": - - - do: - indices.create: - include_type_name: true - index: test_1 - body: - mappings: - test: - properties: - count: - type: integer - store: true - - - do: - index: - index: test_1 - type: test - id: 1 - body: { "include": { "field1": "v1", "field2": "v2" }, "count": 1 } - - do: - get: { index: test_1, type: test, id: 1, _source: false } - - - match: { _index: test_1 } - - match: { _type: test } - - match: { _id: "1" } - - is_false: _source - - - do: - get: { index: test_1, type: test, id: 1, _source: true } - - match: { _source.include.field1: v1 } - - - do: - get: { index: test_1, type: test, id: 1, _source: include.field1 } - - match: { _source.include.field1: v1 } - - is_false: _source.include.field2 - - - do: - get: { index: test_1, type: test, id: 1, _source_includes: include.field1 } - - match: { _source.include.field1: v1 } - - is_false: _source.include.field2 - - - do: - get: { index: test_1, type: test, id: 1, _source_includes: "include.field1,include.field2" } - - match: { _source.include.field1: v1 } - - match: { _source.include.field2: v2 } - - is_false: _source.count - - - do: - get: { index: test_1, type: test, id: 1, _source_includes: include, _source_excludes: "*.field2" } - - match: { _source.include.field1: v1 } - - is_false: _source.include.field2 - - is_false: _source.count - - - - do: - get: - index: test_1 - type: test - id: 1 - stored_fields: count - _source: true - - - match: { _index: test_1 } - - match: { _type: test } - - match: { _id: "1" } - - match: { fields.count: [1] } - - match: { _source.include.field1: v1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/80_missing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/80_missing.yml index d7d8edfc65dcb..30efd759c1a65 100644 --- 
a/rest-api-spec/src/main/resources/rest-api-spec/test/get/80_missing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/80_missing.yml @@ -1,9 +1,5 @@ --- "Missing document with catch": - - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - do: catch: missing get: @@ -12,10 +8,6 @@ --- "Missing document with ignore": - - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - do: get: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/81_missing_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/81_missing_with_types.yml deleted file mode 100644 index a60d11388566d..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/81_missing_with_types.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- -"Missing document with catch": - - - do: - catch: missing - get: - index: test_1 - type: test - id: 1 - ---- -"Missing document with ignore": - - - do: - get: - index: test_1 - type: test - id: 1 - ignore: 404 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/91_versions_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/91_versions_with_types.yml deleted file mode 100644 index c6631b83b1867..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/91_versions_with_types.yml +++ /dev/null @@ -1,89 +0,0 @@ ---- -"Versions": - - - do: - index: - index: test_1 - type: test - id: 1 - body: { foo: bar } - - match: { _version: 1} - - - do: - index: - index: test_1 - type: test - id: 1 - body: { foo: bar } - - match: { _version: 2} - - - do: - get: - index: test_1 - type: test - id: 1 - version: 2 - - match: { _id: "1" } - - - do: - catch: conflict - get: - index: test_1 - type: test - id: 1 - version: 1 - - - do: - get: - index: test_1 - type: test - id: 1 - version: 2 - version_type: external - - match: { _id: "1" } - - - do: - catch: conflict - get: - index: test_1 - type: test - id: 1 - version: 10 - version_type: external - - - do: - catch: conflict - get: - index: test_1 - type: test - id: 1 - version: 1 - version_type: external - - - do: - get: - index: test_1 - type: test - id: 1 - version: 2 - version_type: external_gte - - match: { _id: "1" } - - - do: - catch: conflict - get: - index: test_1 - type: test - id: 1 - version: 10 - version_type: external_gte - - - do: - catch: conflict - get: - index: test_1 - type: test - id: 1 - version: 1 - version_type: external_gte - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/10_basic.yml index 6f81c430c883a..887e31f33d45e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/10_basic.yml @@ -1,10 +1,5 @@ --- "Basic": - - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: index: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/11_basic_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/11_basic_with_types.yml deleted file mode 100644 index 1446f569e86d8..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/11_basic_with_types.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -"Basic with types": - - - do: - index: - index: test_1 - type: test - id: 1 - body: { "foo": "bar" } - - - do: - get_source: - index: test_1 - 
type: test - id: 1 - - - match: { '': { foo: bar } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/15_default_values.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/15_default_values.yml index 57c11a1ca10e2..73fce7ce09bbf 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/15_default_values.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/15_default_values.yml @@ -1,11 +1,5 @@ --- "Default values": - - - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: index: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/16_default_values_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/16_default_values_with_types.yml deleted file mode 100644 index e2de7a9f0007c..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/16_default_values_with_types.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -"Default values": - - do: - index: - index: test_1 - type: test - id: 1 - body: { "foo": "bar" } - - - do: - get_source: - index: test_1 - type: test - id: 1 - - - match: { '': { foo: bar } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/41_routing_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/41_routing_with_types.yml deleted file mode 100644 index db53a33ba597e..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/41_routing_with_types.yml +++ /dev/null @@ -1,42 +0,0 @@ ---- -"Routing": - - - - do: - indices.create: - index: test_1 - body: - settings: - index: - number_of_shards: 5 - number_of_routing_shards: 5 - number_of_replicas: 0 - - - do: - cluster.health: - wait_for_status: green - - - do: - index: - index: test_1 - type: test - id: 1 - routing: 5 - body: { foo: bar } - - - do: - get_source: - index: test_1 - type: test - id: 1 - routing: 5 - - - match: { '': {foo: bar}} - - - do: - catch: missing - get_source: - index: test_1 - type: test - id: 1 - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/61_realtime_refresh_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/61_realtime_refresh_with_types.yml deleted file mode 100644 index f5b406de28b4a..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/61_realtime_refresh_with_types.yml +++ /dev/null @@ -1,49 +0,0 @@ ---- -"Realtime": - - - do: - indices.create: - index: test_1 - body: - settings: - refresh_interval: -1 - number_of_replicas: 0 - - - do: - cluster.health: - wait_for_status: green - - - do: - index: - index: test_1 - type: test - id: 1 - body: { foo: bar } - - - do: - catch: missing - get_source: - index: test_1 - type: test - id: 1 - realtime: false - - - do: - get_source: - index: test_1 - type: test - id: 1 - realtime: true - - - match: { '': {foo: bar}} - - - do: - get_source: - index: test_1 - type: test - id: 1 - realtime: false - refresh: true - - - match: { '': {foo: bar}} - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/70_source_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/70_source_filtering.yml index 2665458cea95d..0836979fbf83a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/70_source_filtering.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/70_source_filtering.yml @@ -1,11 +1,5 @@ --- "Source filtering": - - 
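# A minimal sketch, assuming the same test_1 fixture: the typed get_source tests
# removed in this patch migrate mechanically — only the `type:` line disappears,
# and the bare-source assertion via the empty-string path stays the same. The
# typeless form of the deleted "Basic with types" check would be:

  - do:
      index:
        index: test_1
        id: 1
        body: { "foo": "bar" }

  - do:
      get_source:
        index: test_1
        id: 1

  - match: { '': { foo: bar } }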
- - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: index: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/71_source_filtering_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/71_source_filtering_with_types.yml deleted file mode 100644 index b4f20fee53be2..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/71_source_filtering_with_types.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -"Source filtering": - - - - do: - index: - index: test_1 - type: test - id: 1 - body: { "include": { "field1": "v1", "field2": "v2" }, "count": 1 } - - - do: - get_source: { index: test_1, type: test, id: 1, _source_includes: include.field1 } - - match: { include.field1: v1 } - - is_false: include.field2 - - - do: - get_source: { index: test_1, type: test, id: 1, _source_includes: "include.field1,include.field2" } - - match: { include.field1: v1 } - - match: { include.field2: v2 } - - is_false: count - - - do: - get_source: { index: test_1, type: test, id: 1, _source_includes: include, _source_excludes: "*.field2" } - - match: { include.field1: v1 } - - is_false: include.field2 - - is_false: count diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/81_missing_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/81_missing_with_types.yml deleted file mode 100644 index 16eb5ea51e898..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/81_missing_with_types.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- -"Missing document with catch": - - - do: - catch: missing - get_source: - index: test_1 - type: test - id: 1 - ---- -"Missing document with ignore": - - - do: - get_source: - index: test_1 - type: test - id: 1 - ignore: 404 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/86_source_missing_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/86_source_missing_with_types.yml deleted file mode 100644 index d7cfced5164ec..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/86_source_missing_with_types.yml +++ /dev/null @@ -1,39 +0,0 @@ ---- -setup: - - - do: - indices.create: - include_type_name: true - index: test_1 - body: - mappings: - test: - _source: { enabled: false } - - - do: - index: - index: test_1 - type: test - id: 1 - body: { foo: bar } - - ---- -"Missing document source with catch": - - - do: - catch: missing - get_source: - index: test_1 - type: test - id: 1 - ---- -"Missing document source with ignore": - - - do: - get_source: - index: test_1 - type: test - id: 1 - ignore: 404 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/10_with_id.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/10_with_id.yml index a129dcab80d9a..97eb9be1547ba 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/10_with_id.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/10_with_id.yml @@ -12,7 +12,6 @@ body: { foo: bar } - match: { _index: test-weird-index-中文 } - - match: { _type: _doc } - match: { _id: "1"} - match: { _version: 1} @@ -22,7 +21,6 @@ id: 1 - match: { _index: test-weird-index-中文 } - - match: { _type: _doc } - match: { _id: "1"} - match: { _version: 1} - match: { _source: { foo: bar }} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/11_with_id_with_types.yml 
b/rest-api-spec/src/main/resources/rest-api-spec/test/index/11_with_id_with_types.yml deleted file mode 100644 index daac81849fb5e..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/11_with_id_with_types.yml +++ /dev/null @@ -1,34 +0,0 @@ ---- -"Index with ID": - - - do: - index: - index: test-weird-index-中文 - type: weird.type - id: 1 - body: { foo: bar } - - - match: { _index: test-weird-index-中文 } - - match: { _type: weird.type } - - match: { _id: "1"} - - match: { _version: 1} - - - do: - get: - index: test-weird-index-中文 - type: weird.type - id: 1 - - - match: { _index: test-weird-index-中文 } - - match: { _type: weird.type } - - match: { _id: "1"} - - match: { _version: 1} - - match: { _source: { foo: bar }} - - - do: - catch: bad_request - index: - index: idx - type: type - id: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - body: { foo: bar } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/12_result.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/12_result.yml index f8a50415a95ef..478a731828738 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/12_result.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/12_result.yml @@ -1,9 +1,5 @@ --- "Index result field": - - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - do: index: index: test_index diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/13_result_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/13_result_with_types.yml deleted file mode 100644 index 45ebe0bbd3dc1..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/13_result_with_types.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- -"Index result field": - - - do: - index: - index: test_index - type: test - id: 1 - body: { foo: bar } - - - match: { result: created } - - - do: - index: - index: test_index - type: test - id: 1 - body: { foo: bar } - op_type: index - - - match: { result: updated } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/15_without_id.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/15_without_id.yml index 073a4704b4ef8..54f203e3621bc 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/15_without_id.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/15_without_id.yml @@ -12,7 +12,6 @@ - is_true: _id - match: { _index: test_1 } - - match: { _type: _doc } - match: { _version: 1 } - set: { _id: id } @@ -22,7 +21,6 @@ id: '$id' - match: { _index: test_1 } - - match: { _type: _doc } - match: { _id: $id } - match: { _version: 1 } - match: { _source: { foo: bar }} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/16_without_id_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/16_without_id_with_types.yml deleted file mode 100644 
index 3fff0512b9602..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/16_without_id_with_types.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -"Index without ID": - - - do: - index: - index: test_1 - type: test - body: { foo: bar } - - - is_true: _id - - match: { _index: test_1 } - - match: { _type: test } - - match: { _version: 1 } - - set: { _id: id } - - - do: - get: - index: test_1 - type: test - id: '$id' - - - match: { _index: test_1 } - - match: { _type: test } - - match: { _id: $id } - - match: { _version: 1 } - - match: { _source: { foo: bar }} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/21_optype_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/21_optype_with_types.yml deleted file mode 100644 index 60ae26d46d07d..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/21_optype_with_types.yml +++ /dev/null @@ -1,29 +0,0 @@ ---- -"Optype": - - - do: - index: - index: test_1 - type: test - id: 1 - op_type: create - body: { foo: bar } - - - do: - catch: conflict - index: - index: test_1 - type: test - id: 1 - op_type: create - body: { foo: bar } - - - do: - index: - index: test_1 - type: test - id: 1 - op_type: index - body: { foo: bar } - - - match: { _version: 2 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/30_cas.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/30_cas.yml index 550582e9816eb..27534131782a5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/30_cas.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/30_cas.yml @@ -1,10 +1,5 @@ --- "Compare And Swap Sequence Numbers": - - - skip: - version: " - 6.99.99" - reason: typeless API was introduced in 7.0 - - do: index: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/37_external_version_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/37_external_version_with_types.yml deleted file mode 100644 index f17e6b749319d..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/37_external_version_with_types.yml +++ /dev/null @@ -1,55 +0,0 @@ ---- -"External version": - - - do: - index: - index: test_1 - type: test - id: 1 - body: { foo: bar } - version_type: external - version: 0 - - - match: { _version: 0 } - - - do: - index: - index: test_1 - type: test - id: 1 - body: { foo: bar } - version_type: external - version: 5 - - - match: { _version: 5 } - - - do: - catch: conflict - index: - index: test_1 - type: test - id: 1 - body: { foo: bar } - version_type: external - version: 5 - - - do: - catch: conflict - index: - index: test_1 - type: test - id: 1 - body: { foo: bar } - version_type: external - version: 0 - - - do: - index: - index: test_1 - type: test - id: 1 - body: { foo: bar } - version_type: external - version: 6 - - - match: { _version: 6} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/38_external_gte_version_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/38_external_gte_version_with_types.yml deleted file mode 100644 index dccbe02ea1400..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/38_external_gte_version_with_types.yml +++ /dev/null @@ -1,56 +0,0 @@ ---- -"External GTE version": - - - do: - index: - index: test_1 - type: test - id: 1
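# A minimal sketch, assuming the test_1 fixture: the external-versioning cases in
# the two files deleted here depend only on `version` and `version_type`, so the
# typeless rewrite again just drops `type:`. The first "External version" step
# would become:

  - do:
      index:
        index: test_1
        id: 1
        body: { foo: bar }
        version_type: external
        version: 0

  - match: { _version: 0 }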
- body: { foo: bar } - version_type: external_gte - version: 5 - - - match: { _version: 5} - - - do: - catch: conflict - index: - index: test_1 - type: test - id: 1 - body: { foo: bar } - version_type: external_gte - version: 0 - - - do: - index: - index: test_1 - type: test - id: 1 - body: { foo: bar2 } - version_type: external_gte - version: 5 - - - match: { _version: 5} - - - do: - index: - index: test_1 - type: test - id: 1 - body: { foo: bar2 } - version_type: external_gte - version: 6 - - - match: { _version: 6} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/41_routing_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/41_routing_with_types.yml deleted file mode 100644 index 5b0cf94f4236b..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/41_routing_with_types.yml +++ /dev/null @@ -1,43 +0,0 @@ ---- -"Routing": - - - do: - indices.create: - index: test_1 - body: - settings: - index: - number_of_shards: 5 - number_of_routing_shards: 5 - number_of_replicas: 0 - - - do: - cluster.health: - wait_for_status: green - - - do: - index: - index: test_1 - type: test - id: 1 - routing: 5 - body: { foo: bar } - - - do: - get: - index: test_1 - type: test - id: 1 - routing: 5 - stored_fields: [_routing] - - - match: { _id: "1"} - - match: { _routing: "5"} - - - do: - catch: missing - get: - index: test_1 - type: test - id: 1 - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/61_refresh_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/61_refresh_with_types.yml deleted file mode 100644 index be44cafd43020..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/61_refresh_with_types.yml +++ /dev/null @@ -1,83 +0,0 @@ ---- -"Refresh": - - - do: - indices.create: - index: test_1 - body: - settings: - index.refresh_interval: -1 - number_of_replicas: 0 - - - do: - index: - index: test_1 - type: test - id: 1 - body: { foo: bar } - - - do: - search: - rest_total_hits_as_int: true - index: test_1 - body: - query: { term: { _id: 1 }} - - - match: { hits.total: 0 } - - - do: - index: - index: test_1 - type: test - id: 2 - refresh: true - body: { foo: bar } - - is_true: forced_refresh - - - do: - search: - rest_total_hits_as_int: true - index: test_1 - body: - query: { term: { _id: 2 }} - - - match: { hits.total: 1 } - ---- -"When refresh url parameter is an empty string that means \"refresh immediately\"": - - do: - index: - index: test_1 - type: test - id: 1 - refresh: "" - body: { foo: bar } - - is_true: forced_refresh - - - do: - search: - rest_total_hits_as_int: true - index: test_1 - body: - query: { term: { _id: 1 }} - - - match: { hits.total: 1 } - ---- -"refresh=wait_for waits until changes are visible in search": - - do: - index: - index: index_60_refresh_1 - type: test - id: index_60_refresh_id1 - body: { foo: bar } - refresh: wait_for - - is_false: forced_refresh - - - do: - search: - rest_total_hits_as_int: true - index: index_60_refresh_1 - body: - query: { term: { _id: index_60_refresh_id1 }} - - match: { hits.total: 1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/70_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/70_mix_typeless_typeful.yml deleted file mode 100644 index f3629fbb7cc18..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/70_mix_typeless_typeful.yml +++ /dev/null @@ -1,102 +0,0 @@ ---- -"Index with typeless API on an index that has types": - - - 
skip: - version: " - 6.99.99" - reason: Typeless APIs were introduced in 7.0.0 - - - do: - indices.create: # not using include_type_name: false on purpose - include_type_name: true - index: index - body: - mappings: - not_doc: - properties: - foo: - type: "keyword" - - - do: - index: - index: index - id: 1 - body: { foo: bar } - - - match: { _index: "index" } - - match: { _type: "_doc" } - - match: { _id: "1"} - - match: { _version: 1} - - - do: - get: # not using typeless API on purpose - index: index - type: not_doc - id: 1 - - - match: { _index: "index" } - - match: { _type: "not_doc" } # the important bit to check - - match: { _id: "1"} - - match: { _version: 1} - - match: { _source: { foo: bar }} - - - - do: - index: - index: index - body: { foo: bar } - - - match: { _index: "index" } - - match: { _type: "_doc" } - - match: { _version: 1} - - set: { _id: id } - - - do: - get: # using typeful API on purpose - index: index - type: not_doc - id: '$id' - - - match: { _index: "index" } - - match: { _type: "not_doc" } # the important bit to check - - match: { _id: $id} - - match: { _version: 1} - - match: { _source: { foo: bar }} - ---- -"Index call that introduces new field mappings": - - - skip: - version: " - 6.99.99" - reason: Typeless APIs were introduced in 7.0.0 - - - do: - indices.create: # not using include_type_name: false on purpose - include_type_name: true - index: index - body: - mappings: - not_doc: - properties: - foo: - type: "keyword" - - do: - index: - index: index - id: 2 - body: { new_field: value } - - - match: { _index: "index" } - - match: { _type: "_doc" } - - match: { _id: "2" } - - match: { _version: 1 } - - - do: - get: # using typeful API on purpose - index: index - type: not_doc - id: 2 - - - match: { _index: "index" } - - match: { _type: "not_doc" } - - match: { _id: "2" } - - match: { _version: 1} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.analyze/20_analyze_limit.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.analyze/20_analyze_limit.yml index 87d3b77aee329..37a14d9abb669 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.analyze/20_analyze_limit.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.analyze/20_analyze_limit.yml @@ -9,9 +9,6 @@ setup: --- "_analyze with No. generated tokens less than or equal to index.analyze.max_token_count should succeed": - - skip: - version: " - 6.99.99" - reason: index.analyze.max_token_count setting has been added in 7.0.0 - do: indices.analyze: index: test_1 @@ -25,9 +22,6 @@ setup: --- "_analyze with No. generated tokens more than index.analyze.max_token_count should fail": - - skip: - version: " - 6.99.99" - reason: index.analyze.max_token_count setting has been added in 7.0.0 - do: catch: /The number of tokens produced by calling _analyze has exceeded the allowed maximum of \[3\]. This limit can be set by changing the \[index.analyze.max_token_count\] index level setting\./ indices.analyze: @@ -39,9 +33,6 @@ setup: --- "_analyze with explain with No. generated tokens more than index.analyze.max_token_count should fail": - - skip: - version: " - 6.99.99" - reason: index.analyze.max_token_count setting has been added in 7.0.0 - do: catch: /The number of tokens produced by calling _analyze has exceeded the allowed maximum of \[3\]. 
This limit can be set by changing the \[index.analyze.max_token_count\] index level setting\./ indices.analyze: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clear_cache/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clear_cache/10_basic.yml index 099226e41e6d3..94b23fb63adb5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clear_cache/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clear_cache/10_basic.yml @@ -11,10 +11,6 @@ --- "clear_cache with fielddata set to true": - - skip: - version: " - 6.2.99" - reason: fielddata was deprecated before 6.3.0 - - do: indices.clear_cache: fielddata: true diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/10_basic.yml index 412d29905ffc2..a4d1841ed7108 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/10_basic.yml @@ -66,7 +66,6 @@ setup: id: "1" - match: { _index: target } - - match: { _type: _doc } - match: { _id: "1" } - match: { _source: { foo: "hello world" } } @@ -77,7 +76,6 @@ setup: id: "2" - match: { _index: target } - - match: { _type: _doc } - match: { _id: "2" } - match: { _source: { foo: "hello world 2" } } @@ -88,7 +86,6 @@ setup: id: "3" - match: { _index: target } - - match: { _type: _doc } - match: { _id: "3" } - match: { _source: { foo: "hello world 3" } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/10_basic.yml index c8ede7cd90284..ce8a6604069ed 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/10_basic.yml @@ -1,9 +1,5 @@ --- "Create index with mappings": - - - skip: - version: " - 6.99.99" - reason: include_type_name defaults to true before 7.0.0 - do: indices.create: index: test_index @@ -19,10 +15,6 @@ --- "Create index with settings": - - - skip: - version: " - 6.99.99" - reason: include_type_name defaults to true before 7.0.0 - do: indices.create: index: test_index @@ -38,10 +30,6 @@ --- "Create index": - - - skip: - version: " - 6.99.99" - reason: include_type_name defaults to true before 7.0.0 - do: indices.create: index: test_index @@ -51,10 +39,6 @@ --- "Create index with wait_for_active_shards set to all": - - - skip: - version: " - 6.99.99" - reason: include_type_name defaults to true before 7.0.0 - do: indices.create: index: test_index @@ -68,10 +52,6 @@ --- "Create index with aliases": - - - skip: - version: " - 6.99.99" - reason: include_type_name defaults to true before 7.0.0 - do: indices.create: index: test_index @@ -102,9 +82,6 @@ --- "Create index with write aliases": - - skip: - version: " - 6.99.99" - reason: is_write_index is not implemented in ES <= 6.x - do: indices.create: index: test_index @@ -138,9 +115,6 @@ --- "Create index with explicit _doc type": - - skip: - version: " - 6.99.99" - reason: include_type_name defaults to true before 7.0 - do: catch: bad_request indices.create: @@ -155,19 +129,3 @@ - match: { error.type: "illegal_argument_exception" } - match: { error.reason: "The mapping definition cannot be nested under a type [_doc] unless include_type_name is set to true." 
} ---- -"Create index without soft deletes": - - skip: - version: " - 7.5.99" - reason: "indices without soft deletes are deprecated in 7.6" - features: "allowed_warnings" - - - do: - allowed_warnings: - - Creating indices with soft-deletes disabled is deprecated and will be removed in future OpenSearch versions. - Please do not specify value for setting [index.soft_deletes.enabled] of index [test_index]. - indices.create: - index: test_index - body: - settings: - soft_deletes.enabled: false diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/11_basic_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/11_basic_with_types.yml deleted file mode 100644 index f5aeb53751119..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/11_basic_with_types.yml +++ /dev/null @@ -1,143 +0,0 @@ ---- -"Create index with mappings": - - - do: - indices.create: - include_type_name: true - index: test_index - body: - mappings: - type_1: {} - - - do: - indices.get_mapping: - include_type_name: true - index: test_index - - - is_true: test_index.mappings.type_1 - ---- -"Create index with settings": - - - do: - indices.create: - include_type_name: true - index: test_index - body: - settings: - number_of_replicas: "0" - - - do: - indices.get_settings: - index: test_index - - - match: { test_index.settings.index.number_of_replicas: "0"} - ---- -"Create index": - - - do: - indices.create: - include_type_name: true - index: test_index - - - match: { acknowledged: true } - - match: { index: "test_index"} - ---- -"Create index with wait_for_active_shards set to all": - - - do: - indices.create: - include_type_name: true - index: test_index - wait_for_active_shards: all - body: - settings: - number_of_replicas: "0" - - - match: { acknowledged: true } - - match: { shards_acknowledged: true } - ---- -"Create index with aliases": - - - do: - indices.create: - include_type_name: true - index: test_index - body: - mappings: - type_1: - properties: - field: - type: text - aliases: - test_alias: {} - test_blias: - routing: b - test_clias: - filter: - term: - field : value - - - do: - indices.get_alias: - index: test_index - - - match: {test_index.aliases.test_blias.search_routing: b} - - match: {test_index.aliases.test_blias.index_routing: b} - - is_false: test_index.aliases.test_blias.filter - - match: {test_index.aliases.test_clias.filter.term.field: value} - - is_false: test_index.aliases.test_clias.index_routing - - is_false: test_index.aliases.test_clias.search_routing - ---- -"Create index with write aliases": - - skip: - version: " - 6.99.99" - reason: is_write_index is not implemented in ES <= 6.x - - do: - indices.create: - include_type_name: true - index: test_index - body: - aliases: - test_alias: {} - test_blias: - is_write_index: false - test_clias: - is_write_index: true - - - do: - indices.get_alias: - index: test_index - - - is_false: test_index.aliases.test_alias.is_write_index - - is_false: test_index.aliases.test_blias.is_write_index - - is_true: test_index.aliases.test_clias.is_write_index - ---- -"Create index with no type mappings": - - do: - catch: /illegal_argument_exception/ - indices.create: - include_type_name: true - index: test_index - body: - mappings: - "" : {} - ---- -"Create index with invalid mappings": - - do: - catch: /illegal_argument_exception/ - indices.create: - include_type_name: true - index: test_index - body: - mappings: - test_type: - properties: - "": - type: keyword diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/20_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/20_mix_typeless_typeful.yml deleted file mode 100644 index 9167574ea9a8e..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/20_mix_typeless_typeful.yml +++ /dev/null @@ -1,149 +0,0 @@ ---- -"Create a typeless index while there is a typed template": - - - skip: - version: " - 6.6.99" - reason: Merging typeless/typed mappings/templates was added in 6.7 - features: allowed_warnings - - - do: - indices.put_template: - include_type_name: true - name: test_template - body: - index_patterns: test-* - mappings: - my_type: - properties: - foo: - type: keyword - - - do: - allowed_warnings: - - "index [test-1] matches multiple legacy templates [global, test_template], composable templates will only match a single template" - indices.create: - index: test-1 - body: - mappings: - properties: - bar: - type: "long" - - - do: - indices.get_mapping: - include_type_name: true - index: test-1 - - - is_true: test-1.mappings._doc # the index creation call won - - is_false: test-1.mappings.my_type - - is_true: test-1.mappings._doc.properties.foo - - is_true: test-1.mappings._doc.properties.bar - ---- -"Create a typed index while there is a typeless template": - - - skip: - version: " - 6.6.99" - reason: Merging typeless/typed mappings/templates was added in 6.7 - features: allowed_warnings - - - do: - indices.put_template: - include_type_name: false - name: test_template - body: - index_patterns: test-* - mappings: - properties: - foo: - type: keyword - - - do: - allowed_warnings: - - "index [test-1] matches multiple legacy templates [global, test_template], composable templates will only match a single template" - indices.create: - include_type_name: true - index: test-1 - body: - mappings: - my_type: - properties: - bar: - type: "long" - - - do: - indices.get_mapping: - include_type_name: true - index: test-1 - - - is_true: test-1.mappings.my_type # the index creation call won - - is_false: test-1.mappings._doc - - is_true: test-1.mappings.my_type.properties.foo - - is_true: test-1.mappings.my_type.properties.bar - ---- -"Implicitly create a typed index while there is a typeless template": - - - skip: - version: " - 6.99.99" - reason: include_type_name only supported as of 6.7 - - - do: - indices.put_template: - include_type_name: false - name: test_template - body: - index_patterns: test-* - mappings: - properties: - foo: - type: keyword - - - do: - catch: /the final mapping would have more than 1 type/ - index: - index: test-1 - type: my_type - body: { bar: 42 } - ---- -"Implicitly create a typeless index while there is a typed template": - - - skip: - version: " - 6.99.99" - reason: needs typeless index operations to work on typed indices - features: allowed_warnings - - - do: - indices.put_template: - include_type_name: true - name: test_template - body: - index_patterns: test-* - mappings: - my_type: - properties: - foo: - type: keyword - - - do: - allowed_warnings: - - "index [test-1] matches multiple legacy templates [global, test_template], composable templates will only match a single template" - index: - index: test-1 - body: { bar: 42 } - -# ensures dynamic mapping update is visible to get_mapping - - do: - cluster.health: - wait_for_events: normal - - - do: - indices.get_mapping: - include_type_name: true - index: test-1 - - - is_true: test-1.mappings.my_type # the template is honored - - is_false: 
test-1.mappings._doc - is_true: test-1.mappings.my_type.properties.foo - is_true: test-1.mappings.my_type.properties.bar diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.flush/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.flush/10_basic.yml index 781d133153605..29b3c1208a7b5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.flush/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.flush/10_basic.yml @@ -1,38 +1,5 @@ ---- -"Index synced flush rest test": - skip: - version: " - 7.5.99" - reason: "synced flush is deprecated in 7.6" - features: "allowed_warnings" - do: - indices.create: - index: testing - body: - settings: - index: - number_of_replicas: 0 - - - do: - cluster.health: - wait_for_status: green - - do: - allowed_warnings: - - Synced flush is deprecated and will be removed in 8.0. Use flush at _/flush or /{index}/_flush instead. - indices.flush_synced: - index: testing - - - is_false: _shards.failed - - - do: - indices.stats: {level: shards} - - - is_true: indices.testing.shards.0.0.commit.user_data.sync_id - --- "Flush stats": - - skip: - version: " - 6.2.99" - reason: periodic flush stats is introduced in 6.3.0 - do: indices.create: index: test @@ -50,7 +17,6 @@ - do: index: index: test - type: doc id: 1 body: { "message": "a long message to make a periodic flush happen after this index operation" } - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get/11_basic_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get/11_basic_with_types.yml index 413c4bcb8d28c..85267f49b1317 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get/11_basic_with_types.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get/11_basic_with_types.yml @@ -44,10 +44,6 @@ setup: --- "Test include_type_name": - - skip: - version: " - 6.6.99" - reason: the include_type_name parameter is not supported before 6.7 - - do: indices.get: include_type_name: true @@ -66,10 +62,6 @@ setup: --- "Test include_type_name defaults to false": - - skip: - version: " - 6.99.99" - reason: the include_type_name parameter default is different on 6.x and 7.0, so only test this on 7.0 clusters - - do: indices.get: index: test_index diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_alias/30_wildcards.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_alias/30_wildcards.yml index 08b3009be0e88..389166a03136e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_alias/30_wildcards.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_alias/30_wildcards.yml @@ -26,9 +26,6 @@ setup: --- "Get aliases wildcard and simple exclusion": - - skip: - version: " - 6.99.99" - reason: Exclusions in the alias expression are not handled - do: indices.get_alias: name: test_blias_2,test_alias*,-test_alias_1 @@ -41,9 +38,6 @@ setup: --- "Get aliases and wildcard exclusion": - - skip: - version: " - 6.99.99" - reason: Exclusions in the alias expression are not handled - do: indices.get_alias: name: test_alias_1,test_blias_1,-test_alias* @@ -66,9 +60,6 @@ setup: --- "Non-existent exclusion alias before wildcard returns 404": - - skip: - version: " - 6.99.99" - reason: Exclusions in the alias expression are not handled - do: catch: missing indices.get_alias: @@ -97,9 +88,6 @@ setup: --- "Missing exclusions does not fire 404": - - skip: - version: " - 6.99.99" -
reason: Exclusions in the alias expression are not handled - do: indices.get_alias: name: test_alias*,-non-existent,test_blias*,-test @@ -112,9 +100,6 @@ setup: --- "Exclusion of non wildcarded aliases": - - skip: - version: " - 6.99.99" - reason: Exclusions in the alias expression are not handled - do: indices.get_alias: name: test_alias_1,test_blias_2,-test_alias*,-test_blias_2 @@ -123,9 +108,6 @@ setup: --- "Wildcard exclusions does not trigger 404": - - skip: - version: " - 6.99.99" - reason: Exclusions in the alias expression are not handled - do: catch: missing indices.get_alias: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/10_basic.yml index 84f2a0210fcf4..b132aa6bf03de 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/10_basic.yml @@ -1,8 +1,5 @@ --- setup: - - skip: - version: " - 6.99.99" - reason: include_type_name defaults to true before 7.0 - do: indices.create: index: test_index diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/11_basic_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/11_basic_with_types.yml deleted file mode 100644 index 0a7f5fa3560ba..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/11_basic_with_types.yml +++ /dev/null @@ -1,83 +0,0 @@ ---- -setup: - - do: - indices.create: - include_type_name: true - index: test_index - body: - mappings: - test_type: - properties: - text: - type: text - ---- -"Get field mapping with no index and type": - - - do: - indices.get_field_mapping: - include_type_name: true - fields: text - - - match: {test_index.mappings.test_type.text.mapping.text.type: text} - ---- -"Get field mapping by index only": - - do: - indices.get_field_mapping: - include_type_name: true - index: test_index - fields: text - - - match: {test_index.mappings.test_type.text.mapping.text.type: text} - ---- -"Get field mapping by type & field": - - - do: - indices.get_field_mapping: - include_type_name: true - index: test_index - type: test_type - fields: text - - - match: {test_index.mappings.test_type.text.mapping.text.type: text} - ---- -"Get field mapping by type & field, with another field that doesn't exist": - - - do: - indices.get_field_mapping: - include_type_name: true - index: test_index - type: test_type - fields: [ text , text1 ] - - - match: {test_index.mappings.test_type.text.mapping.text.type: text} - - is_false: test_index.mappings.test_type.text1 - ---- -"Get field mapping with include_defaults": - - - do: - indices.get_field_mapping: - include_type_name: true - index: test_index - type: test_type - fields: text - include_defaults: true - - - match: {test_index.mappings.test_type.text.mapping.text.type: text} - - match: {test_index.mappings.test_type.text.mapping.text.analyzer: default} - ---- -"Get field mapping should work without index specifying type and fields": - - - do: - indices.get_field_mapping: - include_type_name: true - type: test_type - fields: text - - - match: {test_index.mappings.test_type.text.mapping.text.type: text} - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/20_missing_field.yml 
b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/20_missing_field.yml index 1570ded351874..be6b1c3bb6d49 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/20_missing_field.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/20_missing_field.yml @@ -1,8 +1,5 @@ --- "Return empty object if field doesn't exist, but type and index do": - - skip: - version: " - 6.99.99" - reason: include_type_name defaults to true before 7.0 - do: indices.create: index: test_index diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/21_missing_field_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/21_missing_field_with_types.yml deleted file mode 100644 index 264d187ebd22d..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/21_missing_field_with_types.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -"Return empty object if field doesn't exist, but type and index do": - - - do: - indices.create: - include_type_name: true - index: test_index - body: - mappings: - test_type: - properties: - text: - type: text - analyzer: whitespace - - - do: - indices.get_field_mapping: - include_type_name: true - index: test_index - type: test_type - fields: not_existent - - - match: { '': {}} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/30_missing_type.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/30_missing_type.yml deleted file mode 100644 index 0bf3f1f7823ee..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/30_missing_type.yml +++ /dev/null @@ -1,22 +0,0 @@ ---- -"Raise 404 when type doesn't exist": - - - do: - indices.create: - include_type_name: true - index: test_index - body: - mappings: - test_type: - properties: - text: - type: text - analyzer: whitespace - - - do: - catch: missing - indices.get_field_mapping: - include_type_name: true - index: test_index - type: not_test_type - fields: text diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/50_field_wildcards.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/50_field_wildcards.yml index 7db61d122e7ce..2c9ff58b445df 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/50_field_wildcards.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/50_field_wildcards.yml @@ -1,8 +1,5 @@ --- setup: - - skip: - version: " - 6.99.99" - reason: include_type_name defaults to true before 7.0 - do: indices.create: index: test_index diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/51_field_wildcards_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/51_field_wildcards_with_types.yml deleted file mode 100644 index 68c183e9b292e..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/51_field_wildcards_with_types.yml +++ /dev/null @@ -1,144 +0,0 @@ ---- -setup: - - do: - indices.create: - include_type_name: true - index: test_index - body: - mappings: - test_type: - properties: - t1: - type: text - t2: - type: text - obj: - properties: - t1: - type: text - i_t1: - type: text - i_t3: - type: text - - - do: - indices.create: - include_type_name: true - index: 
test_index_2 - body: - mappings: - test_type_2: - properties: - t1: - type: text - t2: - type: text - obj: - properties: - t1: - type: text - i_t1: - type: text - i_t3: - type: text - ---- -"Get field mapping with * for fields": - - - do: - indices.get_field_mapping: - include_type_name: true - fields: "*" - - - match: {test_index.mappings.test_type.t1.full_name: t1 } - - match: {test_index.mappings.test_type.t2.full_name: t2 } - - match: {test_index.mappings.test_type.obj\.t1.full_name: obj.t1 } - - match: {test_index.mappings.test_type.obj\.i_t1.full_name: obj.i_t1 } - - match: {test_index.mappings.test_type.obj\.i_t3.full_name: obj.i_t3 } - ---- -"Get field mapping with t* for fields": - - - do: - indices.get_field_mapping: - include_type_name: true - index: test_index - fields: "t*" - - - match: {test_index.mappings.test_type.t1.full_name: t1 } - - match: {test_index.mappings.test_type.t2.full_name: t2 } - - length: {test_index.mappings.test_type: 2} - ---- -"Get field mapping with *t1 for fields": - - - do: - indices.get_field_mapping: - include_type_name: true - index: test_index - fields: "*t1" - - match: {test_index.mappings.test_type.t1.full_name: t1 } - - match: {test_index.mappings.test_type.obj\.t1.full_name: obj.t1 } - - match: {test_index.mappings.test_type.obj\.i_t1.full_name: obj.i_t1 } - - length: {test_index.mappings.test_type: 3} - ---- -"Get field mapping with wildcarded relative names": - - - do: - indices.get_field_mapping: - include_type_name: true - index: test_index - fields: "obj.i_*" - - match: {test_index.mappings.test_type.obj\.i_t1.full_name: obj.i_t1 } - - match: {test_index.mappings.test_type.obj\.i_t3.full_name: obj.i_t3 } - - length: {test_index.mappings.test_type: 2} - ---- -"Get field mapping should work using '_all' for indices and types": - - - do: - indices.get_field_mapping: - include_type_name: true - index: _all - type: _all - fields: "t*" - - match: {test_index.mappings.test_type.t1.full_name: t1 } - - match: {test_index.mappings.test_type.t2.full_name: t2 } - - length: {test_index.mappings.test_type: 2} - - match: {test_index_2.mappings.test_type_2.t1.full_name: t1 } - - match: {test_index_2.mappings.test_type_2.t2.full_name: t2 } - - length: {test_index_2.mappings.test_type_2: 2} - ---- -"Get field mapping should work using '*' for indices and types": - - - do: - indices.get_field_mapping: - include_type_name: true - index: '*' - type: '*' - fields: "t*" - - match: {test_index.mappings.test_type.t1.full_name: t1 } - - match: {test_index.mappings.test_type.t2.full_name: t2 } - - length: {test_index.mappings.test_type: 2} - - match: {test_index_2.mappings.test_type_2.t1.full_name: t1 } - - match: {test_index_2.mappings.test_type_2.t2.full_name: t2 } - - length: {test_index_2.mappings.test_type_2: 2} - ---- -"Get field mapping should work using comma_separated values for indices and types": - - - do: - indices.get_field_mapping: - include_type_name: true - index: 'test_index,test_index_2' - type: 'test_type,test_type_2' - fields: "t*" - - match: {test_index.mappings.test_type.t1.full_name: t1 } - - match: {test_index.mappings.test_type.t2.full_name: t2 } - - length: {test_index.mappings.test_type: 2} - - match: {test_index_2.mappings.test_type_2.t1.full_name: t1 } - - match: {test_index_2.mappings.test_type_2.t2.full_name: t2 } - - length: {test_index_2.mappings.test_type_2: 2} - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/60_mix_typeless_typeful.yml 
b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/60_mix_typeless_typeful.yml deleted file mode 100644 index 2b6433a3e98f8..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/60_mix_typeless_typeful.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- -"GET mapping with typeless API on an index that has types": - - - do: - indices.create: # not using include_type_name: false on purpose - include_type_name: true - index: index - body: - mappings: - not_doc: - properties: - foo: - type: "keyword" - - - do: - indices.get_field_mapping: - include_type_name: false - index: index - fields: foo - - - match: { index.mappings.foo.mapping.foo.type: "keyword" } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/10_basic.yml index c3addd95469d4..e46f67326a8d2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/10_basic.yml @@ -1,8 +1,5 @@ --- setup: - - skip: - version: " - 6.99.99" - reason: include_type_name defaults to true before 7.0 - do: indices.create: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/11_basic_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/11_basic_with_types.yml deleted file mode 100644 index 598cc24f7806b..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/11_basic_with_types.yml +++ /dev/null @@ -1,158 +0,0 @@ ---- -setup: - - do: - indices.create: - include_type_name: true - index: test_1 - body: - mappings: - doc: {} - - do: - indices.create: - include_type_name: true - index: test_2 - body: - mappings: - doc: {} ---- -"Get /{index}/_mapping with empty mappings": - - - do: - indices.create: - index: t - - - do: - indices.get_mapping: - include_type_name: true - index: t - - - match: { t.mappings: {}} - ---- -"Get /_mapping": - - - do: - indices.get_mapping: - include_type_name: true - - - is_true: test_1.mappings.doc - - is_true: test_2.mappings.doc - ---- -"Get /{index}/_mapping": - - - do: - indices.get_mapping: - include_type_name: true - index: test_1 - - - is_true: test_1.mappings.doc - - is_false: test_2 - - ---- -"Get /{index}/_mapping/_all": - - - do: - indices.get_mapping: - include_type_name: true - index: test_1 - type: _all - - - is_true: test_1.mappings.doc - - is_false: test_2 - ---- -"Get /{index}/_mapping/*": - - - do: - indices.get_mapping: - include_type_name: true - index: test_1 - type: '*' - - - is_true: test_1.mappings.doc - - is_false: test_2 - ---- -"Get /{index}/_mapping/{type}": - - - do: - indices.get_mapping: - include_type_name: true - index: test_1 - type: doc - - - is_true: test_1.mappings.doc - - is_false: test_2 - ---- -"Get /{index}/_mapping/{type*}": - - - do: - indices.get_mapping: - include_type_name: true - index: test_1 - type: 'd*' - - - is_true: test_1.mappings.doc - - is_false: test_2 - ---- -"Get /_mapping/{type}": - - - do: - indices.get_mapping: - include_type_name: true - type: doc - - - is_true: test_1.mappings.doc - - is_true: test_2.mappings.doc - ---- -"Get /_all/_mapping/{type}": - - - do: - indices.get_mapping: - include_type_name: true - index: _all - type: doc - - - is_true: test_1.mappings.doc - - is_true: test_2.mappings.doc - ---- -"Get /*/_mapping/{type}": - - - do: - 
indices.get_mapping: - include_type_name: true - index: '*' - type: doc - - - is_true: test_1.mappings.doc - - is_true: test_2.mappings.doc - ---- -"Get /index,index/_mapping/{type}": - - - do: - indices.get_mapping: - include_type_name: true - index: test_1,test_2 - type: doc - - - is_true: test_1.mappings.doc - - is_true: test_2.mappings.doc - ---- -"Get /index*/_mapping/{type}": - - - do: - indices.get_mapping: - include_type_name: true - index: '*2' - type: doc - - - is_true: test_2.mappings.doc - - is_false: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/20_missing_type.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/20_missing_type.yml deleted file mode 100644 index f17fb6a595305..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/20_missing_type.yml +++ /dev/null @@ -1,106 +0,0 @@ ---- -"Non-existent type returns 404": - - do: - indices.create: - include_type_name: true - index: test_index - body: - mappings: - test_type: - properties: - text: - type: text - analyzer: whitespace - - - do: - catch: missing - indices.get_mapping: - include_type_name: true - index: test_index - type: not_test_type - - - match: { status: 404 } - - match: { error.reason: 'type[[not_test_type]] missing' } - ---- -"No type matching pattern returns 404": - - do: - indices.create: - include_type_name: true - index: test_index - body: - mappings: - test_type: - properties: - text: - type: text - analyzer: whitespace - - - do: - catch: missing - indices.get_mapping: - include_type_name: true - index: test_index - type: test*,not* - - - match: { status: 404 } - - match: { error: 'type [not*] missing' } - - is_true: test_index.mappings.test_type - ---- -"Existent and non-existent type returns 404 and the existing type": - - do: - indices.create: - include_type_name: true - index: test_index - body: - mappings: - test_type: - properties: - text: - type: text - analyzer: whitespace - - - do: - catch: missing - indices.get_mapping: - include_type_name: true - index: test_index - type: test_type,not_test_type - - - match: { status: 404 } - - match: { error: 'type [not_test_type] missing' } - - is_true: test_index.mappings.test_type - ---- -"Existent and non-existent types returns 404 and the existing type": - - do: - indices.create: - include_type_name: true - index: test_index - body: - mappings: - test_type: - properties: - text: - type: text - analyzer: whitespace - - - do: - catch: missing - indices.get_mapping: - include_type_name: true - index: test_index - type: test_type,not_test_type,another_not_test_type - - - match: { status: 404 } - - match: { error: 'types [another_not_test_type,not_test_type] missing' } - - is_true: test_index.mappings.test_type - ---- -"Type missing when no types exist": - - do: - catch: missing - indices.get_mapping: - include_type_name: true - type: not_test_type diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/30_missing_index.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/30_missing_index.yml index 5a7624265ecc9..1bbfbc4f4c967 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/30_missing_index.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/30_missing_index.yml @@ -4,7 +4,7 @@ catch: missing indices.get_mapping: index: test_index - + --- "Index missing, no indexes": - do: @@ -14,9 +14,6 @@ --- "Index missing, ignore_unavailable=true": 
- - skip: - version: " - 6.99.99" - reason: ignore_unavailable was ignored in previous versions - do: indices.get_mapping: index: test_index diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/40_aliases.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/40_aliases.yml index 15a52b7b2db25..956b80ce16b52 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/40_aliases.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/40_aliases.yml @@ -18,7 +18,6 @@ - do: indices.get_mapping: - include_type_name: false index: test_alias - match: {test_index.mappings.properties.text.type: text} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/50_wildcard_expansion.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/50_wildcard_expansion.yml index d3f15b3292285..7f6f3999c868d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/50_wildcard_expansion.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/50_wildcard_expansion.yml @@ -102,9 +102,6 @@ setup: --- "Get test-* with wildcard_expansion=none": - - skip: - version: " - 6.99.99" - reason: allow_no_indices (defaults to true) was ignored in previous versions - do: indices.get_mapping: index: test-x* @@ -113,9 +110,6 @@ setup: - match: { '': {} } --- "Get test-* with wildcard_expansion=none allow_no_indices=false": - - skip: - version: " - 6.99.99" - reason: allow_no_indices was ignored in previous versions - do: catch: missing indices.get_mapping: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/61_empty_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/61_empty_with_types.yml deleted file mode 100644 index 6da7f4a2c6946..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/61_empty_with_types.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -setup: - - - do: - indices.create: - index: test_1 - - - do: - indices.create: - index: test_2 - ---- -"Check empty mapping when getting all mappings via /_mapping": - - - do: - indices.get_mapping: - include_type_name: true - - - match: { test_1.mappings: {}} - - match: { test_2.mappings: {}} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/70_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/70_mix_typeless_typeful.yml deleted file mode 100644 index 162a8d340d48a..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/70_mix_typeless_typeful.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -"GET mapping with typeless API on an index that has types": - - - skip: - version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 - - - do: - indices.create: # not using include_type_name: false on purpose - include_type_name: true - index: index - body: - mappings: - not_doc: - properties: - foo: - type: "keyword" - - - do: - indices.get_mapping: - index: index - - - match: { index.mappings.properties.foo.type: "keyword" } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_settings/30_defaults.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_settings/30_defaults.yml index 2e3f4af03ebef..83e77140facbc 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_settings/30_defaults.yml +++ 
b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_settings/30_defaults.yml @@ -10,9 +10,6 @@ setup: index: test-index --- Test retrieval of default settings: - - skip: - version: " - 6.3.99" - reason: include_defaults will not work in mixed-mode clusters containing nodes pre-6.4 - do: indices.get_settings: flat_settings: true diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_template/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_template/10_basic.yml index c1aac94bf1d84..9becbd54a3773 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_template/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_template/10_basic.yml @@ -1,7 +1,4 @@ setup: - - skip: - version: " - 6.99.99" - reason: include_type_name defaults to true before 7.0.0 - do: indices.put_template: name: test diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/10_basic.yml index eb9f834ef4979..35e4c29f27d3e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/10_basic.yml @@ -38,10 +38,6 @@ --- "Open index with wait_for_active_shards set to all": - - skip: - version: " - 6.0.99" - reason: wait_for_active_shards parameter was added in 6.1.0 - - do: indices.create: index: test_index diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_alias/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_alias/10_basic.yml index ff68b04f20609..77338a6ddae0b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_alias/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_alias/10_basic.yml @@ -59,11 +59,6 @@ --- "Can set is_write_index": - - - skip: - version: " - 6.3.99" - reason: "is_write_index is only available from 6.4.0 on" - - do: indices.create: index: test_index diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/10_basic.yml index 338eaba8881c3..36317c7ae173c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/10_basic.yml @@ -1,8 +1,5 @@ --- "Test Create and update mapping": - - skip: - version: " - 6.99.99" - reason: include_type_name defaults to true before 7.0 - do: indices.create: index: test_index @@ -53,10 +50,6 @@ --- "Create index with invalid mappings": - - - skip: - version: " - 6.99.99" - reason: include_type_name defaults to true before 7.0 - do: indices.create: index: test_index @@ -72,14 +65,38 @@ --- "Put mappings with explicit _doc type": - skip: - version: " - 6.99.99" - reason: include_type_name defaults to true before 7.0 + version: " - 1.99.99" + reason: "deprecation message changed in 2.0" + - do: + indices.create: + index: test_index + + - do: + catch: bad_request + indices.put_mapping: + index: test_index + body: + _doc: + properties: + field: + type: keyword + + - match: { error.type: "illegal_argument_exception" } + - match: { error.reason: "Types cannot be provided in put mapping requests" } +--- +"Put mappings with explicit _doc type bwc": + - skip: + version: "2.0.0 - " + reason: "old deprecation message for pre 2.0" + features: 
"node_selector" - do: indices.create: index: test_index - do: + node_selector: + version: " - 1.99.99" catch: bad_request indices.put_mapping: index: test_index diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/11_basic_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/11_basic_with_types.yml deleted file mode 100644 index 5da9cd4bf707c..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/11_basic_with_types.yml +++ /dev/null @@ -1,74 +0,0 @@ ---- -"Test Create and update mapping": - - do: - indices.create: - index: test_index - - - do: - indices.put_mapping: - include_type_name: true - index: test_index - type: test_type - body: - test_type: - properties: - text1: - type: text - analyzer: whitespace - text2: - type: text - analyzer: whitespace - subfield.text3: - type: text - - - do: - indices.get_mapping: - include_type_name: true - index: test_index - - - match: {test_index.mappings.test_type.properties.text1.type: text} - - match: {test_index.mappings.test_type.properties.text1.analyzer: whitespace} - - match: {test_index.mappings.test_type.properties.text2.type: text} - - match: {test_index.mappings.test_type.properties.text2.analyzer: whitespace} - - - do: - indices.put_mapping: - include_type_name: true - index: test_index - type: test_type - body: - test_type: - properties: - text1: - type: text - analyzer: whitespace - fields: - text_raw: - type: keyword - - - - do: - indices.get_mapping: - include_type_name: true - index: test_index - - - match: {test_index.mappings.test_type.properties.text1.type: text} - - match: {test_index.mappings.test_type.properties.subfield.properties.text3.type: text} - - match: {test_index.mappings.test_type.properties.text1.fields.text_raw.type: keyword} - ---- -"Create index with invalid mappings": - - do: - indices.create: - index: test_index - - do: - catch: /illegal_argument_exception/ - indices.put_mapping: - include_type_name: true - index: test_index - type: test_type - body: - test_type: - properties: - "": - type: keyword diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_mix_typeless_typeful.yml deleted file mode 100644 index 13cb3321841cf..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_mix_typeless_typeful.yml +++ /dev/null @@ -1,83 +0,0 @@ ---- -"PUT mapping with typeless API on an index that has types": - - - do: - indices.create: # not using include_type_name: false on purpose - include_type_name: true - index: index - body: - mappings: - not_doc: - properties: - foo: - type: "keyword" - - - do: - indices.put_mapping: - include_type_name: false - index: index - body: - properties: - bar: - type: "long" - - - do: - indices.get_mapping: - include_type_name: false - index: index - - - match: { index.mappings.properties.foo.type: "keyword" } - - match: { index.mappings.properties.bar.type: "long" } - - - do: - indices.put_mapping: - include_type_name: false - index: index - body: - properties: - foo: - type: "keyword" # also test no-op updates that trigger special logic wrt the mapping version - - - do: - catch: /the final mapping would have more than 1 type/ - indices.put_mapping: - include_type_name: true - index: index - type: some_other_type - body: - some_other_type: - properties: - bar: - type: "long" - - ---- -"PUT mapping with _doc on an 
index that has types": - - - skip: - version: " - 6.6.99" - reason: include_type_name is only supported as of 6.7 - - - - do: - indices.create: - include_type_name: true - index: index - body: - mappings: - my_type: - properties: - foo: - type: "keyword" - - - do: - catch: /the final mapping would have more than 1 type/ - indices.put_mapping: - include_type_name: true - index: index - type: _doc - body: - _doc: - properties: - bar: - type: "long" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/all_path_options.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/all_path_options.yml index 182ec017e0d30..c1daa76fe3d6e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/all_path_options.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/all_path_options.yml @@ -1,7 +1,4 @@ setup: - - skip: - version: " - 6.99.99" - reason: include_type_name defaults to true before 7.0 - do: indices.create: index: test_index1 @@ -162,4 +159,4 @@ setup: indices.get_mapping: {} - match: {test_index1.mappings.properties.text.type: text} - + diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/all_path_options_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/all_path_options_with_types.yml deleted file mode 100644 index 6f9b6f7d9ceef..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/all_path_options_with_types.yml +++ /dev/null @@ -1,227 +0,0 @@ -setup: - - do: - indices.create: - index: test_index1 - - do: - indices.create: - index: test_index2 - - do: - indices.create: - index: foo - - ---- -"put one mapping per index": - - do: - indices.put_mapping: - include_type_name: true - index: test_index1 - type: test_type - body: - test_type: - properties: - text: - type: text - analyzer: whitespace - - do: - indices.put_mapping: - include_type_name: true - index: test_index2 - type: test_type - body: - test_type: - properties: - text: - type: text - analyzer: whitespace - - - - do: - indices.get_mapping: - include_type_name: true - - - match: {test_index1.mappings.test_type.properties.text.type: text} - - match: {test_index1.mappings.test_type.properties.text.analyzer: whitespace} - - - match: {test_index2.mappings.test_type.properties.text.type: text} - - match: {test_index2.mappings.test_type.properties.text.analyzer: whitespace} - - - match: { foo.mappings: {} } - ---- -"put mapping in _all index": - - - do: - indices.put_mapping: - include_type_name: true - index: _all - type: test_type - body: - test_type: - properties: - text: - type: text - analyzer: whitespace - - - do: - indices.get_mapping: - include_type_name: true - - - match: {test_index1.mappings.test_type.properties.text.type: text} - - match: {test_index1.mappings.test_type.properties.text.analyzer: whitespace} - - - match: {test_index2.mappings.test_type.properties.text.type: text} - - match: {test_index2.mappings.test_type.properties.text.analyzer: whitespace} - - - match: {foo.mappings.test_type.properties.text.type: text} - - match: {foo.mappings.test_type.properties.text.analyzer: whitespace} - ---- -"put mapping in * index": - - do: - indices.put_mapping: - include_type_name: true - index: "*" - type: test_type - body: - test_type: - properties: - text: - type: text - analyzer: whitespace - - - do: - indices.get_mapping: - include_type_name: true - - - match: 
{test_index1.mappings.test_type.properties.text.type: text} - - match: {test_index1.mappings.test_type.properties.text.analyzer: whitespace} - - - match: {test_index2.mappings.test_type.properties.text.type: text} - - match: {test_index2.mappings.test_type.properties.text.analyzer: whitespace} - - - match: {foo.mappings.test_type.properties.text.type: text} - - match: {foo.mappings.test_type.properties.text.analyzer: whitespace} - ---- -"put mapping in prefix* index": - - do: - indices.put_mapping: - include_type_name: true - index: "test_index*" - type: test_type - body: - test_type: - properties: - text: - type: text - analyzer: whitespace - - - do: - indices.get_mapping: - include_type_name: true - - - match: {test_index1.mappings.test_type.properties.text.type: text} - - match: {test_index1.mappings.test_type.properties.text.analyzer: whitespace} - - - match: {test_index2.mappings.test_type.properties.text.type: text} - - match: {test_index2.mappings.test_type.properties.text.analyzer: whitespace} - - - match: { foo.mappings: {} } - ---- -"put mapping in list of indices": - - do: - indices.put_mapping: - include_type_name: true - index: [test_index1, test_index2] - type: test_type - body: - test_type: - properties: - text: - type: text - analyzer: whitespace - - - do: - indices.get_mapping: - include_type_name: true - - - match: {test_index1.mappings.test_type.properties.text.type: text} - - match: {test_index1.mappings.test_type.properties.text.analyzer: whitespace} - - - match: {test_index2.mappings.test_type.properties.text.type: text} - - match: {test_index2.mappings.test_type.properties.text.analyzer: whitespace} - - - match: { foo.mappings: {} } - ---- -"put mapping with blank index": - - do: - indices.put_mapping: - include_type_name: true - type: test_type - body: - test_type: - properties: - text: - type: text - analyzer: whitespace - - - do: - indices.get_mapping: - include_type_name: true - - - match: {test_index1.mappings.test_type.properties.text.type: text} - - match: {test_index1.mappings.test_type.properties.text.analyzer: whitespace} - - - match: {test_index2.mappings.test_type.properties.text.type: text} - - match: {test_index2.mappings.test_type.properties.text.analyzer: whitespace} - - - match: {foo.mappings.test_type.properties.text.type: text} - - match: {foo.mappings.test_type.properties.text.analyzer: whitespace} - ---- -"put mapping with missing type": - - - - do: - catch: param - indices.put_mapping: - include_type_name: true - ---- -"post a mapping with default analyzer twice": - - - do: - indices.put_mapping: - include_type_name: true - index: test_index1 - type: test_type - body: - test_type: - dynamic: false - properties: - text: - analyzer: default - type: text - - - do: - indices.put_mapping: - include_type_name: true - index: test_index1 - type: test_type - body: - test_type: - dynamic: false - properties: - text: - analyzer: default - type: text - - - do: - indices.get_mapping: - include_type_name: true - - - match: {test_index1.mappings.test_type.properties.text.type: text} - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_template/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_template/10_basic.yml index f7a83442ca2e2..0b4e34d2740b5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_template/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_template/10_basic.yml @@ -1,9 +1,5 @@ --- "Put template": - - skip: - version: " - 6.99.99" - 
reason: include_type_name defaults to true before 7.0.0 - - do: indices.put_template: name: test @@ -28,10 +24,6 @@ --- "Put multiple template": - - skip: - version: " - 6.99.99" - reason: include_type_name defaults to true before 7.0.0 - - do: indices.put_template: name: test @@ -56,10 +48,6 @@ --- "Put template with empty mappings": - - skip: - version: " - 6.99.99" - reason: include_type_name defaults to true before 7.0.0 - - do: indices.put_template: name: test @@ -241,10 +229,6 @@ --- "Put template with explicit _doc type": - - skip: - version: " - 6.99.99" - reason: include_type_name defaults to true before 7.0 - - do: catch: bad_request indices.put_template: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/10_basic.yml index 342adced0640d..dc68ffc9a3b86 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/10_basic.yml @@ -16,20 +16,17 @@ - do: index: index: logs-1 - type: test id: "1" body: { "foo": "hello world" } # make this doc visible in index stats refresh: true - + - do: get: index: logs_search - type: test id: "1" - match: { _index: logs-1 } - - match: { _type: test } - match: { _id: "1" } - match: { _source: { foo: "hello world" } } @@ -59,7 +56,6 @@ - do: index: index: logs-000002 - type: test id: "2" body: { "foo": "hello world" } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/20_max_doc_condition.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/20_max_doc_condition.yml index ec9fabe02595d..f5d223259dc06 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/20_max_doc_condition.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/20_max_doc_condition.yml @@ -17,7 +17,6 @@ - do: index: index: logs-1 - type: test id: "1" body: { "foo": "hello world" } refresh: true @@ -38,7 +37,6 @@ - do: index: index: logs-1 - type: test id: "2" body: { "foo": "hello world" } refresh: true diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/30_max_size_condition.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/30_max_size_condition.yml index 6e4df0f292915..95c0ff509f304 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/30_max_size_condition.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/30_max_size_condition.yml @@ -1,10 +1,5 @@ --- "Rollover with max_size condition": - - - skip: - version: " - 6.0.99" - reason: max_size condition is introduced in 6.1.0 - # create index with alias and replica - do: indices.create: @@ -18,7 +13,6 @@ - do: index: index: logs-1 - type: doc id: "1" body: { "foo": "hello world" } refresh: true diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/40_mapping.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/40_mapping.yml index 47b004326a457..040ffd534c0ab 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/40_mapping.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/40_mapping.yml @@ -1,9 +1,5 @@ --- "Typeless mapping": - - skip: - version: " - 6.99.99" - reason: include_type_name defaults to true before 7.0.0 - - do: indices.create: index: logs-1 @@ -44,10 +40,6 @@ --- "Mappings with 
explicit _doc type": - - skip: - version: " - 6.99.99" - reason: include_type_name defaults to true before 7.0 - - do: indices.create: index: logs-1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/41_mapping_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/41_mapping_with_types.yml deleted file mode 100644 index 36389f3ce8bba..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/41_mapping_with_types.yml +++ /dev/null @@ -1,47 +0,0 @@ ---- -"Typeless mapping": - - skip: - version: " - 6.99.99" - reason: include_type_name defaults to true before 7.0.0 - - - do: - indices.create: - index: logs-1 - body: - aliases: - logs_search: {} - - # index first document and wait for refresh - - do: - index: - index: logs-1 - type: test - id: "1" - body: { "foo": "hello world" } - refresh: true - - # index second document and wait for refresh - - do: - index: - index: logs-1 - type: test - id: "2" - body: { "foo": "hello world" } - refresh: true - - # perform alias rollover with new typeless mapping - - do: - indices.rollover: - include_type_name: true - alias: "logs_search" - body: - conditions: - max_docs: 2 - mappings: - _doc: - properties: - foo2: - type: keyword - - - match: { conditions: { "[max_docs: 2]": true } } - - match: { rolled_over: true } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.segments/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.segments/10_basic.yml index 37602774474a1..bda7788354b47 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.segments/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.segments/10_basic.yml @@ -25,7 +25,6 @@ - do: index: index: index1 - type: type body: { foo: bar } refresh: true @@ -53,7 +52,6 @@ - do: index: index: index1 - type: type body: { foo: bar } refresh: true diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shard_stores/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shard_stores/10_basic.yml index b6f1c7e552790..1f621c2e50b9d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shard_stores/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shard_stores/10_basic.yml @@ -24,7 +24,6 @@ - do: index: index: index1 - type: type body: { foo: bar } refresh: true @@ -59,13 +58,11 @@ - do: index: index: index1 - type: type body: { foo: bar } refresh: true - do: index: index: index2 - type: type body: { foo: bar } refresh: true - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml index 41c851b71cc6c..a5b1cb8607b3a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml @@ -1,10 +1,5 @@ --- "Shrink index via API": - - skip: - version: " - 6.9.99" - reason: expects warnings that pre-7.0.0 will not send - features: [warnings, arbitrary_key] - # creates an index with one document solely allocated on a particular data node # and shrinks it into a new index with a single shard # we don't do the relocation to a single node after the index is created @@ -40,7 +35,6 @@ id: "1" - match: { _index: source } - - match: { _type: _doc } - match: { _id: "1" } - match: { _source: { foo: "hello world" } } @@ -78,6 +72,5 
@@ id: "1" - match: { _index: target } - - match: { _type: _doc } - match: { _id: "1" } - match: { _source: { foo: "hello world" } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml index 8fe8643d049ea..a744895c4ce38 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml @@ -1,10 +1,7 @@ --- "Copy settings during shrink index": - skip: - version: " - 6.9.99" - reason: expects warnings that pre-7.0.0 will not send - features: [allowed_warnings, arbitrary_key] - + features: allowed_warnings - do: nodes.info: node_id: data:true diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml index 2baa82ea78842..4ae1d0002a237 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml @@ -29,11 +29,6 @@ setup: --- "Split index via API": - - skip: - version: " - 6.9.99" - reason: pre-7.0.0 will send warnings - features: "warnings" - # make it read-only - do: indices.put_settings: @@ -69,7 +64,6 @@ setup: id: "1" - match: { _index: target } - - match: { _type: _doc } - match: { _id: "1" } - match: { _source: { foo: "hello world" } } @@ -80,7 +74,6 @@ setup: id: "2" - match: { _index: target } - - match: { _type: _doc } - match: { _id: "2" } - match: { _source: { foo: "hello world 2" } } @@ -91,16 +84,12 @@ setup: id: "3" - match: { _index: target } - - match: { _type: _doc } - match: { _id: "3" } - match: { _source: { foo: "hello world 3" } } --- "Split from 1 to N": - - skip: - version: " - 6.99.99" - reason: automatic preparation for splitting was added in 7.0.0 - do: indices.create: index: source_one_shard @@ -162,7 +151,6 @@ setup: id: "1" - match: { _index: target } - - match: { _type: _doc } - match: { _id: "1" } - match: { _source: { foo: "hello world" } } @@ -173,7 +161,6 @@ setup: id: "2" - match: { _index: target } - - match: { _type: _doc } - match: { _id: "2" } - match: { _source: { foo: "hello world 2" } } @@ -184,17 +171,11 @@ setup: id: "3" - match: { _index: target } - - match: { _type: _doc } - match: { _id: "3" } - match: { _source: { foo: "hello world 3" } } --- "Create illegal split indices": - - skip: - version: " - 6.9.99" - reason: pre-7.0.0 will send warnings - features: "warnings" - # try to do an illegal split with number_of_routing_shards set - do: catch: /illegal_argument_exception/ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml index 3740167a0253a..c86e49aac0561 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml @@ -1,10 +1,5 @@ --- "Split index ignores target template mapping": - - skip: - version: " - 6.9.99" - reason: pre-7.0.0 will send warnings - features: "warnings" - # create index - do: indices.create: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml 
b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml index 46517f6055f32..0ceacf1f064ca 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml @@ -1,10 +1,7 @@ --- "Copy settings during split index": - skip: - version: " - 6.9.99" - reason: expects warnings that pre-7.0.0 will not send - features: [arbitrary_key, allowed_warnings] - + features: allowed_warnings - do: nodes.info: node_id: data:true diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/10_index.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/10_index.yml index 1a650ee88eae6..54b4163ee6502 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/10_index.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/10_index.yml @@ -39,10 +39,6 @@ setup: --- "Index - all": - - skip: - version: " - 6.3.99" - reason: "uuid is only available from 6.4.0 on" - - do: indices.stats: { index: _all } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/15_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/15_types.yml deleted file mode 100644 index e2f31c3405707..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/15_types.yml +++ /dev/null @@ -1,81 +0,0 @@ ---- -setup: - - - do: - index: - index: test1 - type: bar - id: 1 - body: { "bar": "bar", "baz": "baz" } - - - do: - index: - index: test2 - type: baz - id: 1 - body: { "bar": "bar", "baz": "baz" } - - ---- -"Types - blank": - - do: - indices.stats: {} - - - match: { _all.primaries.indexing.index_total: 2 } - - is_false: _all.primaries.indexing.types - ---- -"Types - one": - - do: - indices.stats: { types: bar } - - - match: { _all.primaries.indexing.types.bar.index_total: 1 } - - is_false: _all.primaries.indexing.types.baz - ---- -"Types - multi": - - do: - indices.stats: { types: "bar,baz" } - - - match: { _all.primaries.indexing.types.bar.index_total: 1 } - - match: { _all.primaries.indexing.types.baz.index_total: 1 } - ---- -"Types - star": - - do: - indices.stats: { types: "*" } - - - match: { _all.primaries.indexing.types.bar.index_total: 1 } - - match: { _all.primaries.indexing.types.baz.index_total: 1 } - ---- -"Types - pattern": - - do: - indices.stats: { types: "*r" } - - - match: { _all.primaries.indexing.types.bar.index_total: 1 } - - is_false: _all.primaries.indexing.types.baz - ---- -"Types - _all metric": - - do: - indices.stats: { types: bar, metric: _all } - - - match: { _all.primaries.indexing.types.bar.index_total: 1 } - - is_false: _all.primaries.indexing.types.baz - ---- -"Types - indexing metric": - - do: - indices.stats: { types: bar, metric: indexing } - - - match: { _all.primaries.indexing.types.bar.index_total: 1 } - - is_false: _all.primaries.indexing.types.baz - ---- -"Types - multi metric": - - do: - indices.stats: { types: bar, metric: [ indexing, search ] } - - - match: { _all.primaries.indexing.types.bar.index_total: 1 } - - is_false: _all.primaries.indexing.types.baz diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml index a308f798df692..e817bc0d27337 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml +++ 
b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml @@ -1,83 +1,5 @@ --- -"Translog retention without soft_deletes": - - skip: - version: " - 7.5.99" - reason: "indices without soft deletes are deprecated in 7.6" - features: "allowed_warnings" - - - do: - indices.create: - index: test - body: - settings: - soft_deletes.enabled: false - allowed_warnings: - - Creating indices with soft-deletes disabled is deprecated and will be removed in future OpenSearch versions. - Please do not specify value for setting [index.soft_deletes.enabled] of index [test]. - - do: - cluster.health: - wait_for_no_initializing_shards: true - wait_for_events: languid - - do: - indices.stats: - metric: [ translog ] - - set: { indices.test.primaries.translog.size_in_bytes: creation_size } - - - do: - index: - index: test - id: 1 - body: { "foo": "bar" } - - - do: - indices.stats: - metric: [ translog ] - - gt: { indices.test.primaries.translog.size_in_bytes: $creation_size } - - match: { indices.test.primaries.translog.operations: 1 } -# we can't check this yet as creation size will contain two empty translog generations. A single -# non empty generation with one op may be smaller or larger than that. -# - gt: { indices.test.primaries.translog.uncommitted_size_in_bytes: $creation_size } - - match: { indices.test.primaries.translog.uncommitted_operations: 1 } - - - do: - indices.flush: - index: test - - - do: - indices.stats: - metric: [ translog ] - - gt: { indices.test.primaries.translog.size_in_bytes: $creation_size } - - match: { indices.test.primaries.translog.operations: 1 } - ## creation translog size has some overhead due to an initial empty generation that will be trimmed later - - lt: { indices.test.primaries.translog.uncommitted_size_in_bytes: $creation_size } - - match: { indices.test.primaries.translog.uncommitted_operations: 0 } - - - do: - indices.put_settings: - index: test - body: - index.translog.retention.size: -1 - index.translog.retention.age: -1 - - - do: - indices.flush: - index: test - force: true # force flush as we don't have pending ops - - - do: - indices.stats: - metric: [ translog ] - ## creation translog size has some overhead due to an initial empty generation that will be trimmed later - - lte: { indices.test.primaries.translog.size_in_bytes: $creation_size } - - match: { indices.test.primaries.translog.operations: 0 } - - lte: { indices.test.primaries.translog.uncommitted_size_in_bytes: $creation_size } - - match: { indices.test.primaries.translog.uncommitted_operations: 0 } - ---- -"Translog retention with soft_deletes": - - skip: - version: " - 7.3.99" - reason: "start ignoring translog retention policy with soft-deletes enabled in 7.4" +"Translog retention": - do: indices.create: index: test @@ -164,9 +86,6 @@ --- "Translog last modified age stats": - - skip: - version: " - 6.2.99" - reason: translog last modified age stats was added in 6.3.0 - do: index: index: test @@ -179,70 +98,7 @@ - gte: { indices.test.primaries.translog.earliest_last_modified_age: 0 } --- -"Translog stats on closed indices without soft-deletes": - - skip: - version: " - 7.5.99" - reason: "indices without soft deletes are deprecated in 7.6" - features: "allowed_warnings" - - - do: - indices.create: - index: test - body: - settings: - soft_deletes.enabled: false - routing.rebalance.enable: "none" # prevents shard relocations while we are closing an index - allowed_warnings: - - Creating indices with soft-deletes disabled is deprecated and will be removed in future OpenSearch 
versions. - Please do not specify value for setting [index.soft_deletes.enabled] of index [test]. - - - do: - cluster.health: - wait_for_no_initializing_shards: true - wait_for_events: languid - - do: - index: - index: test - id: 1 - body: { "foo": "bar" } - - - do: - index: - index: test - id: 2 - body: { "foo": "bar" } - - - do: - index: - index: test - id: 3 - body: { "foo": "bar" } - - - do: - indices.stats: - metric: [ translog ] - - match: { indices.test.primaries.translog.operations: 3 } - - match: { indices.test.primaries.translog.uncommitted_operations: 3 } - - - do: - indices.close: - index: test - wait_for_active_shards: 1 - - is_true: acknowledged - - - do: - indices.stats: - metric: [ translog ] - expand_wildcards: all - forbid_closed_indices: false - - match: { indices.test.primaries.translog.operations: 3 } - - match: { indices.test.primaries.translog.uncommitted_operations: 0 } - ---- -"Translog stats on closed indices with soft-deletes": - - skip: - version: " - 7.3.99" - reason: "start ignoring translog retention policy with soft-deletes enabled in 7.4" +"Translog stats on closed indices": - do: indices.create: index: test diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/10_basic.yml index 798d699ae80a0..854c460c535c0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/10_basic.yml @@ -1,9 +1,5 @@ --- "Basic multi-get": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: indices.create: index: test_2 @@ -26,17 +22,14 @@ - is_false: docs.0.found - match: { docs.0._index: test_2 } - - match: { docs.0._type: null } - match: { docs.0._id: "1" } - is_false: docs.1.found - match: { docs.1._index: test_1 } - - match: { docs.1._type: _doc } - match: { docs.1._id: "2" } - is_true: docs.2.found - match: { docs.2._index: test_1 } - - match: { docs.2._type: _doc } - match: { docs.2._id: "1" } - match: { docs.2._version: 1 } - match: { docs.2._source: { foo: bar }} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/11_default_index_type.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/11_default_index_type.yml deleted file mode 100644 index 773b7e3bcfe6b..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/11_default_index_type.yml +++ /dev/null @@ -1,44 +0,0 @@ ---- -"Default index/type": - - do: - indices.create: - index: test_2 - - do: - index: - index: test_1 - type: test - id: 1 - body: { foo: bar } - - - do: - mget: - index: test_1 - type: test - body: - docs: - - { _index: test_2, _id: 1} - - { _type: none, _id: 1} - - { _id: 2} - - { _id: 1} - - - is_false: docs.0.found - - match: { docs.0._index: test_2 } - - match: { docs.0._type: test } - - match: { docs.0._id: "1" } - - - is_false: docs.1.found - - match: { docs.1._index: test_1 } - - match: { docs.1._type: none } - - match: { docs.1._id: "1" } - - - is_false: docs.2.found - - match: { docs.2._index: test_1 } - - match: { docs.2._type: test } - - match: { docs.2._id: "2" } - - - is_true: docs.3.found - - match: { docs.3._index: test_1 } - - match: { docs.3._type: test } - - match: { docs.3._id: "1" } - - match: { docs.3._version: 1 } - - match: { docs.3._source: { foo: bar }} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/12_non_existent_index.yml 
b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/12_non_existent_index.yml index a1101a903f896..38ca8da20dd5d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/12_non_existent_index.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/12_non_existent_index.yml @@ -1,9 +1,5 @@ --- "Non-existent index": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: index: index: test_1 @@ -18,7 +14,6 @@ - is_false: docs.0.found - match: { docs.0._index: test_2 } - - match: { docs.0._type: null } - match: { docs.0._id: "1" } - do: @@ -29,5 +24,4 @@ - is_true: docs.0.found - match: { docs.0._index: test_1 } - - match: { docs.0._type: _doc } - match: { docs.0._id: "1" } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/13_missing_metadata.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/13_missing_metadata.yml index 2711bed58dbb1..eb46d45f027d3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/13_missing_metadata.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/13_missing_metadata.yml @@ -1,9 +1,5 @@ --- "Missing metadata": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: index: index: test_1 @@ -43,7 +39,6 @@ - is_true: docs.0.found - match: { docs.0._index: test_1 } - - match: { docs.0._type: _doc } - match: { docs.0._id: "1" } - match: { docs.0._version: 1 } - match: { docs.0._source: { foo: bar }} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/14_alias_to_multiple_indices.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/14_alias_to_multiple_indices.yml index 9c1d0242b05c9..825dc256d786a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/14_alias_to_multiple_indices.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/14_alias_to_multiple_indices.yml @@ -8,11 +8,11 @@ bulk: refresh: true body: | - {"index": {"_index": "test_1", "_type": "_doc", "_id": 1}} + {"index": {"_index": "test_1", "_id": 1}} { "foo": "bar" } - {"index": {"_index": "test_2", "_type": "_doc", "_id": 2}} + {"index": {"_index": "test_2", "_id": 2}} { "foo": "bar" } - {"index": {"_index": "test_3", "_type": "_doc", "_id": 3}} + {"index": {"_index": "test_3", "_id": 3}} { "foo": "bar" } - do: @@ -34,12 +34,10 @@ - is_true: docs.0.found - match: { docs.0._index: test_1 } - - match: { docs.0._type: _doc } - match: { docs.0._id: "1" } - is_false: docs.1.found - match: { docs.1._index: test_two_and_three } - - match: { docs.1._type: null } - match: { docs.1._id: "2" } - match: { docs.1.error.root_cause.0.type: "illegal_argument_exception" } - match: { docs.1.error.root_cause.0.reason: "/[aA]lias.\\[test_two_and_three\\].has.more.than.one.index.associated.with.it.\\[test_[23]{1},.test_[23]{1}\\],.can't.execute.a.single.index.op/" } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/15_ids.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/15_ids.yml index fbdc9b265a95a..f71b5e86dab56 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/15_ids.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/15_ids.yml @@ -1,9 +1,5 @@ --- "IDs": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: indices.create: index: test_1 @@ -28,14 +24,12 @@ - is_true: docs.0.found - match: { docs.0._index: test_1 } - - match: { docs.0._type: _doc } - match: { docs.0._id: "1" } - match: { 
docs.0._version: 1 } - match: { docs.0._source: { foo: bar }} - is_false: docs.1.found - match: { docs.1._index: test_1 } - - match: { docs.1._type: _doc } - match: { docs.1._id: "3" } - do: @@ -46,14 +40,12 @@ - is_true: docs.0.found - match: { docs.0._index: test_1 } - - match: { docs.0._type: _doc } - match: { docs.0._id: "1" } - match: { docs.0._version: 1 } - match: { docs.0._source: { foo: bar }} - is_true: docs.1.found - match: { docs.1._index: test_1 } - - match: { docs.1._type: _doc } - match: { docs.1._id: "2" } - match: { docs.1._version: 1 } - match: { docs.1._source: { foo: baz }} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/16_basic_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/16_basic_with_types.yml deleted file mode 100644 index 0850772ad426c..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/16_basic_with_types.yml +++ /dev/null @@ -1,45 +0,0 @@ ---- -"Basic multi-get": - - do: - indices.create: - index: test_2 - - do: - index: - index: test_1 - type: test - id: 1 - body: { foo: bar } - - - do: - indices.refresh: {} - - - do: - mget: - body: - docs: - - { _index: test_2, _type: test, _id: 1} - - { _index: test_1, _type: none, _id: 1} - - { _index: test_1, _type: test, _id: 2} - - { _index: test_1, _type: test, _id: 1} - - - is_false: docs.0.found - - match: { docs.0._index: test_2 } - - match: { docs.0._type: test } - - match: { docs.0._id: "1" } - - - is_false: docs.1.found - - match: { docs.1._index: test_1 } - - match: { docs.1._type: none } - - match: { docs.1._id: "1" } - - - is_false: docs.2.found - - match: { docs.2._index: test_1 } - - match: { docs.2._type: test } - - match: { docs.2._id: "2" } - - - is_true: docs.3.found - - match: { docs.3._index: test_1 } - - match: { docs.3._type: test } - - match: { docs.3._id: "1" } - - match: { docs.3._version: 1 } - - match: { docs.3._source: { foo: bar }} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/17_default_index.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/17_default_index.yml index d03f99be39517..15fd4320851e6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/17_default_index.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/17_default_index.yml @@ -1,9 +1,5 @@ --- "Default index/type": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: indices.create: index: test_2 @@ -24,17 +20,14 @@ - is_false: docs.0.found - match: { docs.0._index: test_2 } - - match: { docs.0._type: null } - match: { docs.0._id: "1" } - is_false: docs.1.found - match: { docs.1._index: test_1 } - - match: { docs.1._type: _doc } - match: { docs.1._id: "2" } - is_true: docs.2.found - match: { docs.2._index: test_1 } - - match: { docs.2._type: _doc } - match: { docs.2._id: "1" } - match: { docs.2._version: 1 } - match: { docs.2._source: { foo: bar }} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/18_non_existent_index_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/18_non_existent_index_with_types.yml deleted file mode 100644 index 0623464225072..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/18_non_existent_index_with_types.yml +++ /dev/null @@ -1,30 +0,0 @@ ---- -"Non-existent index": - - do: - index: - index: test_1 - type: test - id: 1 - body: { foo: bar } - - - do: - mget: - body: - docs: - - { _index: test_2, _type: test, _id: 1} - - - is_false: docs.0.found - - 
match: { docs.0._index: test_2 } - - match: { docs.0._type: test } - - match: { docs.0._id: "1" } - - - do: - mget: - body: - docs: - - { _index: test_1, _type: test, _id: 1} - - - is_true: docs.0.found - - match: { docs.0._index: test_1 } - - match: { docs.0._type: test } - - match: { docs.0._id: "1" } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/19_missing_metadata_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/19_missing_metadata_with_types.yml deleted file mode 100644 index d7af1797f7a40..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/19_missing_metadata_with_types.yml +++ /dev/null @@ -1,47 +0,0 @@ ---- -"Missing metadata": - - - do: - index: - index: test_1 - type: test - id: 1 - body: { foo: bar } - - - do: - catch: /action_request_validation_exception.+ id is missing/ - mget: - body: - docs: - - { _index: test_1, _type: test} - - - do: - catch: /action_request_validation_exception.+ index is missing/ - mget: - body: - docs: - - { _type: test, _id: 1} - - - do: - catch: /action_request_validation_exception.+ no documents to get/ - mget: - body: - docs: [] - - - do: - catch: /action_request_validation_exception.+ no documents to get/ - mget: - body: {} - - - do: - mget: - body: - docs: - - { _index: test_1, _id: 1} - - - is_true: docs.0.found - - match: { docs.0._index: test_1 } - - match: { docs.0._type: test } - - match: { docs.0._id: "1" } - - match: { docs.0._version: 1 } - - match: { docs.0._source: { foo: bar }} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/20_stored_fields.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/20_stored_fields.yml index 45460deb04e0b..1c965a9573ae0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/20_stored_fields.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/20_stored_fields.yml @@ -1,9 +1,5 @@ --- "Stored fields": - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: indices.create: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/21_alias_to_multiple_indices_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/21_alias_to_multiple_indices_with_types.yml deleted file mode 100644 index 8b8ff6cbdb76c..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/21_alias_to_multiple_indices_with_types.yml +++ /dev/null @@ -1,45 +0,0 @@ ---- -"Multi Get with alias that resolves to multiple indices": - - skip: - version: " - 7.8.99" - reason: "message was changed to fix grammar in 7.9" - - - do: - bulk: - refresh: true - body: | - {"index": {"_index": "test_1", "_type": "test", "_id": 1}} - { "foo": "bar" } - {"index": {"_index": "test_2", "_type": "test", "_id": 2}} - { "foo": "bar" } - {"index": {"_index": "test_3", "_type": "test", "_id": 3}} - { "foo": "bar" } - - - do: - indices.put_alias: - index: test_2 - name: test_two_and_three - - - do: - indices.put_alias: - index: test_3 - name: test_two_and_three - - - do: - mget: - body: - docs: - - { _index: test_1, _type: test, _id: 1} - - { _index: test_two_and_three, _type: test, _id: 2} - - - is_true: docs.0.found - - match: { docs.0._index: test_1 } - - match: { docs.0._type: test } - - match: { docs.0._id: "1" } - - - is_false: docs.1.found - - match: { docs.1._index: test_two_and_three } - - match: { docs.1._type: test } - - match: { docs.1._id: "2" } - - match: { docs.1.error.root_cause.0.type: "illegal_argument_exception" } - 
- match: { docs.1.error.root_cause.0.reason: "/[aA]lias.\\[test_two_and_three\\].has.more.than.one.index.associated.with.it.\\[test_[23]{1},.test_[23]{1}\\],.can't.execute.a.single.index.op/" } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/22_ids_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/22_ids_with_types.yml deleted file mode 100644 index 6c233e4d92a9c..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/22_ids_with_types.yml +++ /dev/null @@ -1,72 +0,0 @@ ---- -"IDs": - - do: - indices.create: - index: test_1 - - - do: - index: - index: test_1 - type: test - id: 1 - body: { foo: bar } - - - do: - index: - index: test_1 - type: test - id: 2 - body: { foo: baz } - - - do: - mget: - index: test_1 - type: test - body: - ids: [1, 3] - - - is_true: docs.0.found - - match: { docs.0._index: test_1 } - - match: { docs.0._type: test } - - match: { docs.0._id: "1" } - - match: { docs.0._version: 1 } - - match: { docs.0._source: { foo: bar }} - - - is_false: docs.1.found - - match: { docs.1._index: test_1 } - - match: { docs.1._type: test } - - match: { docs.1._id: "3" } - - - do: - mget: - index: test_1 - body: - ids: [1, 2] - - - is_true: docs.0.found - - match: { docs.0._index: test_1 } - - match: { docs.0._type: test } - - match: { docs.0._id: "1" } - - match: { docs.0._version: 1 } - - match: { docs.0._source: { foo: bar }} - - - is_true: docs.1.found - - match: { docs.1._index: test_1 } - - match: { docs.1._type: test } - - match: { docs.1._id: "2" } - - match: { docs.1._version: 1 } - - match: { docs.1._source: { foo: baz }} - - - - do: - catch: /action_request_validation_exception.+ no documents to get/ - mget: - index: test_1 - body: - ids: [] - - - do: - catch: /action_request_validation_exception.+ no documents to get/ - mget: - index: test_1 - body: {} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/23_stored_fields_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/23_stored_fields_with_types.yml deleted file mode 100644 index 05b9738d46180..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/23_stored_fields_with_types.yml +++ /dev/null @@ -1,120 +0,0 @@ ---- -"Stored fields": - - - do: - indices.create: - include_type_name: true - index: test_1 - body: - mappings: - test: - properties: - foo: - type: keyword - store: true - count: - type: integer - store: true - - - do: - index: - index: test_1 - type: test - id: 1 - body: { foo: bar } - - - do: - mget: - index: test_1 - type: test - body: - docs: - - { _id: 1 } - - { _id: 1, stored_fields: foo } - - { _id: 1, stored_fields: [foo] } - - { _id: 1, stored_fields: [foo, _source] } - - - is_false: docs.0.fields - - match: { docs.0._source: { foo: bar }} - - - match: { docs.1.fields.foo: [bar] } - - is_false: docs.1._source - - - match: { docs.2.fields.foo: [bar] } - - is_false: docs.2._source - - - match: { docs.3.fields.foo: [bar] } - - match: { docs.3._source: { foo: bar }} - - - do: - mget: - index: test_1 - type: test - stored_fields: foo - body: - docs: - - { _id: 1 } - - { _id: 1, stored_fields: foo } - - { _id: 1, stored_fields: [foo] } - - { _id: 1, stored_fields: [foo, _source] } - - - match: { docs.0.fields.foo: [bar] } - - is_false: docs.0._source - - - match: { docs.1.fields.foo: [bar] } - - is_false: docs.1._source - - - match: { docs.2.fields.foo: [bar] } - - is_false: docs.2._source - - - match: { docs.3.fields.foo: [bar] } - - match: { docs.3._source: { foo: bar }} - - - do: 
- mget: - index: test_1 - type: test - stored_fields: [foo] - body: - docs: - - { _id: 1 } - - { _id: 1, stored_fields: foo } - - { _id: 1, stored_fields: [foo] } - - { _id: 1, stored_fields: [foo, _source] } - - - match: { docs.0.fields.foo: [bar] } - - is_false: docs.0._source - - - match: { docs.1.fields.foo: [bar] } - - is_false: docs.1._source - - - match: { docs.2.fields.foo: [bar] } - - is_false: docs.2._source - - - match: { docs.3.fields.foo: [bar] } - - match: { docs.3._source: { foo: bar }} - - - do: - mget: - index: test_1 - type: test - stored_fields: [foo, _source] - body: - docs: - - { _id: 1 } - - { _id: 1, stored_fields: foo } - - { _id: 1, stored_fields: [foo] } - - { _id: 1, stored_fields: [foo, _source] } - - - match: { docs.0.fields.foo: [bar] } - - match: { docs.0._source: { foo: bar }} - - - match: { docs.1.fields.foo: [bar] } - - is_false: docs.1._source - - - match: { docs.2.fields.foo: [bar] } - - is_false: docs.2._source - - - match: { docs.3.fields.foo: [bar] } - - match: { docs.3._source: { foo: bar }} - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/40_routing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/40_routing.yml index df2924f274bdf..50bf9a158852b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/40_routing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/40_routing.yml @@ -40,6 +40,5 @@ - is_true: docs.2.found - match: { docs.2._index: test_1 } - - match: { docs.2._type: _doc } - match: { docs.2._id: "1" } - match: { docs.2._routing: "5" } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/41_routing_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/41_routing_with_types.yml deleted file mode 100644 index d550dd26657c9..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/41_routing_with_types.yml +++ /dev/null @@ -1,44 +0,0 @@ ---- -"Routing": - - - do: - indices.create: - index: test_1 - body: - settings: - index: - number_of_shards: 5 - number_of_routing_shards: 5 - number_of_replicas: 0 - - - do: - cluster.health: - wait_for_status: green - - - do: - index: - index: test_1 - type: test - id: 1 - routing: 5 - body: { foo: bar } - - - do: - mget: - index: test_1 - type: test - stored_fields: [_routing] - body: - docs: - - { _id: 1 } - - { _id: 1, routing: 4 } - - { _id: 1, routing: 5 } - - - is_false: docs.0.found - - is_false: docs.1.found - - - is_true: docs.2.found - - match: { docs.2._index: test_1 } - - match: { docs.2._type: test } - - match: { docs.2._id: "1" } - - match: { docs.2._routing: "5" } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/61_realtime_refresh_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/61_realtime_refresh_with_types.yml deleted file mode 100644 index 0cb7b71cf4368..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/61_realtime_refresh_with_types.yml +++ /dev/null @@ -1,53 +0,0 @@ ---- -"Realtime Refresh": - - - do: - indices.create: - index: test_1 - body: - settings: - index: - refresh_interval: -1 - number_of_replicas: 0 - - - do: - cluster.health: - wait_for_status: green - - - do: - index: - index: test_1 - type: test - id: 1 - body: { foo: bar } - - - do: - mget: - index: test_1 - type: test - realtime: false - body: - ids: [1] - - - is_false: docs.0.found - - - do: - mget: - index: test_1 - type: test - realtime: true - body: - ids: [1] - - - is_true: docs.0.found - - - do: - mget: - index: test_1 
- type: test - realtime: false - refresh: true - body: - ids: [1] - - - is_true: docs.0.found diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/70_source_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/70_source_filtering.yml index 3a3086cf3616d..35a85cf9c0116 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/70_source_filtering.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/70_source_filtering.yml @@ -1,8 +1,4 @@ setup: - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: index: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/71_source_filtering_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/71_source_filtering_with_types.yml deleted file mode 100644 index 4581e060b41a7..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/71_source_filtering_with_types.yml +++ /dev/null @@ -1,119 +0,0 @@ -setup: - - do: - index: - index: test_1 - type: test - id: 1 - body: { "include": { "field1": "v1", "field2": "v2" }, "count": 1 } - - do: - index: - index: test_1 - type: test - id: 2 - body: { "include": { "field1": "v1", "field2": "v2" }, "count": 1 } - ---- -"Source filtering - true/false": - - - do: - mget: - body: - docs: - - { _index: "test_1", _type: "test", _id: "1", _source: false } - - { _index: "test_1", _type: "test", _id: "2", _source: true } - - - match: { docs.0._id: "1" } - - is_false: docs.0._source - - match: { docs.1._id: "2" } - - is_true: docs.1._source - ---- -"Source filtering - include field": - - - do: - mget: - body: - docs: - - { _index: "test_1", _type: "test", _id: "1", _source: include.field1 } - - { _index: "test_1", _type: "test", _id: "2", _source: [ include.field1 ] } - - - match: { docs.0._source: { include: { field1: v1 }} } - - match: { docs.1._source: { include: { field1: v1 }} } - - ---- -"Source filtering - include nested field": - - - do: - mget: - body: - docs: - - { _index: "test_1", _type: "test", _id: "1", _source: { include: include.field1 } } - - { _index: "test_1", _type: "test", _id: "2", _source: { include: [ include.field1 ] } } - - - match: { docs.0._source: { include: { field1: v1 }} } - - match: { docs.1._source: { include: { field1: v1 }} } - ---- -"Source filtering - exclude field": - - - do: - mget: - body: - docs: - - { _index: "test_1", _type: "test", _id: "1", _source: { include: [ include ], exclude: [ "*.field2" ] } } - - - match: { docs.0._source: { include: { field1: v1 }} } - ---- -"Source filtering - ids and true/false": - - - do: - mget: - _source: false - index: test_1 - body: { ids: [ 1,2 ] } - - is_false: docs.0._source - - is_false: docs.1._source - - - do: - mget: - _source: true - index: test_1 - body: { ids: [ 1,2 ] } - - is_true: docs.0._source - - is_true: docs.1._source - ---- -"Source filtering - ids and include field": - - - do: - mget: - _source: include.field1 - index: test_1 - body: { ids: [ 1,2 ] } - - match: { docs.0._source: { include: { field1: v1 }} } - - match: { docs.1._source: { include: { field1: v1 }} } - ---- -"Source filtering - ids and include nested field": - - - do: - mget: - _source_includes: "include.field1,count" - index: test_1 - body: { ids: [ 1,2 ] } - - match: { docs.0._source: { include: { field1: v1 }, count: 1} } - - match: { docs.1._source: { include: { field1: v1 }, count: 1} } - ---- -"Source filtering - ids and exclude field": - - - do: - mget: - _source_includes: include - 
_source_excludes: "*.field2" - index: test_1 - body: { ids: [ 1,2 ] } - - match: { docs.0._source: { include: { field1: v1 } } } - - match: { docs.1._source: { include: { field1: v1 } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/80_deprecated.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/80_deprecated.yml index 0283455350a80..2a1b4501eae17 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/80_deprecated.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/80_deprecated.yml @@ -1,11 +1,6 @@ --- "Deprecated parameters should fail in Multi Get query": - - skip: - version: " - 6.99.99" - reason: _version, _routing are removed starting from 7.0, their equivalents without underscore are used instead - features: "warnings" - - do: index: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/80_deprecated_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/80_deprecated_with_types.yml deleted file mode 100644 index 5033f75c79426..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/80_deprecated_with_types.yml +++ /dev/null @@ -1,38 +0,0 @@ - ---- -"Deprecated parameters should fail in Multi Get query": - - - skip: - version: " - 6.99.99" - reason: _version, _routing are removed starting from 7.0, their equivalents without underscore are used instead - features: "warnings" - - - do: - index: - index: test_1 - type: test - id: 1 - body: { foo: bar } - - - do: - index: - index: test_1 - type: test - id: 2 - body: { foo: baz } - - - do: - catch: bad_request - mget: - body: - docs: - - { _index: test_1, _type: test, _id: 1, _routing : test1 } - - { _index: test_1, _type: test, _id: 2, _routing : test1 } - - - do: - catch: bad_request - mget: - body: - docs: - - { _index: test_1, _type: test, _id: 1, _version : 1 } - - { _index: test_1, _type: test, _id: 2, _version : 1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/20_docs.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/20_docs.yml index bb1b25a0dcb40..50eb344d99048 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/20_docs.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/20_docs.yml @@ -41,12 +41,10 @@ like: - _index: test_1 - _type: _doc doc: foo: bar - _index: test_1 - _type: _doc _id: 2 - _id: 3 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/30_unlike.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/30_unlike.yml index abea4c8fbe57a..a0f96eb6b2d1f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/30_unlike.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/30_unlike.yml @@ -40,11 +40,9 @@ more_like_this: like: _index: test_1 - _type: _doc _id: 1 unlike: _index: test_1 - _type: _doc _id: 3 include: true min_doc_freq: 0 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yml index 5b092c9d15e44..3a06946bffd70 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yml @@ -94,10 +94,6 @@ setup: --- "Search with new response format": - - skip: - version: " - 6.99.99" - reason: hits.total is returned as an object in 7.0.0 - - do: msearch: body: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/12_basic_with_types.yml 
b/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/12_basic_with_types.yml deleted file mode 100644 index 64e88de404ab7..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/12_basic_with_types.yml +++ /dev/null @@ -1,97 +0,0 @@ ---- -setup: - - - do: - index: - index: index_1 - type: test - id: 1 - body: { foo: bar } - - - do: - index: - index: index_1 - type: test - id: 2 - body: { foo: baz } - - - do: - index: - index: index_1 - type: test - id: 3 - body: { foo: foo } - - - do: - index: - index: index_2 - type: test - id: 1 - body: { foo: foo } - - - do: - indices.refresh: {} - ---- -"Basic multi-search": - - - do: - msearch: - rest_total_hits_as_int: true - body: - - index: index_* - - query: - match: {foo: foo} - - index: index_2 - - query: - match_all: {} - - index: index_1 - - query: - match: {foo: foo} - - index: index_3 - - query: - match_all: {} - - type: test - - query: - match_all: {} - - - match: { responses.0.hits.total: 2 } - - match: { responses.1.hits.total: 1 } - - match: { responses.2.hits.total: 1 } - - match: { responses.3.error.root_cause.0.type: index_not_found_exception } - - match: { responses.3.error.root_cause.0.reason: "/no.such.index/" } - - match: { responses.3.error.root_cause.0.index: index_3 } - - match: { responses.4.hits.total: 4 } - ---- -"Least impact smoke test": -# only passing these parameters to make sure they are consumed - - do: - msearch: - rest_total_hits_as_int: true - max_concurrent_shard_requests: 1 - max_concurrent_searches: 1 - body: - - index: index_* - - query: - match: {foo: foo} - - index: index_2 - - query: - match_all: {} - - index: index_1 - - query: - match: {foo: foo} - - index: index_3 - - query: - match_all: {} - - type: test - - query: - match_all: {} - - - match: { responses.0.hits.total: 2 } - - match: { responses.1.hits.total: 1 } - - match: { responses.2.hits.total: 1 } - - match: { responses.3.error.root_cause.0.type: index_not_found_exception } - - match: { responses.3.error.root_cause.0.reason: "/no.such.index/" } - - match: { responses.3.error.root_cause.0.index: index_3 } - - match: { responses.4.hits.total: 4 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/10_basic.yml index 87c3e6065bba4..56e5b2eb88e0e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/10_basic.yml @@ -1,7 +1,4 @@ setup: - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - do: indices.create: index: testidx diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/11_basic_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/11_basic_with_types.yml deleted file mode 100644 index 0c037eee9ddd2..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/11_basic_with_types.yml +++ /dev/null @@ -1,86 +0,0 @@ -setup: - - do: - indices.create: - include_type_name: true - index: testidx - body: - mappings: - testtype: - properties: - text: - type : "text" - term_vector : "with_positions_offsets" - - do: - index: - index: testidx - type: testtype - id: testing_document - body: {"text" : "The quick brown fox is brown."} - - - do: - indices.refresh: {} - ---- -"Basic tests for multi termvector get": - - - do: - mtermvectors: - "term_statistics" : true - "body" : - "docs": - - - "_index" : 
"testidx" - "_type" : "testtype" - "_id" : "testing_document" - - - match: {docs.0.term_vectors.text.terms.brown.term_freq: 2} - - match: {docs.0.term_vectors.text.terms.brown.ttf: 2} - - - do: - mtermvectors: - "term_statistics" : true - "body" : - "docs": - - - "_index" : "testidx" - "_type" : "testtype" - "_id" : "testing_document" - - - match: {docs.0.term_vectors.text.terms.brown.term_freq: 2} - - match: {docs.0.term_vectors.text.terms.brown.ttf: 2} - - - do: - mtermvectors: - "term_statistics" : true - "index" : "testidx" - "body" : - "docs": - - - "_type" : "testtype" - "_id" : "testing_document" - - - match: {docs.0.term_vectors.text.terms.brown.term_freq: 2} - - match: {docs.0.term_vectors.text.terms.brown.ttf: 2} - - - do: - mtermvectors: - "term_statistics" : true - "index" : "testidx" - "type" : "testtype" - "body" : - "docs": - - - "_id" : "testing_document" - - - match: {docs.0.term_vectors.text.terms.brown.term_freq: 2} - - match: {docs.0.term_vectors.text.terms.brown.ttf: 2} - - - do: - mtermvectors: - "term_statistics" : true - "index" : "testidx" - "type" : "testtype" - "ids" : ["testing_document"] - - - match: {docs.0.term_vectors.text.terms.brown.term_freq: 2} - - match: {docs.0.term_vectors.text.terms.brown.ttf: 2} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/20_deprecated.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/20_deprecated.yml index 376192680c99b..215c62c2a40ed 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/20_deprecated.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/20_deprecated.yml @@ -1,16 +1,5 @@ -setup: - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - --- "Deprecated camel case and _ parameters should fail in Term Vectors query": - - - skip: - version: " - 6.99.99" - reason: camel case and _ parameters (e.g. versionType, _version_type) should fail from 7.0 - features: "warnings" - - do: indices.create: index: testidx @@ -47,7 +36,6 @@ setup: "docs": - "_index" : "testidx" - "_type" : "_doc" "_id" : "testing_document" "version" : 1 "_version_type" : "external" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/21_deprecated_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/21_deprecated_with_types.yml deleted file mode 100644 index b0335498e22a1..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/21_deprecated_with_types.yml +++ /dev/null @@ -1,53 +0,0 @@ - ---- -"Deprecated camel case and _ parameters should fail in Term Vectors query": - - - skip: - version: " - 6.99.99" - reason: camel case and _ parameters (e.g. 
versionType, _version_type) should fail from 7.0 - features: "warnings" - - - do: - indices.create: - include_type_name: true - index: testidx - body: - mappings: - testtype: - properties: - text: - type : "text" - term_vector : "with_positions_offsets" - - - do: - index: - index: testidx - type: testtype - id: testing_document - body: {"text" : "The quick brown fox is brown."} - - - do: - catch: bad_request - mtermvectors: - "term_statistics" : true - "body" : - "docs": - - - "_index" : "testidx" - "_type" : "testtype" - "_id" : "testing_document" - "version" : 1 - "versionType" : "external" - - - do: - catch: bad_request - mtermvectors: - "term_statistics" : true - "body" : - "docs": - - - "_index" : "testidx" - "_type" : "testtype" - "_id" : "testing_document" - "version" : 1 - "_version_type" : "external" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/10_basic.yml index aa6d1e9841dd7..9294c696d91e6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/10_basic.yml @@ -198,9 +198,6 @@ --- "Scroll cannot used the request cache": - - skip: - version: " - 6.99.99" - reason: the error message has been added in v7.0.0 - do: indices.create: index: test_scroll @@ -217,9 +214,6 @@ --- "Scroll with size 0": - - skip: - version: " - 6.1.99" - reason: the error message has been added in v6.2.0 - do: indices.create: index: test_scroll @@ -237,10 +231,6 @@ --- "Scroll max_score is null": - - skip: - version: " - 6.99.99" - reason: max_score was set to 0 rather than null before 7.0 - - do: indices.create: index: test_scroll @@ -285,9 +275,6 @@ --- "Scroll with new response format": - - skip: - version: " - 6.9.99" - reason: hits.total is returned as an object in 7.0.0 - do: indices.create: index: test_scroll diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/12_slices.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/12_slices.yml index f655b43b98949..228b1a73ecd2b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/12_slices.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/12_slices.yml @@ -103,10 +103,6 @@ setup: --- "Sliced scroll with invalid arguments": - - skip: - version: " - 6.99.99" - reason: Prior versions return 500 rather than 404 - - do: catch: bad_request search: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/20_keep_alive.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/20_keep_alive.yml index a7e75f80e3f6e..0f35f95be0bf6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/20_keep_alive.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/20_keep_alive.yml @@ -10,10 +10,6 @@ --- "Max keep alive": - - skip: - version: " - 6.99.99" - reason: search.max_keep_alive was added in 7.0.0 - - do: index: index: test_scroll diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/200_top_hits_metric.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/200_top_hits_metric.yml index ddb555b8cd5a5..d13636360d2f9 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/200_top_hits_metric.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/200_top_hits_metric.yml @@ -49,10 +49,6 @@ setup: --- "top_hits aggregation with nested documents": - - skip: - 
version: " - 6.1.99" - reason: "<= 6.1 nodes don't always include index or id in nested top hits" - - do: search: rest_total_hits_as_int: true diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yml index 3683ad108e8c2..869214f9111c6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yml @@ -702,11 +702,6 @@ setup: --- "Global ordinals are not loaded with the map execution hint": - - - skip: - version: " - 6.99.99" - reason: bug fixed in 7.0 - - do: index: refresh: true @@ -752,11 +747,6 @@ setup: --- "Global ordinals are loaded with the global_ordinals execution hint": - - - skip: - version: " - 6.99.99" - reason: bug fixed in 7.0 - - do: index: refresh: true diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml index 1ec3a302d6884..2e298441918bc 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml @@ -84,10 +84,6 @@ setup: --- "Simple Composite aggregation": - - skip: - version: " - 6.0.99" - reason: this uses a new API that has been added in 6.1 - - do: search: rest_total_hits_as_int: true @@ -113,11 +109,6 @@ setup: --- "Nested Composite aggregation": - - skip: - version: " - 6.0.99" - reason: this uses a new API that has been added in 6.1 - - - do: search: rest_total_hits_as_int: true @@ -163,11 +154,6 @@ setup: --- "Aggregate After": - - skip: - version: " - 6.0.99" - reason: this uses a new API that has been added in 6.1 - - - do: search: rest_total_hits_as_int: true @@ -205,11 +191,6 @@ setup: --- "Aggregate After Missing": - - skip: - version: " - 6.1.99" - reason: bug fixed in 6.2.0 - - - do: search: rest_total_hits_as_int: true @@ -236,10 +217,6 @@ setup: --- "Invalid Composite aggregation": - - skip: - version: " - 6.0.99" - reason: this uses a new API that has been added in 6.1 - - do: catch: /\[composite\] aggregation cannot be used with a parent aggregation/ search: @@ -426,10 +403,6 @@ setup: --- "Composite aggregation with after_key in the response": - - skip: - version: " - 6.2.99" - reason: starting in 6.3.0 after_key is returned in the response - - do: search: rest_total_hits_as_int: true @@ -455,10 +428,6 @@ setup: --- "Composite aggregation and array size": - - skip: - version: " - 6.99.99" - reason: starting in 7.0 the composite aggregation throws an execption if the provided size is greater than search.max_buckets. - - do: catch: /.*Trying to create too many buckets.*/ search: @@ -481,10 +450,6 @@ setup: --- "Composite aggregation with nested parent": - - skip: - version: " - 6.99.99" - reason: the ability to set a nested parent aggregation was added in 7.0. - - do: search: rest_total_hits_as_int: true @@ -1030,3 +995,35 @@ setup: - length: { aggregations.test.buckets: 1 } - match: { aggregations.test.buckets.0.key.keyword: "foo" } - match: { aggregations.test.buckets.0.doc_count: 1 } +--- +"Simple Composite aggregation with missing order": + - skip: + version: " - 1.2.99" + reason: missing_order is supported in 1.3.0. 
+ + - do: + search: + rest_total_hits_as_int: true + index: test + body: + aggregations: + test: + composite: + sources: [ + "kw": { + "terms": { + "field": "keyword", + "missing_bucket": true, + "missing_order": "last" + } + } + ] + + - match: {hits.total: 6} + - length: { aggregations.test.buckets: 3 } + - match: { aggregations.test.buckets.0.key.kw: "bar" } + - match: { aggregations.test.buckets.0.doc_count: 3 } + - match: { aggregations.test.buckets.1.key.kw: "foo" } + - match: { aggregations.test.buckets.1.doc_count: 2 } + - match: { aggregations.test.buckets.2.key.kw: null } + - match: { aggregations.test.buckets.2.doc_count: 2 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/250_moving_fn.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/250_moving_fn.yml index 75349e9839746..339fe72b77730 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/250_moving_fn.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/250_moving_fn.yml @@ -1,8 +1,3 @@ -setup: - - skip: - version: " - 6.3.99" - reason: "moving_fn added in 6.4.0" - --- "Bad window": diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/260_weighted_avg.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/260_weighted_avg.yml index c5988fc9e5dc4..6b17132c751de 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/260_weighted_avg.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/260_weighted_avg.yml @@ -1,7 +1,4 @@ setup: - - skip: - version: " - 6.3.99" - reason: weighted_avg is only available as of 6.4.0 - do: indices.create: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/270_median_absolute_deviation_metric.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/270_median_absolute_deviation_metric.yml index 0cba08fccae9b..03797503436fb 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/270_median_absolute_deviation_metric.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/270_median_absolute_deviation_metric.yml @@ -1,7 +1,4 @@ setup: - - skip: - version: " - 6.5.99" - reason: "added in 6.6.0" - do: indices.create: index: test diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/50_filter.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/50_filter.yml index bc4105af85e65..2b02c0447e6c8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/50_filter.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/50_filter.yml @@ -31,10 +31,6 @@ setup: --- "Filter aggs with terms lookup and ensure it's cached": # Because the filter agg rewrites the terms lookup in the rewrite phase the request can be cached - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: search: rest_total_hits_as_int: true @@ -78,10 +74,6 @@ setup: --- "As a child of terms": - - skip: - version: " - 6.99.99" - reason: the test is written for hits.total.value - - do: bulk: refresh: true diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/51_filter_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/51_filter_with_types.yml deleted file mode 100644 index 
54476ce6e65b1..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/51_filter_with_types.yml +++ /dev/null @@ -1,60 +0,0 @@ -setup: - - do: - indices.create: - include_type_name: true - index: test - body: - settings: - number_of_shards: 1 - number_of_replicas: 0 - mappings: - test: - properties: - mentions: - type: keyword - notifications: - type: keyword - - - do: - index: - index: test - type: test - id: foo|bar|baz0 - body: { "notifications" : ["abc"] } - - - do: - index: - index: test - type: test - id: foo|bar|baz1 - body: { "mentions" : ["abc"] } - - - do: - indices.refresh: {} - ---- -"Filter aggs with terms lookup and ensure it's cached": - # Because the filter agg rewrites the terms lookup in the rewrite phase the request can be cached - - skip: - features: allowed_warnings - - do: - allowed_warnings: - - "Deprecated field [type] used, this field is unused and will be removed entirely" - search: - rest_total_hits_as_int: true - size: 0 - request_cache: true - body: {"aggs": { "itemsNotify": { "filter": { "terms": { "mentions": { "index": "test", "type": "test", "id": "foo|bar|baz0", "path": "notifications"}}}, "aggs": { "mentions" : {"terms" : { "field" : "mentions" }}}}}} - - # validate result - - match: { hits.total: 2 } - - match: { aggregations.itemsNotify.doc_count: 1 } - - length: { aggregations.itemsNotify.mentions.buckets: 1 } - - match: { aggregations.itemsNotify.mentions.buckets.0.key: "abc" } - # we are using a lookup - this should not cache - - do: - indices.stats: { index: test, metric: request_cache} - - match: { _shards.total: 1 } - - match: { _all.total.request_cache.hit_count: 0 } - - match: { _all.total.request_cache.miss_count: 1 } - - is_true: indices.test diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/30_max_analyzed_offset.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/30_max_analyzed_offset.yml index 334708b54b066..462f4f5d25e0b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/30_max_analyzed_offset.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/30_max_analyzed_offset.yml @@ -28,9 +28,6 @@ setup: --- "Unified highlighter on a field WITHOUT OFFSETS exceeding index.highlight.max_analyzed_offset should FAIL": - - skip: - version: " - 6.99.99" - reason: index.highlight.max_analyzed_offset setting has been added in 7.0.0 - do: catch: bad_request search: @@ -42,9 +39,6 @@ setup: --- "Plain highlighter on a field WITHOUT OFFSETS exceeding index.highlight.max_analyzed_offset should FAIL": - - skip: - version: " - 6.99.99" - reason: index.highlight.max_analyzed_offset setting has been added in 7.0.0 - do: catch: bad_request search: @@ -56,9 +50,6 @@ setup: --- "Unified highlighter on a field WITH OFFSETS exceeding index.highlight.max_analyzed_offset should SUCCEED": - - skip: - version: " - 6.99.99" - reason: index.highligt.max_analyzed_offset setting has been added in 7.0.0 - do: search: rest_total_hits_as_int: true @@ -69,9 +60,6 @@ setup: --- "Plain highlighter on a field WITH OFFSETS exceeding index.highlight.max_analyzed_offset should FAIL": - - skip: - version: " - 6.99.99" - reason: index.highlight.max_analyzed_offset setting has been added in 7.0.0 - do: catch: bad_request search: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yml index c3ffd930e9e1d..7b3b4e8233d0b 
100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yml @@ -30,10 +30,8 @@ setup: body: { "query" : { "nested" : { "path" : "nested_field", "query" : { "match_all" : {} }, "inner_hits" : {} } } } - match: { hits.total: 1 } - match: { hits.hits.0._index: "test" } - - match: { hits.hits.0._type: "_doc" } - match: { hits.hits.0._id: "1" } - match: { hits.hits.0.inner_hits.nested_field.hits.hits.0._index: "test" } - - match: { hits.hits.0.inner_hits.nested_field.hits.hits.0._type: "_doc" } - match: { hits.hits.0.inner_hits.nested_field.hits.hits.0._id: "1" } - match: { hits.hits.0.inner_hits.nested_field.hits.hits.0._nested.field: "nested_field" } - match: { hits.hits.0.inner_hits.nested_field.hits.hits.0._nested.offset: 0 } @@ -63,7 +61,6 @@ setup: - match: { hits.total: 1 } - match: { hits.hits.0._index: "test" } - - match: { hits.hits.0._type: "_doc" } - match: { hits.hits.0._id: "1" } - match: { hits.hits.0._version: 1 } - match: { hits.hits.0.fields._seq_no: [0] } @@ -86,7 +83,6 @@ setup: - match: { hits.total: 1 } - match: { hits.hits.0._index: "test" } - - match: { hits.hits.0._type: "_doc" } - match: { hits.hits.0._id: "1" } - match: { hits.hits.0._version: 2 } - match: { hits.hits.0.fields._seq_no: [1] } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/100_stored_fields.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/100_stored_fields.yml index a82d7fff480eb..d2933a44e586d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/100_stored_fields.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/100_stored_fields.yml @@ -19,7 +19,6 @@ setup: index: test - is_true: hits.hits.0._id - - is_true: hits.hits.0._type - is_true: hits.hits.0._source - do: @@ -30,7 +29,6 @@ setup: stored_fields: [] - is_true: hits.hits.0._id - - is_true: hits.hits.0._type - is_false: hits.hits.0._source - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml index e89d340347a12..091638d6a07fb 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml @@ -141,9 +141,6 @@ setup: --- "docvalue_fields": - - skip: - version: " - 6.9.99" - reason: Triggers a deprecation warning before 7.0 - do: search: body: @@ -152,9 +149,6 @@ setup: --- "multiple docvalue_fields": - - skip: - version: " - 6.9.99" - reason: Triggered a deprecation warning before 7.0 - do: search: body: @@ -163,9 +157,6 @@ setup: --- "docvalue_fields as url param": - - skip: - version: " - 6.99.99" - reason: Triggered a deprecation warning before 7.0 - do: search: docvalue_fields: [ "count" ] @@ -174,8 +165,6 @@ setup: --- "docvalue_fields with default format": - skip: - version: " - 6.99.99" - reason: Only triggers warnings on 7.0+ features: allowed_warnings - do: allowed_warnings: @@ -189,9 +178,6 @@ setup: --- "docvalue_fields with explicit format": - - skip: - version: " - 6.3.99" - reason: format option was added in 6.4 - do: search: body: @@ -202,9 +188,6 @@ setup: --- "docvalue_fields - double": - - skip: - version: " - 6.99.99" - reason: Triggered a deprecation warning before 7.0 - do: search: body: diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml index ec279b8d0d5ec..149bc90f31ea0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml @@ -73,19 +73,16 @@ setup: - match: {hits.total: 6 } - length: {hits.hits: 3 } - match: {hits.hits.0._index: test } - - match: {hits.hits.0._type: _doc } - match: {hits.hits.0.fields.numeric_group: [3] } - match: {hits.hits.0.sort: [36] } - match: {hits.hits.0._id: "6" } - is_false: hits.hits.0.inner_hits - match: {hits.hits.1._index: test } - - match: {hits.hits.1._type: _doc } - match: {hits.hits.1.fields.numeric_group: [1] } - match: {hits.hits.1.sort: [24] } - match: {hits.hits.1._id: "3" } - is_false: hits.hits.1.inner_hits - match: {hits.hits.2._index: test } - - match: {hits.hits.2._type: _doc } - match: {hits.hits.2.fields.numeric_group: [25] } - match: {hits.hits.2.sort: [10] } - match: {hits.hits.2._id: "4" } @@ -111,7 +108,6 @@ setup: - match: {hits.total: 6 } - length: {hits.hits: 1 } - match: {hits.hits.0._index: test } - - match: {hits.hits.0._type: _doc } - match: {hits.hits.0.fields.numeric_group: [25]} - match: {hits.hits.0.sort: [10] } - match: {hits.hits.0._id: "4" } @@ -140,7 +136,6 @@ setup: - match: { hits.total: 6 } - length: { hits.hits: 3 } - match: { hits.hits.0._index: test } - - match: { hits.hits.0._type: _doc } - match: { hits.hits.0.fields.numeric_group: [3] } - match: { hits.hits.0.sort: [36] } - match: { hits.hits.0._id: "6" } @@ -148,7 +143,6 @@ setup: - length: { hits.hits.0.inner_hits.sub_hits.hits.hits: 1 } - match: { hits.hits.0.inner_hits.sub_hits.hits.hits.0._id: "6" } - match: { hits.hits.1._index: test } - - match: { hits.hits.1._type: _doc } - match: { hits.hits.1.fields.numeric_group: [1] } - match: { hits.hits.1.sort: [24] } - match: { hits.hits.1._id: "3" } @@ -157,7 +151,6 @@ setup: - match: { hits.hits.1.inner_hits.sub_hits.hits.hits.0._id: "2" } - match: { hits.hits.1.inner_hits.sub_hits.hits.hits.1._id: "1" } - match: { hits.hits.2._index: test } - - match: { hits.hits.2._type: _doc } - match: { hits.hits.2.fields.numeric_group: [25] } - match: { hits.hits.2.sort: [10] } - match: { hits.hits.2._id: "4" } @@ -219,7 +212,6 @@ setup: - match: { hits.total: 6 } - length: { hits.hits: 3 } - match: { hits.hits.0._index: test } - - match: { hits.hits.0._type: _doc } - match: { hits.hits.0.fields.numeric_group: [3] } - match: { hits.hits.0.sort: [36] } - match: { hits.hits.0._id: "6" } @@ -227,7 +219,6 @@ setup: - length: { hits.hits.0.inner_hits.sub_hits.hits.hits: 1 } - match: { hits.hits.0.inner_hits.sub_hits.hits.hits.0._id: "6" } - match: { hits.hits.1._index: test } - - match: { hits.hits.1._type: _doc } - match: { hits.hits.1.fields.numeric_group: [1] } - match: { hits.hits.1.sort: [24] } - match: { hits.hits.1._id: "3" } @@ -236,7 +227,6 @@ setup: - match: { hits.hits.1.inner_hits.sub_hits.hits.hits.0._id: "2" } - match: { hits.hits.1.inner_hits.sub_hits.hits.hits.1._id: "1" } - match: { hits.hits.2._index: test } - - match: { hits.hits.2._type: _doc } - match: { hits.hits.2.fields.numeric_group: [25] } - match: { hits.hits.2.sort: [10] } - match: { hits.hits.2._id: "4" } @@ -315,11 +305,6 @@ setup: --- "no hits and inner_hits max_score null": - - - skip: - version: " - 6.99.99" - reason: max_score was set to 0 rather than null before 7.0 - - do: search: 
rest_total_hits_as_int: true @@ -353,7 +338,6 @@ setup: - match: { hits.total: 6 } - length: { hits.hits: 3 } - match: { hits.hits.0._index: test } - - match: { hits.hits.0._type: _doc } - match: { hits.hits.0.fields.numeric_group: [3] } - match: { hits.hits.0.sort: [36] } - match: { hits.hits.0._id: "6" } @@ -364,7 +348,6 @@ setup: - length: { hits.hits.0.inner_hits.sub_hits_desc.hits.hits: 1 } - match: { hits.hits.0.inner_hits.sub_hits_desc.hits.hits.0._id: "6" } - match: { hits.hits.1._index: test } - - match: { hits.hits.1._type: _doc } - match: { hits.hits.1.fields.numeric_group: [1] } - match: { hits.hits.1.sort: [24] } - match: { hits.hits.1._id: "3" } @@ -376,7 +359,6 @@ setup: - length: { hits.hits.1.inner_hits.sub_hits_desc.hits.hits: 1 } - match: { hits.hits.1.inner_hits.sub_hits_desc.hits.hits.0._id: "3" } - match: { hits.hits.2._index: test } - - match: { hits.hits.2._type: _doc } - match: { hits.hits.2.fields.numeric_group: [25] } - match: { hits.hits.2.sort: [10] } - match: { hits.hits.2._id: "4" } @@ -390,11 +372,6 @@ setup: --- "field collapsing, inner_hits and version": - - - skip: - version: " - 6.1.0" - reason: "bug fixed in 6.1.1" - - do: count: index: test @@ -412,7 +389,6 @@ setup: - match: { hits.total: 6 } - length: { hits.hits: 3 } - match: { hits.hits.0._index: test } - - match: { hits.hits.0._type: _doc } - match: { hits.hits.0.fields.numeric_group: [3] } - match: { hits.hits.0.sort: [36] } - match: { hits.hits.0._id: "6" } @@ -422,7 +398,6 @@ setup: - match: { hits.hits.0.inner_hits.sub_hits.hits.hits.0._id: "6" } - match: { hits.hits.0.inner_hits.sub_hits.hits.hits.0._version: 66 } - match: { hits.hits.1._index: test } - - match: { hits.hits.1._type: _doc } - match: { hits.hits.1.fields.numeric_group: [1] } - match: { hits.hits.1.sort: [24] } - match: { hits.hits.1._id: "3" } @@ -434,7 +409,6 @@ setup: - match: { hits.hits.1.inner_hits.sub_hits.hits.hits.1._id: "1" } - match: { hits.hits.1.inner_hits.sub_hits.hits.hits.1._version: 11 } - match: { hits.hits.2._index: test } - - match: { hits.hits.2._type: _doc } - match: { hits.hits.2.fields.numeric_group: [25] } - match: { hits.hits.2.sort: [10] } - match: { hits.hits.2._id: "4" } @@ -493,11 +467,6 @@ setup: --- "field collapsing, inner_hits and seq_no": - - - skip: - version: " - 6.99.0" - reason: "sequence numbers introduced in 7.0.0" - - do: search: rest_total_hits_as_int: true @@ -532,7 +501,6 @@ setup: - gte: { hits.hits.1.inner_hits.sub_hits.hits.hits.1._seq_no: 0 } - gte: { hits.hits.1.inner_hits.sub_hits.hits.hits.1._primary_term: 1 } - match: { hits.hits.2._index: test } - - match: { hits.hits.2._type: _doc } - match: { hits.hits.2.fields.numeric_group: [25] } - match: { hits.hits.2.sort: [10] } - match: { hits.hits.2._id: "4" } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/160_exists_query.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/160_exists_query.yml index 33b149b00a4fb..201e456be2cdd 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/160_exists_query.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/160_exists_query.yml @@ -550,9 +550,6 @@ setup: --- "Test exists query on _index field": - - skip: - version: " - 6.0.99" - reason: exists on _index not supported prior to 6.1.0 - do: search: rest_total_hits_as_int: true @@ -566,9 +563,6 @@ setup: --- "Test exists query on _type field": - - skip: - version: " - 6.0.99" - reason: exists on _type not supported prior to 6.1.0 - do: search: rest_total_hits_as_int: true 
@@ -608,9 +602,6 @@ setup: --- "Test exists query on _source field": - - skip: - version: " - 6.0.99" - reason: exists on _source not supported prior to 6.1.0 - do: catch: /query_shard_exception/ search: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/170_terms_query.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/170_terms_query.yml index 89ea24618c68f..82ccb816f2314 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/170_terms_query.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/170_terms_query.yml @@ -1,8 +1,5 @@ --- "Terms Query with No.of terms exceeding index.max_terms_count should FAIL": - - skip: - version: " - 6.99.99" - reason: index.max_terms_count setting has been added in 7.0.0 - do: indices.create: index: test_index diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/171_terms_query_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/171_terms_query_with_types.yml deleted file mode 100644 index d3d48eae4082d..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/171_terms_query_with_types.yml +++ /dev/null @@ -1,63 +0,0 @@ ---- -"Terms Query with No.of terms exceeding index.max_terms_count should FAIL": - - skip: - version: " - 6.99.99" - reason: index.max_terms_count setting has been added in 7.0.0 - features: allowed_warnings - - do: - indices.create: - include_type_name: true - index: test_index - body: - settings: - number_of_shards: 1 - index.max_terms_count: 2 - mappings: - test_type: - properties: - user: - type: keyword - followers: - type: keyword - - do: - bulk: - refresh: true - body: - - '{"index": {"_index": "test_index", "_type": "test_type", "_id": "u1"}}' - - '{"user": "u1", "followers": ["u2", "u3"]}' - - '{"index": {"_index": "test_index", "_type": "test_type", "_id": "u2"}}' - - '{"user": "u2", "followers": ["u1", "u3", "u4"]}' - - '{"index": {"_index": "test_index", "_type": "test_type", "_id": "u3"}}' - - '{"user": "u3", "followers": ["u1"]}' - - '{"index": {"_index": "test_index", "_type": "test_type", "_id": "u4"}}' - - '{"user": "u4", "followers": ["u3"]}' - - - do: - search: - rest_total_hits_as_int: true - index: test_index - body: {"query" : {"terms" : {"user" : ["u1", "u2"]}}} - - match: { hits.total: 2 } - - - do: - catch: bad_request - search: - rest_total_hits_as_int: true - index: test_index - body: {"query" : {"terms" : {"user" : ["u1", "u2", "u3"]}}} - - - do: - allowed_warnings: - - "Deprecated field [type] used, this field is unused and will be removed entirely" - search: - rest_total_hits_as_int: true - index: test_index - body: {"query" : {"terms" : {"user" : {"index" : "test_index", "type" : "test_type", "id" : "u1", "path" : "followers"}}}} - - match: { hits.total: 2 } - - - do: - catch: bad_request - search: - rest_total_hits_as_int: true - index: test_index - body: {"query" : {"terms" : {"user" : {"index" : "test_index", "type" : "test_type", "id" : "u2", "path" : "followers"}}}} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml index 40c80b88cfb1b..6f276f669f815 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml @@ -1,8 +1,4 @@ setup: - - skip: - version: " - 6.2.99" - reason: index_prefixes is only available 
as of 6.3.0 - - do: indices.create: index: test @@ -27,9 +23,6 @@ setup: --- "search with index prefixes": - - skip: - version: " - 6.2.99" - reason: index_prefixes is only available as of 6.3.0 - do: search: rest_total_hits_as_int: true @@ -85,10 +78,6 @@ setup: --- "search index prefixes with span_multi": - - skip: - version: " - 6.99.99" - reason: span_multi throws an exception with prefix fields on < versions - - do: search: rest_total_hits_as_int: true diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/200_ignore_malformed.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/200_ignore_malformed.yml index 71ddb32302396..8596821a76c41 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/200_ignore_malformed.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/200_ignore_malformed.yml @@ -1,9 +1,5 @@ --- setup: - - skip: - version: " - 6.3.99" - reason: _ignored was added in 6.4.0 - - do: indices.create: index: test diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/200_index_phrase_search.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/200_index_phrase_search.yml index b48857be4e7a1..6340b20a4765f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/200_index_phrase_search.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/200_index_phrase_search.yml @@ -1,8 +1,5 @@ --- "search with indexed phrases": - - skip: - version: " - 6.99.99" - reason: index_phrase is only available as of 7.0.0 - do: indices.create: index: test diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/20_default_values.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/20_default_values.yml index fd4621e48cad3..14d68fa3c429a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/20_default_values.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/20_default_values.yml @@ -46,7 +46,6 @@ setup: - match: {hits.total: 1} - match: {hits.hits.0._index: test_1 } - - match: {hits.hits.0._type: _doc } - match: {hits.hits.0._id: "1" } - do: @@ -60,7 +59,6 @@ setup: - match: {hits.total: 1} - match: {hits.hits.0._index: test_2 } - - match: {hits.hits.0._type: _doc } - match: {hits.hits.0._id: "42" } --- @@ -76,9 +74,6 @@ setup: --- "Search with new response format": - - skip: - version: " - 6.99.99" - reason: hits.total is returned as an object in 7.0.0 - do: search: body: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/210_rescore_explain.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/210_rescore_explain.yml index 92bb049980dff..4d63a81a99595 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/210_rescore_explain.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/210_rescore_explain.yml @@ -1,8 +1,5 @@ --- "Score should match explanation in rescore": - - skip: - version: " - 6.99.99" - reason: Explanation for rescoring was corrected after these versions - do: bulk: refresh: true diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/230_interval_query.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/230_interval_query.yml index 78380d0da6a71..0286d3caf66b8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/230_interval_query.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/230_interval_query.yml @@ -1,8 +1,4 @@ setup: - - skip: - version: " - 6.99.99" - reason: 
"Implemented in 7.0" - - do: indices.create: index: test @@ -17,16 +13,73 @@ setup: refresh: true body: - '{"index": {"_index": "test", "_id": "1"}}' - - '{"text" : "Some like it hot, some like it cold"}' + - '{"text" : "Some like hot and dry, some like it cold and wet"}' - '{"index": {"_index": "test", "_id": "2"}}' - '{"text" : "Its cold outside, theres no kind of atmosphere"}' - '{"index": {"_index": "test", "_id": "3"}}' - '{"text" : "Baby its cold there outside"}' - '{"index": {"_index": "test", "_id": "4"}}' - '{"text" : "Outside it is cold and wet"}' + - '{"index": {"_index": "test", "_id": "5"}}' + - '{"text" : "cold rain makes it wet"}' + - '{"index": {"_index": "test", "_id": "6"}}' + - '{"text" : "that is some cold cold rain"}' --- -"Test ordered matching": +"Test regexp": + - skip: + version: " - 1.2.99" + reason: "regexp introduced in 1.3" + - do: + search: + index: test + body: + query: + intervals: + text: + regexp: + pattern: "at[a-z]{2,}here" + - match: { hits.total.value: 1 } + +--- +"Test regexp, explicit case sensitive": + - skip: + version: " - 1.2.99" + reason: "case_insensitive introduced in 1.3" + - do: + search: + index: test + body: + query: + intervals: + text: + regexp: + pattern: "AT[a-z]{2,}HERE" + case_insensitive: false + - match: { hits.total.value: 0 } + +--- +"Test regexp, explicit case insensitive": + - skip: + version: " - 1.2.99" + reason: "case_insensitive introduced in 1.3" + - do: + search: + index: test + body: + query: + intervals: + text: + regexp: + pattern: "AT[a-z]{2,}HERE" + case_insensitive: true + - match: { hits.total.value: 1 } + +--- +"Test ordered matching with via mode": + - skip: + version: " - 1.2.99" + reason: "mode introduced in 1.3" - do: search: index: test @@ -36,7 +89,25 @@ setup: text: match: query: "cold outside" - ordered: true + mode: "ordered" + - match: { hits.total.value: 2 } + +--- +"Test ordered matching": + - skip: + features: allowed_warnings + - do: + allowed_warnings: + - "Deprecated field [ordered] used, this field is unused and will be removed entirely" + search: + index: test + body: + query: + intervals: + text: + match: + query: "cold outside" + ordered: true - match: { hits.total.value: 2 } --- @@ -52,9 +123,30 @@ setup: query: "cold outside" - match: { hits.total.value: 3 } +--- +"Test explicit unordered matching via mode": + - skip: + version: " - 1.2.99" + reason: "mode introduced in 1.3" + - do: + search: + index: test + body: + query: + intervals: + text: + match: + query: "cold outside" + mode: "unordered" + - match: { hits.total.value: 3 } + --- "Test explicit unordered matching": + - skip: + features: allowed_warnings - do: + allowed_warnings: + - "Deprecated field [ordered] used, this field is unused and will be removed entirely" search: index: test body: @@ -66,8 +158,45 @@ setup: ordered: false - match: { hits.total.value: 3 } +--- +"Test unordered with overlap in match": + - skip: + version: " - 1.2.99" + reason: "Implemented in 2.0" + - do: + search: + index: test + body: + query: + intervals: + text: + match: + query: "cold wet it" + mode: "unordered" + - match: { hits.total.value: 3 } + +--- +"Test unordered with no overlap in match": + - skip: + version: " - 1.2.99" + reason: "Implemented in 2.0" + - do: + search: + index: test + body: + query: + intervals: + text: + match: + query: "cold wet it" + mode: "unordered_no_overlap" + - match: { hits.total.value: 2 } + --- "Test phrase matching": + - skip: + version: " - 1.2.99" + reason: "mode introduced in 1.3" - do: search: index: test @@ -77,7 
+206,7 @@ setup: text: match: query: "cold outside" - ordered: true + mode: "ordered" max_gaps: 0 - match: { hits.total.value: 1 } @@ -97,6 +226,9 @@ setup: --- "Test ordered max_gaps matching": + - skip: + version: " - 1.2.99" + reason: "mode introduced in 1.3" - do: search: index: test @@ -107,12 +239,41 @@ setup: match: query: "cold outside" max_gaps: 0 - ordered: true + mode: "ordered" + - match: { hits.total.value: 1 } + +--- +"Test ordered combination with disjunction via mode": + - skip: + version: " - 1.2.99" + reason: "mode introduced in 1.3" + - do: + search: + index: test + body: + query: + intervals: + text: + all_of: + intervals: + - any_of: + intervals: + - match: + query: "cold" + - match: + query: "outside" + - match: + query: "atmosphere" + mode: "ordered" - match: { hits.total.value: 1 } --- "Test ordered combination with disjunction": + - skip: + features: allowed_warnings - do: + allowed_warnings: + - "Deprecated field [ordered] used, this field is unused and will be removed entirely" search: index: test body: @@ -134,6 +295,9 @@ setup: --- "Test ordered combination with max_gaps": + - skip: + version: " - 1.2.99" + reason: "mode introduced in 1.3" - do: search: index: test @@ -148,11 +312,14 @@ setup: - match: query: "outside" max_gaps: 0 - ordered: true + mode: "ordered" - match: { hits.total.value: 1 } --- "Test ordered combination": + - skip: + version: " - 1.2.99" + reason: "mode introduced in 1.3" - do: search: index: test @@ -166,12 +333,38 @@ setup: query: "cold" - match: query: "outside" - ordered: true + mode: "ordered" + - match: { hits.total.value: 2 } + +--- +"Test unordered combination via mode": + - skip: + version: " - 1.2.99" + reason: "mode introduced in 1.3" + - do: + search: + index: test + body: + query: + intervals: + text: + all_of: + intervals: + - match: + query: "cold" + - match: + query: "outside" + max_gaps: 1 + mode: "unordered" - match: { hits.total.value: 2 } --- "Test unordered combination": + - skip: + features: allowed_warnings - do: + allowed_warnings: + - "Deprecated field [ordered] used, this field is unused and will be removed entirely" search: index: test body: @@ -188,8 +381,107 @@ setup: ordered: false - match: { hits.total.value: 2 } +--- +"Test unordered combination with overlap": + - skip: + version: " - 1.2.99" + reason: "Implemented in 2.0" + - do: + search: + index: test + body: + query: + intervals: + text: + all_of: + intervals: + - match: + query: "cold" + - match: + query: "wet" + - match: + query: "it" + mode: "unordered" + - match: { hits.total.value: 3 } + +--- +"Test unordered combination no overlap": + - skip: + version: " - 1.2.99" + reason: "Implemented in 2.0" + - do: + search: + index: test + body: + query: + intervals: + text: + all_of: + intervals: + - match: + query: "cold" + - match: + query: "wet" + - match: + query: "it" + mode: "unordered_no_overlap" + - match: { hits.total.value: 2 } + +--- +"Test nested unordered combination with overlap": + - skip: + version: " - 1.2.99" + reason: "Implemented in 2.0" + - do: + search: + index: test + body: + query: + intervals: + text: + all_of: + intervals: + - any_of: + intervals: + - match: + query: "cold" + - match: + query: "hot" + - match: + query: "cold" + mode: "unordered" + - match: { hits.total.value: 6 } + +--- +"Test nested unordered combination no overlap": + - skip: + version: " - 1.2.99" + reason: "Implemented in 2.0" + - do: + search: + index: test + body: + query: + intervals: + text: + all_of: + intervals: + - any_of: + intervals: + - match: + 
query: "cold" + - match: + query: "hot" + - match: + query: "cold" + mode: "unordered_no_overlap" + - match: { hits.total.value: 2 } + --- "Test block combination": + - skip: + version: " - 1.2.99" + reason: "mode introduced in 1.3" - do: search: index: test @@ -203,13 +495,16 @@ setup: query: "cold" - match: query: "outside" - ordered: true + mode: "ordered" max_gaps: 0 - match: { hits.total.value: 1 } --- "Test containing": + - skip: + version: " - 1.2.99" + reason: "mode introduced in 1.3" - do: search: index: test @@ -223,7 +518,7 @@ setup: query: "cold" - match: query: "outside" - ordered: false + mode: "unordered" filter: containing: match: @@ -233,6 +528,9 @@ setup: --- "Test not containing": + - skip: + version: " - 1.2.99" + reason: "mode introduced in 1.3" - do: search: index: test @@ -246,7 +544,7 @@ setup: query: "cold" - match: query: "outside" - ordered: false + mode: "unordered" filter: not_containing: match: @@ -255,6 +553,9 @@ setup: --- "Test contained_by": + - skip: + version: " - 1.2.99" + reason: "mode introduced in 1.3" - do: search: index: test @@ -272,7 +573,7 @@ setup: query: "cold" - match: query: "outside" - ordered: false + mode: "unordered" - match: { hits.total.value: 1 } --- @@ -294,10 +595,13 @@ setup: query: "cold" - match: query: "outside" - - match: { hits.total.value: 1 } + - match: { hits.total.value: 2 } --- "Test not_overlapping": + - skip: + version: " - 1.2.99" + reason: "mode introduced in 1.3" - do: search: index: test @@ -311,7 +615,7 @@ setup: query: "cold" - match: query: "outside" - ordered: true + mode: "ordered" filter: not_overlapping: all_of: @@ -320,14 +624,14 @@ setup: query: "baby" - match: query: "there" - ordered: false + mode: "unordered" - match: { hits.total.value: 1 } --- "Test overlapping": - skip: - version: " - 7.1.99" - reason: "Implemented in 7.2" + version: " - 1.2.99" + reason: "mode introduced in 1.3" - do: search: index: test @@ -337,12 +641,12 @@ setup: text: match: query: "cold outside" - ordered: true + mode: "ordered" filter: overlapping: match: query: "baby there" - ordered: false + mode: "unordered" - match: { hits.total.value: 1 } - match: { hits.hits.0._id: "3" } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/240_date_nanos.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/240_date_nanos.yml index 7657dc2bebb36..feb875e81a785 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/240_date_nanos.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/240_date_nanos.yml @@ -1,8 +1,4 @@ setup: - - skip: - version: " - 6.99.99" - reason: "Implemented in 7.0" - - do: indices.create: index: date_ns diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/30_limits.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/30_limits.yml index 17735c7fd451a..1f550d114cf29 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/30_limits.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/30_limits.yml @@ -64,10 +64,6 @@ setup: --- "Docvalues_fields size limit": - - - skip: - version: " - 6.99.99" - reason: "Triggers warnings before 7.0" - do: catch: /Trying to retrieve too many docvalue_fields\. Must be less than or equal to[:] \[2\] but was \[3\]\. 
This limit can be set by changing the \[index.max_docvalue_fields_search\] index level setting\./ search: @@ -99,10 +95,6 @@ setup: --- "Regexp length limit": - - skip: - version: " - 6.99.99" - reason: "The regex length limit was introduced in 7.0.0" - - do: catch: /The length of regex \[1110\] used in the Regexp Query request has exceeded the allowed maximum of \[1000\]\. This maximum can be set by changing the \[index.max_regex_length\] index level setting\./ search: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/70_response_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/70_response_filtering.yml index d306cb7b1ad50..e38f5f862a273 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/70_response_filtering.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/70_response_filtering.yml @@ -31,10 +31,8 @@ - is_true: _shards.total - is_true: hits.total - is_true: hits.hits.0._index - - is_true: hits.hits.0._type - is_true: hits.hits.0._id - is_true: hits.hits.1._index - - is_true: hits.hits.1._type - is_true: hits.hits.1._id - do: @@ -48,10 +46,8 @@ - is_false: _shards.total - is_false: hits.total - is_false: hits.hits.0._index - - is_false: hits.hits.0._type - is_false: hits.hits.0._id - is_false: hits.hits.1._index - - is_false: hits.hits.1._type - is_false: hits.hits.1._id - do: @@ -65,10 +61,8 @@ - is_true: _shards.total - is_false: hits.total - is_false: hits.hits.0._index - - is_false: hits.hits.0._type - is_false: hits.hits.0._id - is_false: hits.hits.1._index - - is_false: hits.hits.1._type - is_false: hits.hits.1._id - do: @@ -82,10 +76,8 @@ - is_true: _shards.total - is_true: hits.total - is_true: hits.hits.0._index - - is_false: hits.hits.0._type - is_true: hits.hits.0._id - is_true: hits.hits.1._index - - is_false: hits.hits.1._type - is_true: hits.hits.1._id --- diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml index 9f0273fbc0213..5f5d88dba7687 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml @@ -38,7 +38,6 @@ - match: {hits.total: 3 } - length: {hits.hits: 1 } - match: {hits.hits.0._index: test } - - match: {hits.hits.0._type: _doc } - match: {hits.hits.0._id: "172" } - match: {hits.hits.0.sort: [24, 172] } @@ -57,7 +56,6 @@ - match: {hits.total: 3 } - length: {hits.hits: 1 } - match: {hits.hits.0._index: test } - - match: {hits.hits.0._type: _doc } - match: {hits.hits.0._id: "42" } - match: {hits.hits.0.sort: [18, 42] } @@ -76,7 +74,6 @@ - match: {hits.total: 3} - length: {hits.hits: 1 } - match: {hits.hits.0._index: test } - - match: {hits.hits.0._type: _doc } - match: {hits.hits.0._id: "1" } - match: {hits.hits.0.sort: [18, 1] } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.create/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.create/10_basic.yml index fe70620c6ef62..ee831e78c74a6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.create/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.create/10_basic.yml @@ -85,10 +85,6 @@ setup: --- "Create a snapshot for missing index": - - skip: - version: " - 6.0.0" - reason: ignore_unavailable default is false in 6.0.0 - - do: catch: missing snapshot.create: diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.get/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.get/10_basic.yml index 874dda3606c4a..57a4cb55852a5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.get/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.get/10_basic.yml @@ -96,10 +96,6 @@ setup: --- "Get snapshot info contains include_global_state": - - skip: - version: " - 6.1.99" - reason: "include_global_state field has been added in the response in 6.2.0" - - do: indices.create: index: test_index diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/20_completion.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/20_completion.yml index b64a51141dc6e..dfed3346726cf 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/20_completion.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/20_completion.yml @@ -281,21 +281,15 @@ setup: - length: { suggest.result.0.options: 2 } - match: { suggest.result.0.options.0.text: "baz" } - match: { suggest.result.0.options.0._index: "test" } - - match: { suggest.result.0.options.0._type: "_doc" } - match: { suggest.result.0.options.0._source.title: "title_baz" } - match: { suggest.result.0.options.0._source.count: 3 } - match: { suggest.result.0.options.1.text: "bar" } - match: { suggest.result.0.options.1._index: "test" } - - match: { suggest.result.0.options.1._type: "_doc" } - match: { suggest.result.0.options.1._source.title: "title_bar" } - match: { suggest.result.0.options.1._source.count: 4 } --- "Skip duplicates should work": - - skip: - version: " - 6.0.99" - reason: skip_duplicates was added in 6.1 - - do: index: index: test diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/30_context.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/30_context.yml index e2c7ccfb421e3..df415ef484b1f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/30_context.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/30_context.yml @@ -277,10 +277,6 @@ setup: --- "Skip duplicates with contexts should work": - - skip: - version: " - 6.0.99" - reason: skip_duplicates was added in 6.1 - - do: index: index: test @@ -333,10 +329,6 @@ setup: --- "Indexing and Querying without contexts is forbidden": - - skip: - version: " - 6.99.99" - reason: this feature was removed in 7.0 - - do: index: index: test diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/50_completion_with_multi_fields.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/50_completion_with_multi_fields.yml index a29019183e199..bcd5fa14c87f9 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/50_completion_with_multi_fields.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/50_completion_with_multi_fields.yml @@ -1,11 +1,6 @@ --- "Search by suggestion and by keyword sub-field should work": - - - skip: - version: " - 6.99.99" - reason: "Search by suggestion with multi-fields was introduced 7.0.0" - - do: indices.create: index: completion_with_sub_keyword @@ -63,11 +58,6 @@ --- "Search by suggestion on sub field should work": - - - skip: - version: " - 6.99.99" - reason: "Search by suggestion with multi-fields was introduced 7.0.0" - - do: indices.create: index: completion_with_sub_completion @@ -113,11 +103,6 @@ --- "Search by suggestion on sub field with context should work": - - - skip: - 
version: " - 6.99.99" - reason: "Search by suggestion with multi-fields was introduced 7.0.0" - - do: indices.create: index: completion_with_context @@ -182,11 +167,6 @@ --- "Search by suggestion on sub field with weight should work": - - - skip: - version: " - 6.99.99" - reason: "Search by suggestion with multi-fields was introduced 7.0.0" - - do: indices.create: index: completion_with_weight @@ -238,11 +218,6 @@ --- "Search by suggestion on geofield-hash on sub field should work": - - - skip: - version: " - 6.99.99" - reason: "Search by suggestion with multi-fields was introduced 7.0.0" - - do: indices.create: index: geofield_with_completion diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.list/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.list/10_basic.yml index 1742134af2b75..d0385ac0125f4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.list/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.list/10_basic.yml @@ -23,10 +23,7 @@ --- "tasks_list headers": - skip: - version: " - 6.99.99" - features: headers - reason: task headers has been added in 7.0.0 - + features: headers - do: headers: { "X-Opaque-Id": "That is me" } tasks.list: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/11_basic_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/11_basic_with_types.yml deleted file mode 100644 index 992d6db7ca786..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/11_basic_with_types.yml +++ /dev/null @@ -1,36 +0,0 @@ -setup: - - do: - indices.create: - include_type_name: true - index: testidx - body: - mappings: - testtype: - "properties": - "text": - "type" : "text" - "term_vector" : "with_positions_offsets" - - do: - index: - index: testidx - type: testtype - id: testing_document - body: - "text" : "The quick brown fox is brown." 
- - do: - indices.refresh: {} - ---- -"Basic tests for termvector get": - - - do: - termvectors: - index: testidx - type: testtype - id: testing_document - "term_statistics" : true - - - - match: {term_vectors.text.field_statistics.sum_doc_freq: 5} - - match: {term_vectors.text.terms.brown.doc_freq: 1} - - match: {term_vectors.text.terms.brown.tokens.0.start_offset: 10} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/20_issue7121.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/20_issue7121.yml index 5f43e8a247923..44a78cadc1ada 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/20_issue7121.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/20_issue7121.yml @@ -1,8 +1,3 @@ -setup: - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - --- "Term vector API should return 'found: false' for docs between index and refresh": - do: @@ -39,6 +34,5 @@ setup: realtime: false - match: { _index: "testidx" } - - match: { _type: "_doc" } - match: { _id: "1" } - is_false: found diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/21_issue7121_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/21_issue7121_with_types.yml deleted file mode 100644 index cf597bf141f61..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/21_issue7121_with_types.yml +++ /dev/null @@ -1,42 +0,0 @@ -"Term vector API should return 'found: false' for docs between index and refresh": - - do: - indices.create: - include_type_name: true - index: testidx - body: - settings: - index: - translog.flush_threshold_size: "512MB" - number_of_shards: 1 - number_of_replicas: 0 - refresh_interval: -1 - mappings: - doc: - properties: - text: - type : "text" - term_vector : "with_positions_offsets" - - - do: - cluster.health: - wait_for_status: green - - - do: - index: - index: testidx - type: doc - id: 1 - body: - text : "foo bar" - - - do: - termvectors: - index: testidx - type: doc - id: 1 - realtime: false - - - match: { _index: "testidx" } - - match: { _type: "doc" } - - match: { _id: "1" } - - is_false: found diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/30_realtime.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/30_realtime.yml index 0cb6dfc06904b..1d357bb587021 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/30_realtime.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/30_realtime.yml @@ -1,8 +1,3 @@ -setup: - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - --- "Realtime Term Vectors": diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/31_realtime_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/31_realtime_with_types.yml deleted file mode 100644 index 26f441207ace8..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/31_realtime_with_types.yml +++ /dev/null @@ -1,40 +0,0 @@ ---- -"Realtime Term Vectors": - - - do: - indices.create: - index: test_1 - body: - settings: - index: - refresh_interval: -1 - number_of_replicas: 0 - - - do: - cluster.health: - wait_for_status: green - - - do: - index: - index: test_1 - type: test - id: 1 - body: { foo: bar } - - - do: - termvectors: - index: test_1 - type: test - id: 1 - realtime: false - - - is_false: found - - - do: - termvectors: - index: 
test_1 - type: test - id: 1 - realtime: true - - - is_true: found diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/50_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/50_mix_typeless_typeful.yml deleted file mode 100644 index 4382442dee4dd..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/50_mix_typeless_typeful.yml +++ /dev/null @@ -1,46 +0,0 @@ ---- -"Term vectors with typeless API on an index that has types": - - - skip: - version: " - 6.99.99" - reason: Typeless APIs were introduced in 7.0.0 - - - do: - indices.create: # not using include_type_name: false on purpose - include_type_name: true - index: index - body: - mappings: - not_doc: - properties: - foo: - type: "text" - term_vector: "with_positions" - - - do: - index: - index: index - type: not_doc - id: 1 - body: { foo: bar } - - - do: - indices.refresh: {} - - - do: - termvectors: - index: index - type: _doc # todo: remove when termvectors support typeless API - id: 1 - - - is_true: found - - match: {_type: _doc} - - match: {term_vectors.foo.terms.bar.term_freq: 1} - - - do: - termvectors: - index: index - type: some_random_type - id: 1 - - - is_false: found diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/10_doc.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/10_doc.yml index 3a35ad46f9161..4cb6710cc161c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/10_doc.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/10_doc.yml @@ -1,10 +1,5 @@ --- "Partial document": - - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - - do: index: index: test_1 @@ -25,7 +20,6 @@ one: 3 - match: { _index: test_1 } - - match: { _type: _doc } - match: { _id: "1" } - match: { _version: 2 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/11_shard_header.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/11_shard_header.yml index 41dba3551e64c..ffcb72027b33d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/11_shard_header.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/11_shard_header.yml @@ -32,7 +32,6 @@ foo: baz - match: { _index: foobar } - - match: { _type: _doc } - match: { _id: "1"} - match: { _version: 2} - match: { _shards.total: 1} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/12_result.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/12_result.yml index 657c036291bd6..ff81bdfd39b26 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/12_result.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/12_result.yml @@ -1,9 +1,5 @@ --- "Update result field": - - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - do: update: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/13_legacy_doc.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/13_legacy_doc.yml index 08f3457400d4f..a97c68ba6ee3f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/13_legacy_doc.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/13_legacy_doc.yml @@ -21,7 +21,6 @@ one: 3 - match: { _index: test_1 } - - match: { _type: _doc } - match: { _id: "1" } - match: { _version: 2 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/14_shard_header_with_types.yml 
b/rest-api-spec/src/main/resources/rest-api-spec/test/update/14_shard_header_with_types.yml deleted file mode 100644 index eb2e4ff9a9117..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/14_shard_header_with_types.yml +++ /dev/null @@ -1,39 +0,0 @@ ---- -"Update check shard header": - - - do: - indices.create: - index: foobar - body: - settings: - number_of_shards: "1" - number_of_replicas: "0" - - - do: - cluster.health: - wait_for_status: green - - - do: - index: - index: foobar - type: baz - id: 1 - body: { foo: bar } - - - do: - update: - index: foobar - type: baz - id: 1 - body: - doc: - foo: baz - - - match: { _index: foobar } - - match: { _type: baz } - - match: { _id: "1"} - - match: { _version: 2} - - match: { _shards.total: 1} - - match: { _shards.successful: 1} - - match: { _shards.failed: 0} - - is_false: _shards.pending diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/15_result_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/15_result_with_types.yml deleted file mode 100644 index 9adada6d54b4f..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/15_result_with_types.yml +++ /dev/null @@ -1,52 +0,0 @@ ---- -"Update result field": - - - do: - update: - index: test_1 - type: test - id: 1 - body: - doc: { foo: bar } - doc_as_upsert: true - - - match: { _version: 1 } - - match: { result: created } - - - do: - update: - index: test_1 - type: test - id: 1 - body: - doc: { foo: bar } - doc_as_upsert: true - - - match: { _version: 1 } - - match: { result: noop } - - - do: - update: - index: test_1 - type: test - id: 1 - body: - doc: { foo: bar } - doc_as_upsert: true - detect_noop: false - - - match: { _version: 2 } - - match: { result: updated } - - - do: - update: - index: test_1 - type: test - id: 1 - body: - doc: { foo: baz } - doc_as_upsert: true - detect_noop: true - - - match: { _version: 3 } - - match: { result: updated } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/16_noop.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/16_noop.yml index bfb56541fb7eb..dfdf2a10c84fc 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/16_noop.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/16_noop.yml @@ -6,7 +6,6 @@ - do: index: index: test_1 - type: test id: 1 body: { foo: bar } @@ -18,7 +17,6 @@ - do: update: index: test_1 - type: test id: 1 body: doc: { foo: bar } @@ -31,7 +29,6 @@ - do: update: index: test_1 - type: test id: 1 body: doc: { foo: bar } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/20_doc_upsert.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/20_doc_upsert.yml index a849eecc66629..4d03971aba252 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/20_doc_upsert.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/20_doc_upsert.yml @@ -1,9 +1,5 @@ --- "Doc upsert": - - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - do: update: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/21_doc_upsert_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/21_doc_upsert_with_types.yml deleted file mode 100644 index f34e030ff66a0..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/21_doc_upsert_with_types.yml +++ /dev/null @@ -1,41 +0,0 @@ ---- -"Doc upsert": - - - do: - update: - index: test_1 - type: 
test - id: 1 - body: - doc: { foo: bar, count: 1 } - upsert: { foo: baz } - - - do: - get: - index: test_1 - type: test - id: 1 - - - match: { _source.foo: baz } - - is_false: _source.count - - - - do: - update: - index: test_1 - type: test - id: 1 - body: - doc: { foo: bar, count: 1 } - upsert: { foo: baz } - - - do: - get: - index: test_1 - type: test - id: 1 - - - match: { _source.foo: bar } - - match: { _source.count: 1 } - - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/22_doc_as_upsert.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/22_doc_as_upsert.yml index 5bdc3ecea75fc..c65fc5af27fcc 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/22_doc_as_upsert.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/22_doc_as_upsert.yml @@ -1,9 +1,5 @@ --- "Doc as upsert": - - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - do: update: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/24_doc_as_upsert_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/24_doc_as_upsert_with_types.yml deleted file mode 100644 index 7585b9f3e0b94..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/24_doc_as_upsert_with_types.yml +++ /dev/null @@ -1,41 +0,0 @@ ---- -"Doc as upsert": - - - do: - update: - index: test_1 - type: test - id: 1 - body: - doc: { foo: bar, count: 1 } - doc_as_upsert: true - - - do: - get: - index: test_1 - type: test - id: 1 - - - match: { _source.foo: bar } - - match: { _source.count: 1 } - - - - do: - update: - index: test_1 - type: test - id: 1 - body: - doc: { count: 2 } - doc_as_upsert: true - - - do: - get: - index: test_1 - type: test - id: 1 - - - match: { _source.foo: bar } - - match: { _source.count: 2 } - - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/41_routing_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/41_routing_with_types.yml deleted file mode 100644 index 977db506710c7..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/41_routing_with_types.yml +++ /dev/null @@ -1,58 +0,0 @@ ---- -"Routing": - - - do: - indices.create: - index: test_1 - body: - settings: - index: - number_of_shards: 5 - number_of_routing_shards: 5 - number_of_replicas: 0 - - - do: - cluster.health: - wait_for_status: green - - - do: - update: - index: test_1 - type: test - id: 1 - routing: 5 - body: - doc: { foo: baz } - upsert: { foo: bar } - - - do: - get: - index: test_1 - type: test - id: 1 - routing: 5 - stored_fields: _routing - - - match: { _routing: "5"} - - - do: - catch: missing - update: - index: test_1 - type: test - id: 1 - body: - doc: { foo: baz } - - - do: - update: - index: test_1 - type: test - id: 1 - routing: 5 - _source: foo - body: - doc: { foo: baz } - - - match: { get._source.foo: baz } - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/61_refresh_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/61_refresh_with_types.yml deleted file mode 100644 index be2d9f9f7969e..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/61_refresh_with_types.yml +++ /dev/null @@ -1,115 +0,0 @@ ---- -"Refresh": - - - do: - indices.create: - index: test_1 - body: - settings: - index.refresh_interval: -1 - number_of_replicas: 0 - - - do: - update: - index: test_1 - type: test - id: 1 - body: - doc: { foo: baz } - upsert: { foo: bar } - - 
- do: - search: - rest_total_hits_as_int: true - index: test_1 - body: - query: { term: { _id: 1 }} - - - match: { hits.total: 0 } - - - do: - update: - index: test_1 - type: test - id: 2 - refresh: true - body: - doc: { foo: baz } - upsert: { foo: bar } - - is_true: forced_refresh - - - do: - search: - rest_total_hits_as_int: true - index: test_1 - body: - query: { term: { _id: 2 }} - - - match: { hits.total: 1 } - ---- -"When refresh url parameter is an empty string that means \"refresh immediately\"": - - do: - index: - index: test_1 - type: test - id: 1 - refresh: true - body: { foo: bar } - - is_true: forced_refresh - - - do: - update: - index: test_1 - type: test - id: 1 - refresh: "" - body: - doc: {cat: dog} - - is_true: forced_refresh - - - do: - search: - rest_total_hits_as_int: true - index: test_1 - body: - query: { term: { cat: dog }} - - - match: { hits.total: 1 } - ---- -"refresh=wait_for waits until changes are visible in search": - - do: - index: - index: update_60_refresh_1 - type: test - id: update_60_refresh_id1 - body: { foo: bar } - refresh: true - - is_true: forced_refresh - - - do: - search: - rest_total_hits_as_int: true - index: update_60_refresh_1 - body: - query: { term: { _id: update_60_refresh_id1 }} - - match: { hits.total: 1 } - - - do: - update: - index: update_60_refresh_1 - type: test - id: update_60_refresh_id1 - refresh: wait_for - body: - doc: { test: asdf } - - is_false: forced_refresh - - - do: - search: - rest_total_hits_as_int: true - index: update_60_refresh_1 - body: - query: { match: { test: asdf } } - - match: { hits.total: 1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/80_source_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/80_source_filtering.yml index 9e6d5a4671955..e196e03143456 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/80_source_filtering.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/80_source_filtering.yml @@ -1,9 +1,5 @@ --- "Source filtering": - - - skip: - version: " - 6.99.99" - reason: types are required in requests before 7.0.0 - do: update: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/81_source_filtering_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/81_source_filtering_with_types.yml deleted file mode 100644 index 4bb22e6b8012e..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/81_source_filtering_with_types.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- -"Source filtering": - - - do: - update: - index: test_1 - type: test - id: 1 - _source: [foo, bar] - body: - doc: { foo: baz } - upsert: { foo: bar } - - - match: { get._source.foo: bar } - - is_false: get._source.bar - -# TODO: -# -# - Add _routing diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/86_fields_meta_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/86_fields_meta_with_types.yml deleted file mode 100644 index f7791d0986399..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/86_fields_meta_with_types.yml +++ /dev/null @@ -1,33 +0,0 @@ ---- -"Metadata Fields": - - - skip: - version: "all" - reason: "Update doesn't return metadata fields, waiting for #3259" - - - do: - indices.create: - index: test_1 - - - do: - update: - index: test_1 - type: test - id: 1 - parent: 5 - fields: [ _routing ] - body: - doc: { foo: baz } - upsert: { foo: bar } - - - match: { get._routing: "5" } - - - do: - get: - 
index: test_1 - type: test - id: 1 - parent: 5 - stored_fields: [ _routing ] - - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/90_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/90_mix_typeless_typeful.yml deleted file mode 100644 index 0ca25e8598c24..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/90_mix_typeless_typeful.yml +++ /dev/null @@ -1,86 +0,0 @@ ---- -"Update with typeless API on an index that has types": - - - skip: - version: " - 6.99.99" - reason: Typeless APIs were introduced in 7.0.0 - - - do: - indices.create: # not using include_type_name: false on purpose - include_type_name: true - index: index - body: - mappings: - not_doc: - properties: - foo: - type: "keyword" - - - do: - index: - index: index - type: not_doc - id: 1 - body: { foo: bar } - - - do: - update: - index: index - id: 1 - body: - doc: - foo: baz - - - do: - get: - index: index - type: not_doc - id: 1 - - - match: { _source.foo: baz } - ---- -"Update call that introduces new field mappings": - - - skip: - version: " - 6.99.99" - reason: Typeless APIs were introduced in 7.0.0 - - - do: - indices.create: # not using include_type_name: false on purpose - include_type_name: true - index: index - body: - mappings: - not_doc: - properties: - foo: - type: "keyword" - - - do: - index: - index: index - type: not_doc - id: 1 - body: { foo: bar } - - - do: - update: - index: index - id: 1 - body: - doc: - foo: baz - new_field: value - - do: - get: # using typeful API on purpose - index: index - type: not_doc - id: 1 - - - match: { _index: "index" } - - match: { _type: "not_doc" } - - match: { _id: "1" } - - match: { _version: 2} - - match: { _source.foo: baz } - - match: { _source.new_field: value } diff --git a/server/build.gradle b/server/build.gradle index 44b88754312ac..aa467cd0528bf 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -227,7 +227,6 @@ tasks.named("thirdPartyAudit").configure { 'com.fasterxml.jackson.databind.ObjectMapper', // from log4j - 'com.conversantmedia.util.concurrent.DisruptorBlockingQueue', 'com.conversantmedia.util.concurrent.SpinPolicy', 'com.fasterxml.jackson.annotation.JsonInclude$Include', 'com.fasterxml.jackson.databind.DeserializationContext', @@ -251,8 +250,6 @@ tasks.named("thirdPartyAudit").configure { 'com.fasterxml.jackson.databind.node.ObjectNode', 'org.fusesource.jansi.Ansi', 'org.fusesource.jansi.AnsiRenderer$Code', - 'com.lmax.disruptor.BlockingWaitStrategy', - 'com.lmax.disruptor.BusySpinWaitStrategy', 'com.lmax.disruptor.EventFactory', 'com.lmax.disruptor.EventTranslator', 'com.lmax.disruptor.EventTranslatorTwoArg', @@ -262,10 +259,7 @@ tasks.named("thirdPartyAudit").configure { 'com.lmax.disruptor.RingBuffer', 'com.lmax.disruptor.Sequence', 'com.lmax.disruptor.SequenceReportingEventHandler', - 'com.lmax.disruptor.SleepingWaitStrategy', - 'com.lmax.disruptor.TimeoutBlockingWaitStrategy', 'com.lmax.disruptor.WaitStrategy', - 'com.lmax.disruptor.YieldingWaitStrategy', 'com.lmax.disruptor.dsl.Disruptor', 'com.lmax.disruptor.dsl.ProducerType', 'javax.jms.Connection', @@ -284,23 +278,17 @@ tasks.named("thirdPartyAudit").configure { 'javax.mail.Transport', 'javax.mail.internet.InternetAddress', 'javax.mail.internet.InternetHeaders', - 'javax.mail.internet.MimeBodyPart', 'javax.mail.internet.MimeMessage', 'javax.mail.internet.MimeMultipart', 'javax.mail.internet.MimeUtility', - 'javax.mail.util.ByteArrayDataSource', 
'org.apache.commons.compress.compressors.CompressorStreamFactory', 'org.apache.commons.compress.utils.IOUtils', 'org.apache.commons.csv.CSVFormat', 'org.apache.commons.csv.QuoteMode', - 'org.apache.kafka.clients.producer.KafkaProducer', 'org.apache.kafka.clients.producer.Producer', - 'org.apache.kafka.clients.producer.ProducerRecord', 'org.apache.kafka.clients.producer.RecordMetadata', 'org.codehaus.stax2.XMLStreamWriter2', 'org.jctools.queues.MpscArrayQueue', - 'org.osgi.framework.AdaptPermission', - 'org.osgi.framework.AdminPermission', 'org.osgi.framework.Bundle', 'org.osgi.framework.BundleActivator', 'org.osgi.framework.BundleContext', diff --git a/server/src/internalClusterTest/java/org/opensearch/action/IndicesRequestIT.java b/server/src/internalClusterTest/java/org/opensearch/action/IndicesRequestIT.java index 666c0a87a7acb..eeee000fa9c2d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/IndicesRequestIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/IndicesRequestIT.java @@ -234,11 +234,7 @@ public void testIndex() { String[] indexShardActions = new String[] { BulkAction.NAME + "[s][p]", BulkAction.NAME + "[s][r]" }; interceptTransportActions(indexShardActions); - IndexRequest indexRequest = new IndexRequest(randomIndexOrAlias(), "type", "id").source( - Requests.INDEX_CONTENT_TYPE, - "field", - "value" - ); + IndexRequest indexRequest = new IndexRequest(randomIndexOrAlias()).id("id").source(Requests.INDEX_CONTENT_TYPE, "field", "value"); internalCluster().coordOnlyNodeClient().index(indexRequest).actionGet(); clearInterceptedActions(); @@ -249,7 +245,7 @@ public void testDelete() { String[] deleteShardActions = new String[] { BulkAction.NAME + "[s][p]", BulkAction.NAME + "[s][r]" }; interceptTransportActions(deleteShardActions); - DeleteRequest deleteRequest = new DeleteRequest(randomIndexOrAlias(), "type", "id"); + DeleteRequest deleteRequest = new DeleteRequest(randomIndexOrAlias()).id("id"); internalCluster().coordOnlyNodeClient().delete(deleteRequest).actionGet(); clearInterceptedActions(); @@ -263,7 +259,7 @@ public void testUpdate() { String indexOrAlias = randomIndexOrAlias(); client().prepareIndex(indexOrAlias, "type", "id").setSource("field", "value").get(); - UpdateRequest updateRequest = new UpdateRequest(indexOrAlias, "type", "id").doc(Requests.INDEX_CONTENT_TYPE, "field1", "value1"); + UpdateRequest updateRequest = new UpdateRequest(indexOrAlias, "id").doc(Requests.INDEX_CONTENT_TYPE, "field1", "value1"); UpdateResponse updateResponse = internalCluster().coordOnlyNodeClient().update(updateRequest).actionGet(); assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult()); @@ -277,7 +273,7 @@ public void testUpdateUpsert() { interceptTransportActions(updateShardActions); String indexOrAlias = randomIndexOrAlias(); - UpdateRequest updateRequest = new UpdateRequest(indexOrAlias, "type", "id").upsert(Requests.INDEX_CONTENT_TYPE, "field", "value") + UpdateRequest updateRequest = new UpdateRequest(indexOrAlias, "id").upsert(Requests.INDEX_CONTENT_TYPE, "field", "value") .doc(Requests.INDEX_CONTENT_TYPE, "field1", "value1"); UpdateResponse updateResponse = internalCluster().coordOnlyNodeClient().update(updateRequest).actionGet(); assertEquals(DocWriteResponse.Result.CREATED, updateResponse.getResult()); @@ -293,7 +289,7 @@ public void testUpdateDelete() { String indexOrAlias = randomIndexOrAlias(); client().prepareIndex(indexOrAlias, "type", "id").setSource("field", "value").get(); - UpdateRequest updateRequest = new 
UpdateRequest(indexOrAlias, "type", "id").script( + UpdateRequest updateRequest = new UpdateRequest(indexOrAlias, "id").script( new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "ctx.op='delete'", Collections.emptyMap()) ); UpdateResponse updateResponse = internalCluster().coordOnlyNodeClient().update(updateRequest).actionGet(); @@ -312,19 +308,19 @@ public void testBulk() { int numIndexRequests = iterations(1, 10); for (int i = 0; i < numIndexRequests; i++) { String indexOrAlias = randomIndexOrAlias(); - bulkRequest.add(new IndexRequest(indexOrAlias, "type", "id").source(Requests.INDEX_CONTENT_TYPE, "field", "value")); + bulkRequest.add(new IndexRequest(indexOrAlias).id("id").source(Requests.INDEX_CONTENT_TYPE, "field", "value")); indices.add(indexOrAlias); } int numDeleteRequests = iterations(1, 10); for (int i = 0; i < numDeleteRequests; i++) { String indexOrAlias = randomIndexOrAlias(); - bulkRequest.add(new DeleteRequest(indexOrAlias, "type", "id")); + bulkRequest.add(new DeleteRequest(indexOrAlias).id("id")); indices.add(indexOrAlias); } int numUpdateRequests = iterations(1, 10); for (int i = 0; i < numUpdateRequests; i++) { String indexOrAlias = randomIndexOrAlias(); - bulkRequest.add(new UpdateRequest(indexOrAlias, "type", "id").doc(Requests.INDEX_CONTENT_TYPE, "field1", "value1")); + bulkRequest.add(new UpdateRequest(indexOrAlias, "id").doc(Requests.INDEX_CONTENT_TYPE, "field1", "value1")); indices.add(indexOrAlias); } @@ -338,7 +334,7 @@ public void testGet() { String getShardAction = GetAction.NAME + "[s]"; interceptTransportActions(getShardAction); - GetRequest getRequest = new GetRequest(randomIndexOrAlias(), "type", "id"); + GetRequest getRequest = new GetRequest(randomIndexOrAlias(), "id"); internalCluster().coordOnlyNodeClient().get(getRequest).actionGet(); clearInterceptedActions(); @@ -349,7 +345,7 @@ public void testExplain() { String explainShardAction = ExplainAction.NAME + "[s]"; interceptTransportActions(explainShardAction); - ExplainRequest explainRequest = new ExplainRequest(randomIndexOrAlias(), "type", "id").query(QueryBuilders.matchAllQuery()); + ExplainRequest explainRequest = new ExplainRequest(randomIndexOrAlias(), "id").query(QueryBuilders.matchAllQuery()); internalCluster().coordOnlyNodeClient().explain(explainRequest).actionGet(); clearInterceptedActions(); @@ -360,7 +356,7 @@ public void testTermVector() { String termVectorShardAction = TermVectorsAction.NAME + "[s]"; interceptTransportActions(termVectorShardAction); - TermVectorsRequest termVectorsRequest = new TermVectorsRequest(randomIndexOrAlias(), "type", "id"); + TermVectorsRequest termVectorsRequest = new TermVectorsRequest(randomIndexOrAlias(), "id"); internalCluster().coordOnlyNodeClient().termVectors(termVectorsRequest).actionGet(); clearInterceptedActions(); @@ -376,7 +372,7 @@ public void testMultiTermVector() { int numDocs = iterations(1, 30); for (int i = 0; i < numDocs; i++) { String indexOrAlias = randomIndexOrAlias(); - multiTermVectorsRequest.add(indexOrAlias, "type", Integer.toString(i)); + multiTermVectorsRequest.add(indexOrAlias, Integer.toString(i)); indices.add(indexOrAlias); } internalCluster().coordOnlyNodeClient().multiTermVectors(multiTermVectorsRequest).actionGet(); @@ -394,7 +390,7 @@ public void testMultiGet() { int numDocs = iterations(1, 30); for (int i = 0; i < numDocs; i++) { String indexOrAlias = randomIndexOrAlias(); - multiGetRequest.add(indexOrAlias, "type", Integer.toString(i)); + multiGetRequest.add(indexOrAlias, Integer.toString(i)); indices.add(indexOrAlias); 
} internalCluster().coordOnlyNodeClient().multiGet(multiGetRequest).actionGet(); diff --git a/server/src/internalClusterTest/java/org/opensearch/action/ListenerActionIT.java b/server/src/internalClusterTest/java/org/opensearch/action/ListenerActionIT.java index a0ddf68355a63..1512fa4934ca1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/ListenerActionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/ListenerActionIT.java @@ -48,7 +48,7 @@ public void testThreadedListeners() throws Throwable { final AtomicReference threadName = new AtomicReference<>(); Client client = client(); - IndexRequest request = new IndexRequest("test", "type", "1"); + IndexRequest request = new IndexRequest("test").id("1"); if (randomBoolean()) { // set the source, without it, we will have a verification failure request.source(Requests.INDEX_CONTENT_TYPE, "field1", "value1"); diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java index 3516c7a145aea..9c3cf4c28e55e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java @@ -379,14 +379,12 @@ public void testSearchTaskDescriptions() { headers.put(Task.X_OPAQUE_ID, "my_id"); headers.put("Foo-Header", "bar"); headers.put("Custom-Task-Header", "my_value"); - assertSearchResponse( - client().filterWithHeader(headers).prepareSearch("test").setTypes("doc").setQuery(QueryBuilders.matchAllQuery()).get() - ); + assertSearchResponse(client().filterWithHeader(headers).prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).get()); // the search operation should produce one main task List mainTask = findEvents(SearchAction.NAME, Tuple::v1); assertEquals(1, mainTask.size()); - assertThat(mainTask.get(0).getDescription(), startsWith("indices[test], types[doc], search_type[")); + assertThat(mainTask.get(0).getDescription(), startsWith("indices[test], search_type[")); assertThat(mainTask.get(0).getDescription(), containsString("\"query\":{\"match_all\"")); assertTaskHeaders(mainTask.get(0)); @@ -829,14 +827,12 @@ public void testTaskStoringSuccessfulResult() throws Exception { assertNoFailures(client().admin().indices().prepareRefresh(TaskResultsService.TASK_INDEX).get()); SearchResponse searchResponse = client().prepareSearch(TaskResultsService.TASK_INDEX) - .setTypes(TaskResultsService.TASK_TYPE) .setSource(SearchSourceBuilder.searchSource().query(QueryBuilders.termQuery("task.action", taskInfo.getAction()))) .get(); assertEquals(1L, searchResponse.getHits().getTotalHits().value); searchResponse = client().prepareSearch(TaskResultsService.TASK_INDEX) - .setTypes(TaskResultsService.TASK_TYPE) .setSource(SearchSourceBuilder.searchSource().query(QueryBuilders.termQuery("task.node", taskInfo.getTaskId().getNodeId()))) .get(); diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/ShrinkIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/ShrinkIndexIT.java index d87bbbb0926c5..a1ddc4a27a1f9 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/ShrinkIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/ShrinkIndexIT.java @@ -240,10 +240,8 @@ public void 
testShrinkIndexPrimaryTerm() throws Exception { final String s = Integer.toString(id); final int hash = Math.floorMod(Murmur3HashFunction.hash(s), numberOfShards); if (hash == shardId) { - final IndexRequest request = new IndexRequest("source", "type", s).source( - "{ \"f\": \"" + s + "\"}", - XContentType.JSON - ); + final IndexRequest request = new IndexRequest("source").id(s) + .source("{ \"f\": \"" + s + "\"}", XContentType.JSON); client().index(request).get(); break; } else { @@ -667,7 +665,7 @@ public void testShrinkCommitsMergeOnIdle() throws Exception { IndexService indexShards = service.indexService(target.getIndex()); IndexShard shard = indexShards.getShard(0); assertTrue(shard.isActive()); - shard.checkIdle(0); + shard.flushOnIdle(0); assertFalse(shard.isActive()); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/SplitIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/SplitIndexIT.java index 86974322388ab..14d337c34daa5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/SplitIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/SplitIndexIT.java @@ -229,7 +229,7 @@ private void splitToN(int sourceShards, int firstSplitShards, int secondSplitSha assertHitCount(client().prepareSearch("first_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); assertHitCount(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); for (int i = 0; i < numDocs; i++) { - GetResponse getResponse = client().prepareGet("first_split", "t1", Integer.toString(i)).setRouting(routingValue[i]).get(); + GetResponse getResponse = client().prepareGet("first_split", Integer.toString(i)).setRouting(routingValue[i]).get(); assertTrue(getResponse.isExists()); } @@ -274,7 +274,7 @@ private void splitToN(int sourceShards, int firstSplitShards, int secondSplitSha } flushAndRefresh(); for (int i = 0; i < numDocs; i++) { - GetResponse getResponse = client().prepareGet("second_split", "t1", Integer.toString(i)).setRouting(routingValue[i]).get(); + GetResponse getResponse = client().prepareGet("second_split", Integer.toString(i)).setRouting(routingValue[i]).get(); assertTrue(getResponse.isExists()); } assertHitCount(client().prepareSearch("second_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); @@ -345,10 +345,8 @@ public void testSplitIndexPrimaryTerm() throws Exception { final String s = Integer.toString(id); final int hash = Math.floorMod(Murmur3HashFunction.hash(s), numberOfShards); if (hash == shardId) { - final IndexRequest request = new IndexRequest("source", "type", s).source( - "{ \"f\": \"" + s + "\"}", - XContentType.JSON - ); + final IndexRequest request = new IndexRequest("source").id(s) + .source("{ \"f\": \"" + s + "\"}", XContentType.JSON); client().index(request).get(); break; } else { diff --git a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkIntegrationIT.java index e33b140d288ac..ab934170b594a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkIntegrationIT.java @@ -88,7 +88,6 @@ public void testBulkIndexCreatesMapping() throws Exception { assertBusy(() -> { GetMappingsResponse 
mappingsResponse = client().admin().indices().prepareGetMappings().get(); assertTrue(mappingsResponse.getMappings().containsKey("logstash-2014.03.30")); - assertTrue(mappingsResponse.getMappings().get("logstash-2014.03.30").containsKey("logs")); }); } @@ -117,7 +116,7 @@ public void testBulkWithWriteIndexAndRouting() { .setSettings(twoShardsSettings) .get(); - IndexRequest indexRequestWithAlias = new IndexRequest("alias1", "type", "id"); + IndexRequest indexRequestWithAlias = new IndexRequest("alias1").id("id"); if (randomBoolean()) { indexRequestWithAlias.routing("1"); } @@ -127,19 +126,19 @@ public void testBulkWithWriteIndexAndRouting() { assertThat(bulkResponse.getItems()[0].getResponse().getShardId().getId(), equalTo(0)); assertThat(bulkResponse.getItems()[0].getResponse().getVersion(), equalTo(1L)); assertThat(bulkResponse.getItems()[0].getResponse().status(), equalTo(RestStatus.CREATED)); - assertThat(client().prepareGet("index3", "type", "id").setRouting("1").get().getSource().get("foo"), equalTo("baz")); + assertThat(client().prepareGet("index3", "id").setRouting("1").get().getSource().get("foo"), equalTo("baz")); bulkResponse = client().prepareBulk().add(client().prepareUpdate("alias1", "type", "id").setDoc("foo", "updated")).get(); assertFalse(bulkResponse.buildFailureMessage(), bulkResponse.hasFailures()); - assertThat(client().prepareGet("index3", "type", "id").setRouting("1").get().getSource().get("foo"), equalTo("updated")); + assertThat(client().prepareGet("index3", "id").setRouting("1").get().getSource().get("foo"), equalTo("updated")); bulkResponse = client().prepareBulk().add(client().prepareDelete("alias1", "type", "id")).get(); assertFalse(bulkResponse.buildFailureMessage(), bulkResponse.hasFailures()); - assertFalse(client().prepareGet("index3", "type", "id").setRouting("1").get().isExists()); + assertFalse(client().prepareGet("index3", "id").setRouting("1").get().isExists()); } // allowing the auto-generated timestamp to externally be set would allow making the index inconsistent with duplicate docs public void testExternallySetAutoGeneratedTimestamp() { - IndexRequest indexRequest = new IndexRequest("index1", "_doc").source(Collections.singletonMap("foo", "baz")); + IndexRequest indexRequest = new IndexRequest("index1").source(Collections.singletonMap("foo", "baz")); indexRequest.process(Version.CURRENT, null, null); // sets the timestamp if (randomBoolean()) { indexRequest.id("test"); @@ -163,7 +162,7 @@ public void testBulkWithGlobalDefaults() throws Exception { { createSamplePipeline("pipeline"); - BulkRequestBuilder bulkBuilder = client().prepareBulk("test", "type1").routing("routing").pipeline("pipeline"); + BulkRequestBuilder bulkBuilder = client().prepareBulk("test").routing("routing").pipeline("pipeline"); bulkBuilder.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, XContentType.JSON); BulkResponse bulkItemResponses = bulkBuilder.get(); diff --git a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorIT.java b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorIT.java index 20791f46ade59..850034bc631b1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorIT.java @@ -40,14 +40,10 @@ import org.opensearch.client.Client; import org.opensearch.client.Requests; import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.Strings; -import 
org.opensearch.common.bytes.BytesArray; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.ByteSizeUnit; import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.test.OpenSearchIntegTestCase; import java.util.Arrays; @@ -173,7 +169,6 @@ public void testBulkProcessorConcurrentRequests() throws Exception { for (BulkItemResponse bulkItemResponse : listener.bulkItems) { assertThat(bulkItemResponse.getFailureMessage(), bulkItemResponse.isFailed(), equalTo(false)); assertThat(bulkItemResponse.getIndex(), equalTo("test")); - assertThat(bulkItemResponse.getType(), equalTo("test")); // with concurrent requests > 1 we can't rely on the order of the bulk requests assertThat(Integer.valueOf(bulkItemResponse.getId()), both(greaterThan(0)).and(lessThanOrEqualTo(numDocs))); // we do want to check that we don't get duplicate ids back @@ -253,17 +248,14 @@ public void testBulkProcessorConcurrentRequestsReadOnlyIndex() throws Exception if (randomBoolean()) { testDocs++; processor.add( - new IndexRequest("test", "test", Integer.toString(testDocs)).source(Requests.INDEX_CONTENT_TYPE, "field", "value") + new IndexRequest("test").id(Integer.toString(testDocs)).source(Requests.INDEX_CONTENT_TYPE, "field", "value") ); - multiGetRequestBuilder.add("test", "test", Integer.toString(testDocs)); + multiGetRequestBuilder.add("test", Integer.toString(testDocs)); } else { testReadOnlyDocs++; processor.add( - new IndexRequest("test-ro", "test", Integer.toString(testReadOnlyDocs)).source( - Requests.INDEX_CONTENT_TYPE, - "field", - "value" - ) + new IndexRequest("test-ro").id(Integer.toString(testReadOnlyDocs)) + .source(Requests.INDEX_CONTENT_TYPE, "field", "value") ); } } @@ -280,7 +272,6 @@ public void testBulkProcessorConcurrentRequestsReadOnlyIndex() throws Exception Set readOnlyIds = new HashSet<>(); for (BulkItemResponse bulkItemResponse : listener.bulkItems) { assertThat(bulkItemResponse.getIndex(), either(equalTo("test")).or(equalTo("test-ro"))); - assertThat(bulkItemResponse.getType(), equalTo("test")); if (bulkItemResponse.getIndex().equals("test")) { assertThat(bulkItemResponse.isFailed(), equalTo(false)); // with concurrent requests > 1 we can't rely on the order of the bulk requests @@ -302,25 +293,11 @@ public void testBulkProcessorConcurrentRequestsReadOnlyIndex() throws Exception private static MultiGetRequestBuilder indexDocs(Client client, BulkProcessor processor, int numDocs) throws Exception { MultiGetRequestBuilder multiGetRequestBuilder = client.prepareMultiGet(); for (int i = 1; i <= numDocs; i++) { - if (randomBoolean()) { - processor.add( - new IndexRequest("test", "test", Integer.toString(i)).source( - Requests.INDEX_CONTENT_TYPE, - "field", - randomRealisticUnicodeOfLengthBetween(1, 30) - ) - ); - } else { - final String source = "{ \"index\":{\"_index\":\"test\",\"_type\":\"test\",\"_id\":\"" - + Integer.toString(i) - + "\"} }\n" - + Strings.toString( - JsonXContent.contentBuilder().startObject().field("field", randomRealisticUnicodeOfLengthBetween(1, 30)).endObject() - ) - + "\n"; - processor.add(new BytesArray(source), null, null, XContentType.JSON); - } - multiGetRequestBuilder.add("test", "test", Integer.toString(i)); + processor.add( + new IndexRequest("test").id(Integer.toString(i)) + .source(Requests.INDEX_CONTENT_TYPE, "field", randomRealisticUnicodeOfLengthBetween(1, 30)) + ); + 
multiGetRequestBuilder.add("test", Integer.toString(i)); } return multiGetRequestBuilder; } @@ -330,7 +307,6 @@ private static void assertResponseItems(List bulkItemResponses int i = 1; for (BulkItemResponse bulkItemResponse : bulkItemResponses) { assertThat(bulkItemResponse.getIndex(), equalTo("test")); - assertThat(bulkItemResponse.getType(), equalTo("test")); assertThat(bulkItemResponse.getId(), equalTo(Integer.toString(i++))); assertThat( "item " + i + " failed with cause: " + bulkItemResponse.getFailureMessage(), @@ -345,7 +321,6 @@ private static void assertMultiGetResponse(MultiGetResponse multiGetResponse, in int i = 1; for (MultiGetItemResponse multiGetItemResponse : multiGetResponse) { assertThat(multiGetItemResponse.getIndex(), equalTo("test")); - assertThat(multiGetItemResponse.getType(), equalTo("test")); assertThat(multiGetItemResponse.getId(), equalTo(Integer.toString(i++))); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorRetryIT.java b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorRetryIT.java index bcda78ed6f788..68cb46fd20e50 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorRetryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorRetryIT.java @@ -159,11 +159,7 @@ public void afterBulk(long executionId, BulkRequest request, Throwable failure) client().admin().indices().refresh(new RefreshRequest()).get(); - SearchResponse results = client().prepareSearch(INDEX_NAME) - .setTypes(TYPE_NAME) - .setQuery(QueryBuilders.matchAllQuery()) - .setSize(0) - .get(); + SearchResponse results = client().prepareSearch(INDEX_NAME).setQuery(QueryBuilders.matchAllQuery()).setSize(0).get(); if (rejectedExecutionExpected) { assertThat((int) results.getHits().getTotalHits().value, lessThanOrEqualTo(numberOfAsyncOps)); diff --git a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkWithUpdatesIT.java b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkWithUpdatesIT.java index 570d1055a7a6c..f2b83fc92cc63 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkWithUpdatesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkWithUpdatesIT.java @@ -177,17 +177,17 @@ public void testBulkUpdateSimple() throws Exception { assertThat(bulkResponse.getItems()[2].getResponse().getId(), equalTo("3")); assertThat(bulkResponse.getItems()[2].getResponse().getVersion(), equalTo(2L)); - GetResponse getResponse = client().prepareGet().setIndex("test").setType("type1").setId("1").execute().actionGet(); + GetResponse getResponse = client().prepareGet().setIndex("test").setId("1").execute().actionGet(); assertThat(getResponse.isExists(), equalTo(true)); assertThat(getResponse.getVersion(), equalTo(2L)); assertThat(((Number) getResponse.getSource().get("field")).longValue(), equalTo(2L)); - getResponse = client().prepareGet().setIndex("test").setType("type1").setId("2").execute().actionGet(); + getResponse = client().prepareGet().setIndex("test").setId("2").execute().actionGet(); assertThat(getResponse.isExists(), equalTo(true)); assertThat(getResponse.getVersion(), equalTo(2L)); assertThat(((Number) getResponse.getSource().get("field")).longValue(), equalTo(3L)); - getResponse = client().prepareGet().setIndex("test").setType("type1").setId("3").execute().actionGet(); + getResponse = client().prepareGet().setIndex("test").setId("3").execute().actionGet(); 
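Reviewer note: the recurring pattern in the hunks above is the removal of the type argument from get requests. A minimal sketch of the typeless form these tests converge on, assuming the `client()` helper the integration tests inherit from OpenSearchIntegTestCase and run inside a test method; index name, id, and routing value are illustrative:

```java
import org.opensearch.action.get.GetResponse;

// Builder form: setType(...) is gone, only index and id identify the document.
GetResponse viaBuilder = client().prepareGet()
    .setIndex("test")
    .setId("1")
    .get();

// Shortcut form used throughout the updated tests: prepareGet(index, id).
GetResponse viaShortcut = client().prepareGet("test", "1")
    .setRouting("1")   // routing is unaffected by the type removal
    .get();
```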
assertThat(getResponse.isExists(), equalTo(true)); assertThat(getResponse.getVersion(), equalTo(2L)); assertThat(getResponse.getSource().get("field1").toString(), equalTo("test")); @@ -217,15 +217,15 @@ public void testBulkUpdateSimple() throws Exception { assertThat(bulkResponse.getItems()[2].getResponse().getIndex(), equalTo("test")); assertThat(bulkResponse.getItems()[2].getResponse().getVersion(), equalTo(3L)); - getResponse = client().prepareGet().setIndex("test").setType("type1").setId("6").execute().actionGet(); + getResponse = client().prepareGet().setIndex("test").setId("6").execute().actionGet(); assertThat(getResponse.isExists(), equalTo(true)); assertThat(getResponse.getVersion(), equalTo(1L)); assertThat(((Number) getResponse.getSource().get("field")).longValue(), equalTo(0L)); - getResponse = client().prepareGet().setIndex("test").setType("type1").setId("7").execute().actionGet(); + getResponse = client().prepareGet().setIndex("test").setId("7").execute().actionGet(); assertThat(getResponse.isExists(), equalTo(false)); - getResponse = client().prepareGet().setIndex("test").setType("type1").setId("2").execute().actionGet(); + getResponse = client().prepareGet().setIndex("test").setId("2").execute().actionGet(); assertThat(getResponse.isExists(), equalTo(true)); assertThat(getResponse.getVersion(), equalTo(3L)); assertThat(((Number) getResponse.getSource().get("field")).longValue(), equalTo(4L)); @@ -440,14 +440,13 @@ public void testBulkUpdateLargerVolume() throws Exception { assertThat(response.getItems()[i].getId(), equalTo(Integer.toString(i))); assertThat(response.getItems()[i].getVersion(), equalTo(1L)); assertThat(response.getItems()[i].getIndex(), equalTo("test")); - assertThat(response.getItems()[i].getType(), equalTo("type1")); assertThat(response.getItems()[i].getOpType(), equalTo(OpType.UPDATE)); assertThat(response.getItems()[i].getResponse().getId(), equalTo(Integer.toString(i))); assertThat(response.getItems()[i].getResponse().getVersion(), equalTo(1L)); assertThat(((UpdateResponse) response.getItems()[i].getResponse()).getGetResult().sourceAsMap().get("counter"), equalTo(1)); for (int j = 0; j < 5; j++) { - GetResponse getResponse = client().prepareGet("test", "type1", Integer.toString(i)).execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", Integer.toString(i)).execute().actionGet(); assertThat(getResponse.isExists(), equalTo(true)); assertThat(getResponse.getVersion(), equalTo(1L)); assertThat(((Number) getResponse.getSource().get("counter")).longValue(), equalTo(1L)); @@ -480,7 +479,6 @@ public void testBulkUpdateLargerVolume() throws Exception { assertThat(response.getItems()[i].getId(), equalTo(Integer.toString(i))); assertThat(response.getItems()[i].getVersion(), equalTo(2L)); assertThat(response.getItems()[i].getIndex(), equalTo("test")); - assertThat(response.getItems()[i].getType(), equalTo("type1")); assertThat(response.getItems()[i].getOpType(), equalTo(OpType.UPDATE)); assertThat(response.getItems()[i].getResponse().getId(), equalTo(Integer.toString(i))); assertThat(response.getItems()[i].getResponse().getVersion(), equalTo(2L)); @@ -504,7 +502,6 @@ public void testBulkUpdateLargerVolume() throws Exception { assertThat(response.getItems()[i].getId(), equalTo(Integer.toString(id))); assertThat(response.getItems()[i].getVersion(), equalTo(3L)); assertThat(response.getItems()[i].getIndex(), equalTo("test")); - assertThat(response.getItems()[i].getType(), equalTo("type1")); assertThat(response.getItems()[i].getOpType(), 
equalTo(OpType.UPDATE)); } } @@ -526,7 +523,6 @@ public void testBulkUpdateLargerVolume() throws Exception { assertThat(response.getItems()[i].getItemId(), equalTo(i)); assertThat(response.getItems()[i].getId(), equalTo(Integer.toString(i))); assertThat(response.getItems()[i].getIndex(), equalTo("test")); - assertThat(response.getItems()[i].getType(), equalTo("type1")); assertThat(response.getItems()[i].getOpType(), equalTo(OpType.UPDATE)); } @@ -550,10 +546,9 @@ public void testBulkUpdateLargerVolume() throws Exception { assertThat(itemResponse.getItemId(), equalTo(i)); assertThat(itemResponse.getId(), equalTo(Integer.toString(i))); assertThat(itemResponse.getIndex(), equalTo("test")); - assertThat(itemResponse.getType(), equalTo("type1")); assertThat(itemResponse.getOpType(), equalTo(OpType.UPDATE)); for (int j = 0; j < 5; j++) { - GetResponse getResponse = client().prepareGet("test", "type1", Integer.toString(i)).get(); + GetResponse getResponse = client().prepareGet("test", Integer.toString(i)).get(); assertThat(getResponse.isExists(), equalTo(false)); } } @@ -661,21 +656,21 @@ public void testThatInvalidIndexNamesShouldNotBreakCompleteBulkRequest() { // issue 6630 public void testThatFailedUpdateRequestReturnsCorrectType() throws Exception { BulkResponse indexBulkItemResponse = client().prepareBulk() - .add(new IndexRequest("test", "type", "3").source("{ \"title\" : \"Great Title of doc 3\" }", XContentType.JSON)) - .add(new IndexRequest("test", "type", "4").source("{ \"title\" : \"Great Title of doc 4\" }", XContentType.JSON)) - .add(new IndexRequest("test", "type", "5").source("{ \"title\" : \"Great Title of doc 5\" }", XContentType.JSON)) - .add(new IndexRequest("test", "type", "6").source("{ \"title\" : \"Great Title of doc 6\" }", XContentType.JSON)) + .add(new IndexRequest("test").id("3").source("{ \"title\" : \"Great Title of doc 3\" }", XContentType.JSON)) + .add(new IndexRequest("test").id("4").source("{ \"title\" : \"Great Title of doc 4\" }", XContentType.JSON)) + .add(new IndexRequest("test").id("5").source("{ \"title\" : \"Great Title of doc 5\" }", XContentType.JSON)) + .add(new IndexRequest("test").id("6").source("{ \"title\" : \"Great Title of doc 6\" }", XContentType.JSON)) .setRefreshPolicy(RefreshPolicy.IMMEDIATE) .get(); assertNoFailures(indexBulkItemResponse); BulkResponse bulkItemResponse = client().prepareBulk() - .add(new IndexRequest("test", "type", "1").source("{ \"title\" : \"Great Title of doc 1\" }", XContentType.JSON)) - .add(new IndexRequest("test", "type", "2").source("{ \"title\" : \"Great Title of doc 2\" }", XContentType.JSON)) - .add(new UpdateRequest("test", "type", "3").doc("{ \"date\" : \"2014-01-30T23:59:57\"}", XContentType.JSON)) - .add(new UpdateRequest("test", "type", "4").doc("{ \"date\" : \"2014-13-30T23:59:57\"}", XContentType.JSON)) - .add(new DeleteRequest("test", "type", "5")) - .add(new DeleteRequest("test", "type", "6")) + .add(new IndexRequest("test").id("1").source("{ \"title\" : \"Great Title of doc 1\" }", XContentType.JSON)) + .add(new IndexRequest("test").id("2").source("{ \"title\" : \"Great Title of doc 2\" }", XContentType.JSON)) + .add(new UpdateRequest("test", "3").doc("{ \"date\" : \"2014-01-30T23:59:57\"}", XContentType.JSON)) + .add(new UpdateRequest("test", "4").doc("{ \"date\" : \"2014-13-30T23:59:57\"}", XContentType.JSON)) + .add(new DeleteRequest("test", "5")) + .add(new DeleteRequest("test", "6")) .get(); assertNoFailures(indexBulkItemResponse); @@ -696,11 +691,11 @@ private static String indexOrAlias() { public 
void testThatMissingIndexDoesNotAbortFullBulkRequest() throws Exception { createIndex("bulkindex1", "bulkindex2"); BulkRequest bulkRequest = new BulkRequest(); - bulkRequest.add(new IndexRequest("bulkindex1", "index1_type", "1").source(Requests.INDEX_CONTENT_TYPE, "text", "hallo1")) - .add(new IndexRequest("bulkindex2", "index2_type", "1").source(Requests.INDEX_CONTENT_TYPE, "text", "hallo2")) - .add(new IndexRequest("bulkindex2", "index2_type").source(Requests.INDEX_CONTENT_TYPE, "text", "hallo2")) - .add(new UpdateRequest("bulkindex2", "index2_type", "2").doc(Requests.INDEX_CONTENT_TYPE, "foo", "bar")) - .add(new DeleteRequest("bulkindex2", "index2_type", "3")) + bulkRequest.add(new IndexRequest("bulkindex1").id("1").source(Requests.INDEX_CONTENT_TYPE, "text", "hallo1")) + .add(new IndexRequest("bulkindex2").id("1").source(Requests.INDEX_CONTENT_TYPE, "text", "hallo2")) + .add(new IndexRequest("bulkindex2").source(Requests.INDEX_CONTENT_TYPE, "text", "hallo2")) + .add(new UpdateRequest("bulkindex2", "2").doc(Requests.INDEX_CONTENT_TYPE, "foo", "bar")) + .add(new DeleteRequest("bulkindex2", "3")) .setRefreshPolicy(RefreshPolicy.IMMEDIATE); client().bulk(bulkRequest).get(); @@ -710,11 +705,11 @@ public void testThatMissingIndexDoesNotAbortFullBulkRequest() throws Exception { assertBusy(() -> assertAcked(client().admin().indices().prepareClose("bulkindex2"))); BulkRequest bulkRequest2 = new BulkRequest(); - bulkRequest2.add(new IndexRequest("bulkindex1", "index1_type", "1").source(Requests.INDEX_CONTENT_TYPE, "text", "hallo1")) - .add(new IndexRequest("bulkindex2", "index2_type", "1").source(Requests.INDEX_CONTENT_TYPE, "text", "hallo2")) - .add(new IndexRequest("bulkindex2", "index2_type").source(Requests.INDEX_CONTENT_TYPE, "text", "hallo2")) - .add(new UpdateRequest("bulkindex2", "index2_type", "2").doc(Requests.INDEX_CONTENT_TYPE, "foo", "bar")) - .add(new DeleteRequest("bulkindex2", "index2_type", "3")) + bulkRequest2.add(new IndexRequest("bulkindex1").id("1").source(Requests.INDEX_CONTENT_TYPE, "text", "hallo1")) + .add(new IndexRequest("bulkindex2").id("1").source(Requests.INDEX_CONTENT_TYPE, "text", "hallo2")) + .add(new IndexRequest("bulkindex2").source(Requests.INDEX_CONTENT_TYPE, "text", "hallo2")) + .add(new UpdateRequest("bulkindex2", "2").doc(Requests.INDEX_CONTENT_TYPE, "foo", "bar")) + .add(new DeleteRequest("bulkindex2", "3")) .setRefreshPolicy(RefreshPolicy.IMMEDIATE); BulkResponse bulkResponse = client().bulk(bulkRequest2).get(); @@ -730,9 +725,9 @@ public void testFailedRequestsOnClosedIndex() throws Exception { assertBusy(() -> assertAcked(client().admin().indices().prepareClose("bulkindex1"))); BulkRequest bulkRequest = new BulkRequest().setRefreshPolicy(RefreshPolicy.IMMEDIATE); - bulkRequest.add(new IndexRequest("bulkindex1", "index1_type", "1").source(Requests.INDEX_CONTENT_TYPE, "text", "hallo1")) - .add(new UpdateRequest("bulkindex1", "index1_type", "1").doc(Requests.INDEX_CONTENT_TYPE, "foo", "bar")) - .add(new DeleteRequest("bulkindex1", "index1_type", "1")); + bulkRequest.add(new IndexRequest("bulkindex1").id("1").source(Requests.INDEX_CONTENT_TYPE, "text", "hallo1")) + .add(new UpdateRequest("bulkindex1", "1").doc(Requests.INDEX_CONTENT_TYPE, "foo", "bar")) + .add(new DeleteRequest("bulkindex1", "1")); BulkResponse bulkResponse = client().bulk(bulkRequest).get(); assertThat(bulkResponse.hasFailures(), is(true)); diff --git a/server/src/internalClusterTest/java/org/opensearch/action/termvectors/GetTermVectorsIT.java 
b/server/src/internalClusterTest/java/org/opensearch/action/termvectors/GetTermVectorsIT.java index ac2be1a15c43e..52333061f3e6b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/termvectors/GetTermVectorsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/termvectors/GetTermVectorsIT.java @@ -95,7 +95,7 @@ public void testNoSuchDoc() throws Exception { client().prepareIndex("test", "type1", "666").setSource("field", "foo bar").execute().actionGet(); refresh(); for (int i = 0; i < 20; i++) { - ActionFuture termVector = client().termVectors(new TermVectorsRequest(indexOrAlias(), "type1", "" + i)); + ActionFuture termVector = client().termVectors(new TermVectorsRequest(indexOrAlias(), "" + i)); TermVectorsResponse actionGet = termVector.actionGet(); assertThat(actionGet, notNullValue()); assertThat(actionGet.getIndex(), equalTo("test")); @@ -122,7 +122,7 @@ public void testExistingFieldWithNoTermVectorsNoNPE() throws Exception { client().prepareIndex("test", "type1", "0").setSource("existingfield", "?").execute().actionGet(); refresh(); ActionFuture termVector = client().termVectors( - new TermVectorsRequest(indexOrAlias(), "type1", "0").selectedFields(new String[] { "existingfield" }) + new TermVectorsRequest(indexOrAlias(), "0").selectedFields(new String[] { "existingfield" }) ); // lets see if the null term vectors are caught... @@ -150,7 +150,7 @@ public void testExistingFieldButNotInDocNPE() throws Exception { client().prepareIndex("test", "type1", "0").setSource("anotherexistingfield", 1).execute().actionGet(); refresh(); ActionFuture termVectors = client().termVectors( - new TermVectorsRequest(indexOrAlias(), "type1", "0").selectedFields(randomBoolean() ? new String[] { "existingfield" } : null) + new TermVectorsRequest(indexOrAlias(), "0").selectedFields(randomBoolean() ? 
new String[] { "existingfield" } : null) .termStatistics(true) .fieldStatistics(true) ); @@ -191,9 +191,7 @@ public void testNotIndexedField() throws Exception { indexRandom(true, indexBuilders); for (int i = 0; i < 4; i++) { - TermVectorsResponse resp = client().prepareTermVectors(indexOrAlias(), "type1", String.valueOf(i)) - .setSelectedFields("field" + i) - .get(); + TermVectorsResponse resp = client().prepareTermVectors(indexOrAlias(), String.valueOf(i)).setSelectedFields("field" + i).get(); assertThat(resp, notNullValue()); assertThat(resp.isExists(), equalTo(true)); assertThat(resp.getIndex(), equalTo("test")); @@ -201,9 +199,7 @@ public void testNotIndexedField() throws Exception { } for (int i = 4; i < 6; i++) { - TermVectorsResponse resp = client().prepareTermVectors(indexOrAlias(), "type1", String.valueOf(i)) - .setSelectedFields("field" + i) - .get(); + TermVectorsResponse resp = client().prepareTermVectors(indexOrAlias(), String.valueOf(i)).setSelectedFields("field" + i).get(); assertThat(resp.getIndex(), equalTo("test")); assertThat("field" + i + " :", resp.getFields().terms("field" + i), notNullValue()); } @@ -245,7 +241,7 @@ public void testSimpleTermVectors() throws IOException { refresh(); } for (int i = 0; i < 10; i++) { - TermVectorsRequestBuilder resp = client().prepareTermVectors(indexOrAlias(), "type1", Integer.toString(i)) + TermVectorsRequestBuilder resp = client().prepareTermVectors(indexOrAlias(), Integer.toString(i)) .setPayloads(true) .setOffsets(true) .setPositions(true) @@ -362,7 +358,7 @@ public void testRandomSingleTermVectors() throws IOException { boolean isPositionsRequested = randomBoolean(); String infoString = createInfoString(isPositionsRequested, isOffsetRequested, optionString); for (int i = 0; i < 10; i++) { - TermVectorsRequestBuilder resp = client().prepareTermVectors("test", "_doc", Integer.toString(i)) + TermVectorsRequestBuilder resp = client().prepareTermVectors("test", Integer.toString(i)) .setOffsets(isOffsetRequested) .setPositions(isPositionsRequested) .setSelectedFields(); @@ -501,7 +497,7 @@ public void testSimpleTermVectorsWithGenerate() throws IOException { } for (int i = 0; i < 10; i++) { - TermVectorsResponse response = client().prepareTermVectors("test", "type1", Integer.toString(i)) + TermVectorsResponse response = client().prepareTermVectors("test", Integer.toString(i)) .setPayloads(true) .setOffsets(true) .setPositions(true) @@ -590,7 +586,7 @@ public void testDuelWithAndWithoutTermVectors() throws IOException, ExecutionExc for (int id = 0; id < content.length; id++) { Fields[] fields = new Fields[2]; for (int j = 0; j < indexNames.length; j++) { - TermVectorsResponse resp = client().prepareTermVectors(indexNames[j], "type1", String.valueOf(id)) + TermVectorsResponse resp = client().prepareTermVectors(indexNames[j], String.valueOf(id)) .setOffsets(true) .setPositions(true) .setSelectedFields("field1") @@ -661,7 +657,7 @@ public void testSimpleWildCards() throws IOException { client().prepareIndex("test", "type1", "0").setSource(source).get(); refresh(); - TermVectorsResponse response = client().prepareTermVectors(indexOrAlias(), "type1", "0").setSelectedFields("field*").get(); + TermVectorsResponse response = client().prepareTermVectors(indexOrAlias(), "0").setSelectedFields("field*").get(); assertThat("Doc doesn't exists but should", response.isExists(), equalTo(true)); assertThat(response.getIndex(), equalTo("test")); assertThat("All term vectors should have been generated", response.getFields().size(), equalTo(numFields)); @@ 
-692,7 +688,7 @@ public void testArtificialVsExisting() throws ExecutionException, InterruptedExc for (int i = 0; i < content.length; i++) { // request tvs from existing document - TermVectorsResponse respExisting = client().prepareTermVectors("test", "type1", String.valueOf(i)) + TermVectorsResponse respExisting = client().prepareTermVectors("test", String.valueOf(i)) .setOffsets(true) .setPositions(true) .setFieldStatistics(true) @@ -703,7 +699,6 @@ public void testArtificialVsExisting() throws ExecutionException, InterruptedExc // request tvs from artificial document TermVectorsResponse respArtificial = client().prepareTermVectors() .setIndex("test") - .setType("type1") .setRouting(String.valueOf(i)) // ensure we get the stats from the same shard as existing doc .setDoc(jsonBuilder().startObject().field("field1", content[i]).endObject()) .setOffsets(true) @@ -728,7 +723,6 @@ public void testArtificialNoDoc() throws IOException { String text = "the quick brown fox jumps over the lazy dog"; TermVectorsResponse resp = client().prepareTermVectors() .setIndex("test") - .setType("type1") .setDoc(jsonBuilder().startObject().field("field1", text).endObject()) .setOffsets(true) .setPositions(true) @@ -798,15 +792,13 @@ public void testPerFieldAnalyzer() throws IOException { } // selected fields not specified - TermVectorsResponse response = client().prepareTermVectors(indexOrAlias(), "type1", "0") - .setPerFieldAnalyzer(perFieldAnalyzer) - .get(); + TermVectorsResponse response = client().prepareTermVectors(indexOrAlias(), "0").setPerFieldAnalyzer(perFieldAnalyzer).get(); // should return all fields that have terms vectors, some with overridden analyzer checkAnalyzedFields(response.getFields(), withTermVectors, perFieldAnalyzer); // selected fields specified including some not in the mapping - response = client().prepareTermVectors(indexOrAlias(), "type1", "0") + response = client().prepareTermVectors(indexOrAlias(), "0") .setSelectedFields(selectedFields.toArray(Strings.EMPTY_ARRAY)) .setPerFieldAnalyzer(perFieldAnalyzer) .get(); @@ -848,7 +840,7 @@ public void testTermVectorsWithVersion() { assertAcked(prepareCreate("test").addAlias(new Alias("alias")).setSettings(Settings.builder().put("index.refresh_interval", -1))); ensureGreen(); - TermVectorsResponse response = client().prepareTermVectors("test", "type1", "1").get(); + TermVectorsResponse response = client().prepareTermVectors("test", "1").get(); assertThat(response.isExists(), equalTo(false)); logger.info("--> index doc 1"); @@ -857,18 +849,18 @@ public void testTermVectorsWithVersion() { // From translog: // version 0 means ignore version, which is the default - response = client().prepareTermVectors(indexOrAlias(), "type1", "1").setVersion(Versions.MATCH_ANY).get(); + response = client().prepareTermVectors(indexOrAlias(), "1").setVersion(Versions.MATCH_ANY).get(); assertThat(response.isExists(), equalTo(true)); assertThat(response.getId(), equalTo("1")); assertThat(response.getVersion(), equalTo(1L)); - response = client().prepareTermVectors(indexOrAlias(), "type1", "1").setVersion(1).get(); + response = client().prepareTermVectors(indexOrAlias(), "1").setVersion(1).get(); assertThat(response.isExists(), equalTo(true)); assertThat(response.getId(), equalTo("1")); assertThat(response.getVersion(), equalTo(1L)); try { - client().prepareGet(indexOrAlias(), "type1", "1").setVersion(2).get(); + client().prepareGet(indexOrAlias(), "1").setVersion(2).get(); fail(); } catch (VersionConflictEngineException e) { // all good @@ -878,20 +870,20 @@ 
public void testTermVectorsWithVersion() { refresh(); // version 0 means ignore version, which is the default - response = client().prepareTermVectors(indexOrAlias(), "type1", "1").setVersion(Versions.MATCH_ANY).setRealtime(false).get(); + response = client().prepareTermVectors(indexOrAlias(), "1").setVersion(Versions.MATCH_ANY).setRealtime(false).get(); assertThat(response.isExists(), equalTo(true)); assertThat(response.getId(), equalTo("1")); assertThat(response.getIndex(), equalTo("test")); assertThat(response.getVersion(), equalTo(1L)); - response = client().prepareTermVectors(indexOrAlias(), "type1", "1").setVersion(1).setRealtime(false).get(); + response = client().prepareTermVectors(indexOrAlias(), "1").setVersion(1).setRealtime(false).get(); assertThat(response.isExists(), equalTo(true)); assertThat(response.getId(), equalTo("1")); assertThat(response.getIndex(), equalTo("test")); assertThat(response.getVersion(), equalTo(1L)); try { - client().prepareGet(indexOrAlias(), "type1", "1").setVersion(2).setRealtime(false).get(); + client().prepareGet(indexOrAlias(), "1").setVersion(2).setRealtime(false).get(); fail(); } catch (VersionConflictEngineException e) { // all good @@ -903,20 +895,20 @@ public void testTermVectorsWithVersion() { // From translog: // version 0 means ignore version, which is the default - response = client().prepareTermVectors(indexOrAlias(), "type1", "1").setVersion(Versions.MATCH_ANY).get(); + response = client().prepareTermVectors(indexOrAlias(), "1").setVersion(Versions.MATCH_ANY).get(); assertThat(response.isExists(), equalTo(true)); assertThat(response.getId(), equalTo("1")); assertThat(response.getIndex(), equalTo("test")); assertThat(response.getVersion(), equalTo(2L)); try { - client().prepareGet(indexOrAlias(), "type1", "1").setVersion(1).get(); + client().prepareGet(indexOrAlias(), "1").setVersion(1).get(); fail(); } catch (VersionConflictEngineException e) { // all good } - response = client().prepareTermVectors(indexOrAlias(), "type1", "1").setVersion(2).get(); + response = client().prepareTermVectors(indexOrAlias(), "1").setVersion(2).get(); assertThat(response.isExists(), equalTo(true)); assertThat(response.getId(), equalTo("1")); assertThat(response.getIndex(), equalTo("test")); @@ -926,20 +918,20 @@ public void testTermVectorsWithVersion() { refresh(); // version 0 means ignore version, which is the default - response = client().prepareTermVectors(indexOrAlias(), "type1", "1").setVersion(Versions.MATCH_ANY).setRealtime(false).get(); + response = client().prepareTermVectors(indexOrAlias(), "1").setVersion(Versions.MATCH_ANY).setRealtime(false).get(); assertThat(response.isExists(), equalTo(true)); assertThat(response.getId(), equalTo("1")); assertThat(response.getIndex(), equalTo("test")); assertThat(response.getVersion(), equalTo(2L)); try { - client().prepareGet(indexOrAlias(), "type1", "1").setVersion(1).setRealtime(false).get(); + client().prepareGet(indexOrAlias(), "1").setVersion(1).setRealtime(false).get(); fail(); } catch (VersionConflictEngineException e) { // all good } - response = client().prepareTermVectors(indexOrAlias(), "type1", "1").setVersion(2).setRealtime(false).get(); + response = client().prepareTermVectors(indexOrAlias(), "1").setVersion(2).setRealtime(false).get(); assertThat(response.isExists(), equalTo(true)); assertThat(response.getId(), equalTo("1")); assertThat(response.getIndex(), equalTo("test")); @@ -969,7 +961,7 @@ public void testFilterLength() throws ExecutionException, InterruptedException, TermVectorsResponse 
response; for (int i = 0; i < numTerms; i++) { filterSettings.minWordLength = numTerms - i; - response = client().prepareTermVectors("test", "type1", "1") + response = client().prepareTermVectors("test", "1") .setSelectedFields("tags") .setFieldStatistics(true) .setTermStatistics(true) @@ -1004,7 +996,7 @@ public void testFilterTermFreq() throws ExecutionException, InterruptedException TermVectorsResponse response; for (int i = 0; i < numTerms; i++) { filterSettings.maxNumTerms = i + 1; - response = client().prepareTermVectors("test", "type1", "1") + response = client().prepareTermVectors("test", "1") .setSelectedFields("tags") .setFieldStatistics(true) .setTermStatistics(true) @@ -1037,7 +1029,7 @@ public void testFilterDocFreq() throws ExecutionException, InterruptedException, TermVectorsResponse response; for (int i = 0; i < numDocs; i++) { filterSettings.maxNumTerms = i + 1; - response = client().prepareTermVectors("test", "type1", (numDocs - 1) + "") + response = client().prepareTermVectors("test", (numDocs - 1) + "") .setSelectedFields("tags") .setFieldStatistics(true) .setTermStatistics(true) @@ -1068,7 +1060,6 @@ public void testArtificialDocWithPreference() throws InterruptedException, IOExc for (Integer shardId : shardIds) { TermVectorsResponse tvResponse = client().prepareTermVectors() .setIndex("test") - .setType("type1") .setPreference("_shards:" + shardId) .setDoc(jsonBuilder().startObject().field("field1", "random permutation").endObject()) .setFieldStatistics(true) @@ -1132,7 +1123,7 @@ public void testWithKeywordAndNormalizer() throws IOException, ExecutionExceptio for (int id = 0; id < content.length; id++) { Fields[] fields = new Fields[2]; for (int j = 0; j < indexNames.length; j++) { - TermVectorsResponse resp = client().prepareTermVectors(indexNames[j], "type1", String.valueOf(id)) + TermVectorsResponse resp = client().prepareTermVectors(indexNames[j], String.valueOf(id)) .setOffsets(true) .setPositions(true) .setSelectedFields("field1", "field2") diff --git a/server/src/internalClusterTest/java/org/opensearch/action/termvectors/MultiTermVectorsIT.java b/server/src/internalClusterTest/java/org/opensearch/action/termvectors/MultiTermVectorsIT.java index da9d7876223a9..1228ec85c2b08 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/termvectors/MultiTermVectorsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/termvectors/MultiTermVectorsIT.java @@ -83,7 +83,7 @@ public void testDuelESLucene() throws Exception { } public void testMissingIndexThrowsMissingIndex() throws Exception { - TermVectorsRequestBuilder requestBuilder = client().prepareTermVectors("testX", "typeX", Integer.toString(1)); + TermVectorsRequestBuilder requestBuilder = client().prepareTermVectors("testX", Integer.toString(1)); MultiTermVectorsRequestBuilder mtvBuilder = client().prepareMultiTermVectors(); mtvBuilder.add(requestBuilder.request()); MultiTermVectorsResponse response = mtvBuilder.execute().actionGet(); @@ -96,7 +96,7 @@ public void testMultiTermVectorsWithVersion() throws Exception { assertAcked(prepareCreate("test").addAlias(new Alias("alias")).setSettings(Settings.builder().put("index.refresh_interval", -1))); ensureGreen(); - MultiTermVectorsResponse response = client().prepareMultiTermVectors().add(indexOrAlias(), "type1", "1").get(); + MultiTermVectorsResponse response = client().prepareMultiTermVectors().add(indexOrAlias(), "1").get(); assertThat(response.getResponses().length, equalTo(1)); 
assertThat(response.getResponses()[0].getResponse().isExists(), equalTo(false)); @@ -106,9 +106,9 @@ public void testMultiTermVectorsWithVersion() throws Exception { // Version from translog response = client().prepareMultiTermVectors() - .add(new TermVectorsRequest(indexOrAlias(), "type1", "1").selectedFields("field").version(Versions.MATCH_ANY)) - .add(new TermVectorsRequest(indexOrAlias(), "type1", "1").selectedFields("field").version(1)) - .add(new TermVectorsRequest(indexOrAlias(), "type1", "1").selectedFields("field").version(2)) + .add(new TermVectorsRequest(indexOrAlias(), "1").selectedFields("field").version(Versions.MATCH_ANY)) + .add(new TermVectorsRequest(indexOrAlias(), "1").selectedFields("field").version(1)) + .add(new TermVectorsRequest(indexOrAlias(), "1").selectedFields("field").version(2)) .get(); assertThat(response.getResponses().length, equalTo(3)); // [0] version doesn't matter, which is the default @@ -130,9 +130,9 @@ public void testMultiTermVectorsWithVersion() throws Exception { // Version from Lucene index refresh(); response = client().prepareMultiTermVectors() - .add(new TermVectorsRequest(indexOrAlias(), "type1", "1").selectedFields("field").version(Versions.MATCH_ANY).realtime(false)) - .add(new TermVectorsRequest(indexOrAlias(), "type1", "1").selectedFields("field").version(1).realtime(false)) - .add(new TermVectorsRequest(indexOrAlias(), "type1", "1").selectedFields("field").version(2).realtime(false)) + .add(new TermVectorsRequest(indexOrAlias(), "1").selectedFields("field").version(Versions.MATCH_ANY).realtime(false)) + .add(new TermVectorsRequest(indexOrAlias(), "1").selectedFields("field").version(1).realtime(false)) + .add(new TermVectorsRequest(indexOrAlias(), "1").selectedFields("field").version(2).realtime(false)) .get(); assertThat(response.getResponses().length, equalTo(3)); // [0] version doesn't matter, which is the default @@ -155,9 +155,9 @@ public void testMultiTermVectorsWithVersion() throws Exception { // Version from translog response = client().prepareMultiTermVectors() - .add(new TermVectorsRequest(indexOrAlias(), "type1", "2").selectedFields("field").version(Versions.MATCH_ANY)) - .add(new TermVectorsRequest(indexOrAlias(), "type1", "2").selectedFields("field").version(1)) - .add(new TermVectorsRequest(indexOrAlias(), "type1", "2").selectedFields("field").version(2)) + .add(new TermVectorsRequest(indexOrAlias(), "2").selectedFields("field").version(Versions.MATCH_ANY)) + .add(new TermVectorsRequest(indexOrAlias(), "2").selectedFields("field").version(1)) + .add(new TermVectorsRequest(indexOrAlias(), "2").selectedFields("field").version(2)) .get(); assertThat(response.getResponses().length, equalTo(3)); // [0] version doesn't matter, which is the default @@ -180,9 +180,9 @@ public void testMultiTermVectorsWithVersion() throws Exception { // Version from Lucene index refresh(); response = client().prepareMultiTermVectors() - .add(new TermVectorsRequest(indexOrAlias(), "type1", "2").selectedFields("field").version(Versions.MATCH_ANY)) - .add(new TermVectorsRequest(indexOrAlias(), "type1", "2").selectedFields("field").version(1)) - .add(new TermVectorsRequest(indexOrAlias(), "type1", "2").selectedFields("field").version(2)) + .add(new TermVectorsRequest(indexOrAlias(), "2").selectedFields("field").version(Versions.MATCH_ANY)) + .add(new TermVectorsRequest(indexOrAlias(), "2").selectedFields("field").version(1)) + .add(new TermVectorsRequest(indexOrAlias(), "2").selectedFields("field").version(2)) .get(); 
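Reviewer note: term vectors get the same migration. A sketch of the typeless single and multi request forms shown in these hunks, under the same `client()` assumption as above and with `Versions` already imported by these tests:

```java
import org.opensearch.action.termvectors.MultiTermVectorsResponse;
import org.opensearch.action.termvectors.TermVectorsRequest;
import org.opensearch.action.termvectors.TermVectorsResponse;

// Single document: prepareTermVectors(index, id), no type argument.
TermVectorsResponse single = client().prepareTermVectors("test", "1")
    .setSelectedFields("field")
    .setOffsets(true)
    .setPositions(true)
    .get();

// Multi request: each TermVectorsRequest is likewise addressed by index and id,
// optionally pinned to a version (Versions.MATCH_ANY skips the version check).
MultiTermVectorsResponse multi = client().prepareMultiTermVectors()
    .add(new TermVectorsRequest("test", "1").selectedFields("field").version(Versions.MATCH_ANY))
    .add(new TermVectorsRequest("test", "1").selectedFields("field").version(2))
    .get();
```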
assertThat(response.getResponses().length, equalTo(3)); // [0] version doesn't matter, which is the default diff --git a/server/src/internalClusterTest/java/org/opensearch/aliases/IndexAliasesIT.java b/server/src/internalClusterTest/java/org/opensearch/aliases/IndexAliasesIT.java index fa2ebe3fa2108..541fe495ee8e8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/aliases/IndexAliasesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/aliases/IndexAliasesIT.java @@ -117,7 +117,7 @@ public void testAliases() throws Exception { logger.info("--> indexing against [alias1], should fail now"); IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, - () -> client().index(indexRequest("alias1").type("type1").id("1").source(source("2", "test"), XContentType.JSON)).actionGet() + () -> client().index(indexRequest("alias1").id("1").source(source("2", "test"), XContentType.JSON)).actionGet() ); assertThat( exception.getMessage(), @@ -134,9 +134,8 @@ public void testAliases() throws Exception { }); logger.info("--> indexing against [alias1], should work now"); - IndexResponse indexResponse = client().index( - indexRequest("alias1").type("type1").id("1").source(source("1", "test"), XContentType.JSON) - ).actionGet(); + IndexResponse indexResponse = client().index(indexRequest("alias1").id("1").source(source("1", "test"), XContentType.JSON)) + .actionGet(); assertThat(indexResponse.getIndex(), equalTo("test")); logger.info("--> creating index [test_x]"); @@ -152,7 +151,7 @@ public void testAliases() throws Exception { logger.info("--> indexing against [alias1], should fail now"); exception = expectThrows( IllegalArgumentException.class, - () -> client().index(indexRequest("alias1").type("type1").id("1").source(source("2", "test"), XContentType.JSON)).actionGet() + () -> client().index(indexRequest("alias1").id("1").source(source("2", "test"), XContentType.JSON)).actionGet() ); assertThat( exception.getMessage(), @@ -164,10 +163,7 @@ public void testAliases() throws Exception { ); logger.info("--> deleting against [alias1], should fail now"); - exception = expectThrows( - IllegalArgumentException.class, - () -> client().delete(deleteRequest("alias1").type("type1").id("1")).actionGet() - ); + exception = expectThrows(IllegalArgumentException.class, () -> client().delete(deleteRequest("alias1").id("1")).actionGet()); assertThat( exception.getMessage(), equalTo( @@ -183,8 +179,7 @@ public void testAliases() throws Exception { }); logger.info("--> indexing against [alias1], should work now"); - indexResponse = client().index(indexRequest("alias1").type("type1").id("1").source(source("1", "test"), XContentType.JSON)) - .actionGet(); + indexResponse = client().index(indexRequest("alias1").id("1").source(source("1", "test"), XContentType.JSON)).actionGet(); assertThat(indexResponse.getIndex(), equalTo("test")); assertAliasesVersionIncreases("test_x", () -> { @@ -193,12 +188,11 @@ public void testAliases() throws Exception { }); logger.info("--> indexing against [alias1], should work now"); - indexResponse = client().index(indexRequest("alias1").type("type1").id("1").source(source("1", "test"), XContentType.JSON)) - .actionGet(); + indexResponse = client().index(indexRequest("alias1").id("1").source(source("1", "test"), XContentType.JSON)).actionGet(); assertThat(indexResponse.getIndex(), equalTo("test_x")); logger.info("--> deleting against [alias1], should fail now"); - DeleteResponse deleteResponse = 
client().delete(deleteRequest("alias1").type("type1").id("1")).actionGet(); + DeleteResponse deleteResponse = client().delete(deleteRequest("alias1").id("1")).actionGet(); assertThat(deleteResponse.getIndex(), equalTo("test_x")); assertAliasesVersionIncreases("test_x", () -> { @@ -207,8 +201,7 @@ public void testAliases() throws Exception { }); logger.info("--> indexing against [alias1], should work against [test_x]"); - indexResponse = client().index(indexRequest("alias1").type("type1").id("1").source(source("1", "test"), XContentType.JSON)) - .actionGet(); + indexResponse = client().index(indexRequest("alias1").id("1").source(source("1", "test"), XContentType.JSON)).actionGet(); assertThat(indexResponse.getIndex(), equalTo("test_x")); } @@ -290,28 +283,16 @@ public void testSearchingFilteringAliasesSingleIndex() throws Exception { logger.info("--> indexing against [test]"); client().index( - indexRequest("test").type("type1") - .id("1") - .source(source("1", "foo test"), XContentType.JSON) - .setRefreshPolicy(RefreshPolicy.IMMEDIATE) + indexRequest("test").id("1").source(source("1", "foo test"), XContentType.JSON).setRefreshPolicy(RefreshPolicy.IMMEDIATE) ).actionGet(); client().index( - indexRequest("test").type("type1") - .id("2") - .source(source("2", "bar test"), XContentType.JSON) - .setRefreshPolicy(RefreshPolicy.IMMEDIATE) + indexRequest("test").id("2").source(source("2", "bar test"), XContentType.JSON).setRefreshPolicy(RefreshPolicy.IMMEDIATE) ).actionGet(); client().index( - indexRequest("test").type("type1") - .id("3") - .source(source("3", "baz test"), XContentType.JSON) - .setRefreshPolicy(RefreshPolicy.IMMEDIATE) + indexRequest("test").id("3").source(source("3", "baz test"), XContentType.JSON).setRefreshPolicy(RefreshPolicy.IMMEDIATE) ).actionGet(); client().index( - indexRequest("test").type("type1") - .id("4") - .source(source("4", "something else"), XContentType.JSON) - .setRefreshPolicy(RefreshPolicy.IMMEDIATE) + indexRequest("test").id("4").source(source("4", "something else"), XContentType.JSON).setRefreshPolicy(RefreshPolicy.IMMEDIATE) ).actionGet(); logger.info("--> checking single filtering alias search"); @@ -408,16 +389,16 @@ public void testSearchingFilteringAliasesTwoIndices() throws Exception { ); logger.info("--> indexing against [test1]"); - client().index(indexRequest("test1").type("type1").id("1").source(source("1", "foo test"), XContentType.JSON)).get(); - client().index(indexRequest("test1").type("type1").id("2").source(source("2", "bar test"), XContentType.JSON)).get(); - client().index(indexRequest("test1").type("type1").id("3").source(source("3", "baz test"), XContentType.JSON)).get(); - client().index(indexRequest("test1").type("type1").id("4").source(source("4", "something else"), XContentType.JSON)).get(); + client().index(indexRequest("test1").id("1").source(source("1", "foo test"), XContentType.JSON)).get(); + client().index(indexRequest("test1").id("2").source(source("2", "bar test"), XContentType.JSON)).get(); + client().index(indexRequest("test1").id("3").source(source("3", "baz test"), XContentType.JSON)).get(); + client().index(indexRequest("test1").id("4").source(source("4", "something else"), XContentType.JSON)).get(); logger.info("--> indexing against [test2]"); - client().index(indexRequest("test2").type("type1").id("5").source(source("5", "foo test"), XContentType.JSON)).get(); - client().index(indexRequest("test2").type("type1").id("6").source(source("6", "bar test"), XContentType.JSON)).get(); - 
client().index(indexRequest("test2").type("type1").id("7").source(source("7", "baz test"), XContentType.JSON)).get(); - client().index(indexRequest("test2").type("type1").id("8").source(source("8", "something else"), XContentType.JSON)).get(); + client().index(indexRequest("test2").id("5").source(source("5", "foo test"), XContentType.JSON)).get(); + client().index(indexRequest("test2").id("6").source(source("6", "bar test"), XContentType.JSON)).get(); + client().index(indexRequest("test2").id("7").source(source("7", "baz test"), XContentType.JSON)).get(); + client().index(indexRequest("test2").id("8").source(source("8", "something else"), XContentType.JSON)).get(); refresh(); @@ -524,17 +505,17 @@ public void testSearchingFilteringAliasesMultipleIndices() throws Exception { ); logger.info("--> indexing against [test1]"); - client().index(indexRequest("test1").type("type1").id("11").source(source("11", "foo test1"), XContentType.JSON)).get(); - client().index(indexRequest("test1").type("type1").id("12").source(source("12", "bar test1"), XContentType.JSON)).get(); - client().index(indexRequest("test1").type("type1").id("13").source(source("13", "baz test1"), XContentType.JSON)).get(); + client().index(indexRequest("test1").id("11").source(source("11", "foo test1"), XContentType.JSON)).get(); + client().index(indexRequest("test1").id("12").source(source("12", "bar test1"), XContentType.JSON)).get(); + client().index(indexRequest("test1").id("13").source(source("13", "baz test1"), XContentType.JSON)).get(); - client().index(indexRequest("test2").type("type1").id("21").source(source("21", "foo test2"), XContentType.JSON)).get(); - client().index(indexRequest("test2").type("type1").id("22").source(source("22", "bar test2"), XContentType.JSON)).get(); - client().index(indexRequest("test2").type("type1").id("23").source(source("23", "baz test2"), XContentType.JSON)).get(); + client().index(indexRequest("test2").id("21").source(source("21", "foo test2"), XContentType.JSON)).get(); + client().index(indexRequest("test2").id("22").source(source("22", "bar test2"), XContentType.JSON)).get(); + client().index(indexRequest("test2").id("23").source(source("23", "baz test2"), XContentType.JSON)).get(); - client().index(indexRequest("test3").type("type1").id("31").source(source("31", "foo test3"), XContentType.JSON)).get(); - client().index(indexRequest("test3").type("type1").id("32").source(source("32", "bar test3"), XContentType.JSON)).get(); - client().index(indexRequest("test3").type("type1").id("33").source(source("33", "baz test3"), XContentType.JSON)).get(); + client().index(indexRequest("test3").id("31").source(source("31", "foo test3"), XContentType.JSON)).get(); + client().index(indexRequest("test3").id("32").source(source("32", "bar test3"), XContentType.JSON)).get(); + client().index(indexRequest("test3").id("33").source(source("33", "baz test3"), XContentType.JSON)).get(); refresh(); @@ -647,16 +628,16 @@ public void testDeletingByQueryFilteringAliases() throws Exception { ); logger.info("--> indexing against [test1]"); - client().index(indexRequest("test1").type("type1").id("1").source(source("1", "foo test"), XContentType.JSON)).get(); - client().index(indexRequest("test1").type("type1").id("2").source(source("2", "bar test"), XContentType.JSON)).get(); - client().index(indexRequest("test1").type("type1").id("3").source(source("3", "baz test"), XContentType.JSON)).get(); - client().index(indexRequest("test1").type("type1").id("4").source(source("4", "something else"), 
XContentType.JSON)).get(); + client().index(indexRequest("test1").id("1").source(source("1", "foo test"), XContentType.JSON)).get(); + client().index(indexRequest("test1").id("2").source(source("2", "bar test"), XContentType.JSON)).get(); + client().index(indexRequest("test1").id("3").source(source("3", "baz test"), XContentType.JSON)).get(); + client().index(indexRequest("test1").id("4").source(source("4", "something else"), XContentType.JSON)).get(); logger.info("--> indexing against [test2]"); - client().index(indexRequest("test2").type("type1").id("5").source(source("5", "foo test"), XContentType.JSON)).get(); - client().index(indexRequest("test2").type("type1").id("6").source(source("6", "bar test"), XContentType.JSON)).get(); - client().index(indexRequest("test2").type("type1").id("7").source(source("7", "baz test"), XContentType.JSON)).get(); - client().index(indexRequest("test2").type("type1").id("8").source(source("8", "something else"), XContentType.JSON)).get(); + client().index(indexRequest("test2").id("5").source(source("5", "foo test"), XContentType.JSON)).get(); + client().index(indexRequest("test2").id("6").source(source("6", "bar test"), XContentType.JSON)).get(); + client().index(indexRequest("test2").id("7").source(source("7", "baz test"), XContentType.JSON)).get(); + client().index(indexRequest("test2").id("8").source(source("8", "something else"), XContentType.JSON)).get(); refresh(); @@ -744,7 +725,7 @@ public void testWaitForAliasCreationMultipleShards() throws Exception { for (int i = 0; i < 10; i++) { final String aliasName = "alias" + i; assertAliasesVersionIncreases("test", () -> assertAcked(admin().indices().prepareAliases().addAlias("test", aliasName))); - client().index(indexRequest(aliasName).type("type1").id("1").source(source("1", "test"), XContentType.JSON)).get(); + client().index(indexRequest(aliasName).id("1").source(source("1", "test"), XContentType.JSON)).get(); } } @@ -765,7 +746,7 @@ public void testWaitForAliasCreationSingleShard() throws Exception { for (int i = 0; i < 10; i++) { final String aliasName = "alias" + i; assertAliasesVersionIncreases("test", () -> assertAcked(admin().indices().prepareAliases().addAlias("test", aliasName))); - client().index(indexRequest(aliasName).type("type1").id("1").source(source("1", "test"), XContentType.JSON)).get(); + client().index(indexRequest(aliasName).id("1").source(source("1", "test"), XContentType.JSON)).get(); } } @@ -787,8 +768,7 @@ public void run() { "test", () -> assertAcked(admin().indices().prepareAliases().addAlias("test", aliasName)) ); - client().index(indexRequest(aliasName).type("type1").id("1").source(source("1", "test"), XContentType.JSON)) - .actionGet(); + client().index(indexRequest(aliasName).id("1").source(source("1", "test"), XContentType.JSON)).actionGet(); } }); } diff --git a/server/src/internalClusterTest/java/org/opensearch/broadcast/BroadcastActionsIT.java b/server/src/internalClusterTest/java/org/opensearch/broadcast/BroadcastActionsIT.java index c45155809a5ea..f9f99eb2662b0 100644 --- a/server/src/internalClusterTest/java/org/opensearch/broadcast/BroadcastActionsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/broadcast/BroadcastActionsIT.java @@ -40,7 +40,7 @@ import java.io.IOException; import static org.opensearch.client.Requests.indexRequest; -import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; 
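Reviewer note: the `termQuery("_type", "type1")` count is swapped for `match_all` because, with types removed, `_type` no longer narrows the result set; counting every document is what the assertion actually wants. A sketch of the count-style search, same `client()` assumption, illustrative index name:

```java
import org.opensearch.action.search.SearchResponse;

import static org.opensearch.index.query.QueryBuilders.matchAllQuery;

// Count-style search: size 0 skips hit fetching, only the total is asserted on.
SearchResponse countResponse = client().prepareSearch("test")
    .setSize(0)
    .setQuery(matchAllQuery())
    .get();
long totalDocs = countResponse.getHits().getTotalHits().value;
```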
import static org.hamcrest.Matchers.equalTo; @@ -57,16 +57,16 @@ public void testBroadcastOperations() throws IOException { NumShards numShards = getNumShards("test"); logger.info("Running Cluster Health"); - client().index(indexRequest("test").type("type1").id("1").source(source("1", "test"))).actionGet(); + client().index(indexRequest("test").id("1").source(source("1", "test"))).actionGet(); flush(); - client().index(indexRequest("test").type("type1").id("2").source(source("2", "test"))).actionGet(); + client().index(indexRequest("test").id("2").source(source("2", "test"))).actionGet(); refresh(); logger.info("Count"); // check count for (int i = 0; i < 5; i++) { // test successful - SearchResponse countResponse = client().prepareSearch("test").setSize(0).setQuery(termQuery("_type", "type1")).get(); + SearchResponse countResponse = client().prepareSearch("test").setSize(0).setQuery(matchAllQuery()).get(); assertThat(countResponse.getHits().getTotalHits().value, equalTo(2L)); assertThat(countResponse.getTotalShards(), equalTo(numShards.numPrimaries)); assertThat(countResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries)); diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/NoMasterNodeIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/NoMasterNodeIT.java index 1fd61c9e063d0..f8f686b27f29b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/NoMasterNodeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/NoMasterNodeIT.java @@ -115,25 +115,25 @@ public void testNoMasterActions() throws Exception { }); assertRequestBuilderThrows( - clientToMasterlessNode.prepareGet("test", "type1", "1"), + clientToMasterlessNode.prepareGet("test", "1"), ClusterBlockException.class, RestStatus.SERVICE_UNAVAILABLE ); assertRequestBuilderThrows( - clientToMasterlessNode.prepareGet("no_index", "type1", "1"), + clientToMasterlessNode.prepareGet("no_index", "1"), ClusterBlockException.class, RestStatus.SERVICE_UNAVAILABLE ); assertRequestBuilderThrows( - clientToMasterlessNode.prepareMultiGet().add("test", "type1", "1"), + clientToMasterlessNode.prepareMultiGet().add("test", "1"), ClusterBlockException.class, RestStatus.SERVICE_UNAVAILABLE ); assertRequestBuilderThrows( - clientToMasterlessNode.prepareMultiGet().add("no_index", "type1", "1"), + clientToMasterlessNode.prepareMultiGet().add("no_index", "1"), ClusterBlockException.class, RestStatus.SERVICE_UNAVAILABLE ); @@ -275,7 +275,7 @@ public void testNoMasterActionsWriteMasterBlock() throws Exception { assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)); }); - GetResponse getResponse = clientToMasterlessNode.prepareGet("test1", "type1", "1").get(); + GetResponse getResponse = clientToMasterlessNode.prepareGet("test1", "1").get(); assertExists(getResponse); SearchResponse countResponse = clientToMasterlessNode.prepareSearch("test1").setAllowPartialSearchResults(true).setSize(0).get(); @@ -371,10 +371,10 @@ public void testNoMasterActionsMetadataWriteMasterBlock() throws Exception { } }); - GetResponse getResponse = client(randomFrom(nodesWithShards)).prepareGet("test1", "type1", "1").get(); + GetResponse getResponse = client(randomFrom(nodesWithShards)).prepareGet("test1", "1").get(); assertExists(getResponse); - expectThrows(Exception.class, () -> client(partitionedNode).prepareGet("test1", "type1", "1").get()); + expectThrows(Exception.class, () -> client(partitionedNode).prepareGet("test1", "1").get()); SearchResponse countResponse = 
client(randomFrom(nodesWithShards)).prepareSearch("test1") .setAllowPartialSearchResults(true) diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/RareClusterStateIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/RareClusterStateIT.java index 6317d633f25ea..233dca2dabb28 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/RareClusterStateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/RareClusterStateIT.java @@ -378,7 +378,7 @@ public void testDelayedMappingPropagationOnReplica() throws Exception { final ActionFuture docIndexResponse = client().prepareIndex("index", "type", "1").setSource("field", 42).execute(); - assertBusy(() -> assertTrue(client().prepareGet("index", "type", "1").get().isExists())); + assertBusy(() -> assertTrue(client().prepareGet("index", "1").get().isExists())); // index another document, this time using dynamic mappings. // The ack timeout of 0 on dynamic mapping updates makes it possible for the document to be indexed on the primary, even @@ -400,7 +400,7 @@ public void testDelayedMappingPropagationOnReplica() throws Exception { assertNotNull(mapper.mappers().getMapper("field2")); }); - assertBusy(() -> assertTrue(client().prepareGet("index", "type", "2").get().isExists())); + assertBusy(() -> assertTrue(client().prepareGet("index", "2").get().isExists())); // The mappings have not been propagated to the replica yet as a consequence the document count not be indexed // We wait on purpose to make sure that the document is not indexed because the shard operation is stalled diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java index 5c07ef8e7baea..ea5bb145cfd75 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java @@ -439,7 +439,7 @@ public void testAllMasterEligibleNodesFailedDanglingIndexImport() throws Excepti logger.info("--> verify 1 doc in the index"); assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1L); - assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet("test", "1").execute().actionGet().isExists(), equalTo(true)); logger.info("--> stop data-only node and detach it from the old cluster"); Settings dataNodeDataPathSettings = Settings.builder() @@ -474,7 +474,7 @@ public boolean clearData(String nodeName) { ensureGreen("test"); logger.info("--> verify the doc is there"); - assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet("test", "1").execute().actionGet().isExists(), equalTo(true)); } public void testNoInitialBootstrapAfterDetach() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java index 0bfd3e22a3bc9..6da62ab5107c9 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java @@ -255,7 +255,7 
@@ public void testAckedIndexing() throws Exception { for (String id : ackedDocs.keySet()) { assertTrue( "doc [" + id + "] indexed via node [" + ackedDocs.get(id) + "] not found", - client(node).prepareGet("test", "type", id).setPreference("_local").get().isExists() + client(node).prepareGet("test", id).setPreference("_local").get().isExists() ); } } catch (AssertionError | NoShardAvailableActionException e) { @@ -316,7 +316,7 @@ public void testRejoinDocumentExistsInAllShardCopies() throws Exception { logger.info("Verifying if document exists via node[{}]", notIsolatedNode); GetResponse getResponse = internalCluster().client(notIsolatedNode) - .prepareGet("test", "type", indexResponse.getId()) + .prepareGet("test", indexResponse.getId()) .setPreference("_local") .get(); assertThat(getResponse.isExists(), is(true)); @@ -330,7 +330,7 @@ public void testRejoinDocumentExistsInAllShardCopies() throws Exception { for (String node : nodes) { logger.info("Verifying if document exists after isolating node[{}] via node[{}]", isolatedNode, node); - getResponse = internalCluster().client(node).prepareGet("test", "type", indexResponse.getId()).setPreference("_local").get(); + getResponse = internalCluster().client(node).prepareGet("test", indexResponse.getId()).setPreference("_local").get(); assertThat(getResponse.isExists(), is(true)); assertThat(getResponse.getVersion(), equalTo(1L)); assertThat(getResponse.getId(), equalTo(indexResponse.getId())); diff --git a/server/src/internalClusterTest/java/org/opensearch/document/DocumentActionsIT.java b/server/src/internalClusterTest/java/org/opensearch/document/DocumentActionsIT.java index d1138351bde76..4ca281fad157a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/document/DocumentActionsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/document/DocumentActionsIT.java @@ -90,7 +90,6 @@ public void testIndexActions() throws Exception { .get(); assertThat(indexResponse.getIndex(), equalTo(getConcreteIndexName())); assertThat(indexResponse.getId(), equalTo("1")); - assertThat(indexResponse.getType(), equalTo("type1")); logger.info("Refreshing"); RefreshResponse refreshResponse = refresh(); assertThat(refreshResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards)); @@ -117,18 +116,18 @@ public void testIndexActions() throws Exception { logger.info("Get [type1/1]"); for (int i = 0; i < 5; i++) { - getResult = client().prepareGet("test", "type1", "1").execute().actionGet(); + getResult = client().prepareGet("test", "1").execute().actionGet(); assertThat(getResult.getIndex(), equalTo(getConcreteIndexName())); assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(Strings.toString(source("1", "test")))); assertThat("cycle(map) #" + i, (String) getResult.getSourceAsMap().get("name"), equalTo("test")); - getResult = client().get(getRequest("test").type("type1").id("1")).actionGet(); + getResult = client().get(getRequest("test").id("1")).actionGet(); assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(Strings.toString(source("1", "test")))); assertThat(getResult.getIndex(), equalTo(getConcreteIndexName())); } logger.info("Get [type1/1] with script"); for (int i = 0; i < 5; i++) { - getResult = client().prepareGet("test", "type1", "1").setStoredFields("name").execute().actionGet(); + getResult = client().prepareGet("test", "1").setStoredFields("name").execute().actionGet(); assertThat(getResult.getIndex(), equalTo(getConcreteIndexName())); assertThat(getResult.isExists(), equalTo(true)); 
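Reviewer note: the `Requests` helpers lose their `.type(...)` step as well; the id moves onto the request itself. A sketch of the index-then-get round trip these hunks converge on; `source(...)` stands in for the test-local JSON helper, and the Hamcrest statics are the ones these tests already import:

```java
import org.opensearch.action.get.GetResponse;

import static org.opensearch.client.Requests.getRequest;
import static org.opensearch.client.Requests.indexRequest;

// Index by id, then read the same document back; no type() call in the chain.
client().index(indexRequest("test").id("1").source(source("1", "test"))).actionGet();

GetResponse getResult = client().get(getRequest("test").id("1")).actionGet();
assertThat(getResult.isExists(), equalTo(true));
```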
assertThat(getResult.getSourceAsBytes(), nullValue()); @@ -137,7 +136,7 @@ public void testIndexActions() throws Exception { logger.info("Get [type1/2] (should be empty)"); for (int i = 0; i < 5; i++) { - getResult = client().get(getRequest("test").type("type1").id("2")).actionGet(); + getResult = client().get(getRequest("test").id("2")).actionGet(); assertThat(getResult.isExists(), equalTo(false)); } @@ -145,20 +144,19 @@ public void testIndexActions() throws Exception { DeleteResponse deleteResponse = client().prepareDelete("test", "type1", "1").execute().actionGet(); assertThat(deleteResponse.getIndex(), equalTo(getConcreteIndexName())); assertThat(deleteResponse.getId(), equalTo("1")); - assertThat(deleteResponse.getType(), equalTo("type1")); logger.info("Refreshing"); client().admin().indices().refresh(refreshRequest("test")).actionGet(); logger.info("Get [type1/1] (should be empty)"); for (int i = 0; i < 5; i++) { - getResult = client().get(getRequest("test").type("type1").id("1")).actionGet(); + getResult = client().get(getRequest("test").id("1")).actionGet(); assertThat(getResult.isExists(), equalTo(false)); } logger.info("Index [type1/1]"); - client().index(indexRequest("test").type("type1").id("1").source(source("1", "test"))).actionGet(); + client().index(indexRequest("test").id("1").source(source("1", "test"))).actionGet(); logger.info("Index [type1/2]"); - client().index(indexRequest("test").type("type1").id("2").source(source("2", "test2"))).actionGet(); + client().index(indexRequest("test").id("2").source(source("2", "test2"))).actionGet(); logger.info("Flushing"); FlushResponse flushResult = client().admin().indices().prepareFlush("test").execute().actionGet(); @@ -169,10 +167,10 @@ public void testIndexActions() throws Exception { logger.info("Get [type1/1] and [type1/2]"); for (int i = 0; i < 5; i++) { - getResult = client().get(getRequest("test").type("type1").id("1")).actionGet(); + getResult = client().get(getRequest("test").id("1")).actionGet(); assertThat(getResult.getIndex(), equalTo(getConcreteIndexName())); assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(Strings.toString(source("1", "test")))); - getResult = client().get(getRequest("test").type("type1").id("2")).actionGet(); + getResult = client().get(getRequest("test").id("2")).actionGet(); String ste1 = getResult.getSourceAsString(); String ste2 = Strings.toString(source("2", "test2")); assertThat("cycle #" + i, ste1, equalTo(ste2)); @@ -228,37 +226,31 @@ public void testBulk() throws Exception { assertThat(bulkResponse.getItems()[0].isFailed(), equalTo(false)); assertThat(bulkResponse.getItems()[0].getOpType(), equalTo(OpType.INDEX)); assertThat(bulkResponse.getItems()[0].getIndex(), equalTo(getConcreteIndexName())); - assertThat(bulkResponse.getItems()[0].getType(), equalTo("type1")); assertThat(bulkResponse.getItems()[0].getId(), equalTo("1")); assertThat(bulkResponse.getItems()[1].isFailed(), equalTo(false)); assertThat(bulkResponse.getItems()[1].getOpType(), equalTo(OpType.CREATE)); assertThat(bulkResponse.getItems()[1].getIndex(), equalTo(getConcreteIndexName())); - assertThat(bulkResponse.getItems()[1].getType(), equalTo("type1")); assertThat(bulkResponse.getItems()[1].getId(), equalTo("2")); assertThat(bulkResponse.getItems()[2].isFailed(), equalTo(false)); assertThat(bulkResponse.getItems()[2].getOpType(), equalTo(OpType.INDEX)); assertThat(bulkResponse.getItems()[2].getIndex(), equalTo(getConcreteIndexName())); - assertThat(bulkResponse.getItems()[2].getType(), equalTo("type1")); 
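// Index and bulk requests follow the same pattern: indexRequest(index).id(id) replaces
// indexRequest(index).type(type).id(id), and the per-item getType() assertions above are
// simply dropped. A minimal sketch under those assumptions (helper names are illustrative):
import org.opensearch.action.index.IndexRequest;
import org.opensearch.client.Client;
import org.opensearch.common.xcontent.XContentType;

import static org.opensearch.client.Requests.indexRequest;

final class TypelessIndexSketch {
    // Indexes a JSON document without naming a mapping type.
    static void indexDoc(Client client, String index, String id, String json) {
        IndexRequest request = indexRequest(index).id(id).source(json, XContentType.JSON);
        client.index(request).actionGet();
    }
}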
String generatedId3 = bulkResponse.getItems()[2].getId(); assertThat(bulkResponse.getItems()[3].isFailed(), equalTo(false)); assertThat(bulkResponse.getItems()[3].getOpType(), equalTo(OpType.CREATE)); assertThat(bulkResponse.getItems()[3].getIndex(), equalTo(getConcreteIndexName())); - assertThat(bulkResponse.getItems()[3].getType(), equalTo("type1")); String generatedId4 = bulkResponse.getItems()[3].getId(); assertThat(bulkResponse.getItems()[4].isFailed(), equalTo(false)); assertThat(bulkResponse.getItems()[4].getOpType(), equalTo(OpType.DELETE)); assertThat(bulkResponse.getItems()[4].getIndex(), equalTo(getConcreteIndexName())); - assertThat(bulkResponse.getItems()[4].getType(), equalTo("type1")); assertThat(bulkResponse.getItems()[4].getId(), equalTo("1")); assertThat(bulkResponse.getItems()[5].isFailed(), equalTo(true)); assertThat(bulkResponse.getItems()[5].getOpType(), equalTo(OpType.INDEX)); assertThat(bulkResponse.getItems()[5].getIndex(), equalTo(getConcreteIndexName())); - assertThat(bulkResponse.getItems()[5].getType(), equalTo("type1")); waitForRelocation(ClusterHealthStatus.GREEN); RefreshResponse refreshResponse = client().admin().indices().prepareRefresh("test").execute().actionGet(); @@ -266,15 +258,15 @@ public void testBulk() throws Exception { assertThat(refreshResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards)); for (int i = 0; i < 5; i++) { - GetResponse getResult = client().get(getRequest("test").type("type1").id("1")).actionGet(); + GetResponse getResult = client().get(getRequest("test").id("1")).actionGet(); assertThat(getResult.getIndex(), equalTo(getConcreteIndexName())); assertThat("cycle #" + i, getResult.isExists(), equalTo(false)); - getResult = client().get(getRequest("test").type("type1").id("2")).actionGet(); + getResult = client().get(getRequest("test").id("2")).actionGet(); assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(Strings.toString(source("2", "test")))); assertThat(getResult.getIndex(), equalTo(getConcreteIndexName())); - getResult = client().get(getRequest("test").type("type1").id(generatedId3)).actionGet(); + getResult = client().get(getRequest("test").id(generatedId3)).actionGet(); assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(Strings.toString(source("3", "test")))); assertThat(getResult.getIndex(), equalTo(getConcreteIndexName())); diff --git a/server/src/internalClusterTest/java/org/opensearch/env/NodeRepurposeCommandIT.java b/server/src/internalClusterTest/java/org/opensearch/env/NodeRepurposeCommandIT.java index ccb2920c274eb..c90aa333604d3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/env/NodeRepurposeCommandIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/env/NodeRepurposeCommandIT.java @@ -69,7 +69,7 @@ public void testRepurpose() throws Exception { ensureGreen(); - assertTrue(client().prepareGet(indexName, "type1", "1").get().isExists()); + assertTrue(client().prepareGet(indexName, "1").get().isExists()); final Settings masterNodeDataPathSettings = internalCluster().dataPathSettings(masterNode); final Settings dataNodeDataPathSettings = internalCluster().dataPathSettings(dataNode); @@ -112,7 +112,7 @@ public void testRepurpose() throws Exception { internalCluster().startCoordinatingOnlyNode(dataNodeDataPathSettings); assertTrue(indexExists(indexName)); - expectThrows(NoShardAvailableActionException.class, () -> client().prepareGet(indexName, "type1", "1").get()); + expectThrows(NoShardAvailableActionException.class, () -> client().prepareGet(indexName, 
"1").get()); logger.info("--> Restarting and repurposing other node"); diff --git a/server/src/internalClusterTest/java/org/opensearch/explain/ExplainActionIT.java b/server/src/internalClusterTest/java/org/opensearch/explain/ExplainActionIT.java index 79fe3a9119eae..178a424d07a7c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/explain/ExplainActionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/explain/ExplainActionIT.java @@ -65,36 +65,33 @@ public void testSimple() throws Exception { client().prepareIndex("test", "test", "1").setSource("field", "value1").get(); - ExplainResponse response = client().prepareExplain(indexOrAlias(), "test", "1").setQuery(QueryBuilders.matchAllQuery()).get(); + ExplainResponse response = client().prepareExplain(indexOrAlias(), "1").setQuery(QueryBuilders.matchAllQuery()).get(); assertNotNull(response); assertFalse(response.isExists()); // not a match b/c not realtime assertThat(response.getIndex(), equalTo("test")); - assertThat(response.getType(), equalTo("test")); assertThat(response.getId(), equalTo("1")); assertFalse(response.isMatch()); // not a match b/c not realtime refresh(); - response = client().prepareExplain(indexOrAlias(), "test", "1").setQuery(QueryBuilders.matchAllQuery()).get(); + response = client().prepareExplain(indexOrAlias(), "1").setQuery(QueryBuilders.matchAllQuery()).get(); assertNotNull(response); assertTrue(response.isMatch()); assertNotNull(response.getExplanation()); assertTrue(response.getExplanation().isMatch()); assertThat(response.getIndex(), equalTo("test")); - assertThat(response.getType(), equalTo("test")); assertThat(response.getId(), equalTo("1")); assertThat(response.getExplanation().getValue(), equalTo(1.0f)); - response = client().prepareExplain(indexOrAlias(), "test", "1").setQuery(QueryBuilders.termQuery("field", "value2")).get(); + response = client().prepareExplain(indexOrAlias(), "1").setQuery(QueryBuilders.termQuery("field", "value2")).get(); assertNotNull(response); assertTrue(response.isExists()); assertFalse(response.isMatch()); assertThat(response.getIndex(), equalTo("test")); - assertThat(response.getType(), equalTo("test")); assertThat(response.getId(), equalTo("1")); assertNotNull(response.getExplanation()); assertFalse(response.getExplanation().isMatch()); - response = client().prepareExplain(indexOrAlias(), "test", "1") + response = client().prepareExplain(indexOrAlias(), "1") .setQuery( QueryBuilders.boolQuery().must(QueryBuilders.termQuery("field", "value1")).must(QueryBuilders.termQuery("field", "value2")) ) @@ -103,18 +100,16 @@ public void testSimple() throws Exception { assertTrue(response.isExists()); assertFalse(response.isMatch()); assertThat(response.getIndex(), equalTo("test")); - assertThat(response.getType(), equalTo("test")); assertThat(response.getId(), equalTo("1")); assertNotNull(response.getExplanation()); assertFalse(response.getExplanation().isMatch()); assertThat(response.getExplanation().getDetails().length, equalTo(2)); - response = client().prepareExplain(indexOrAlias(), "test", "2").setQuery(QueryBuilders.matchAllQuery()).get(); + response = client().prepareExplain(indexOrAlias(), "2").setQuery(QueryBuilders.matchAllQuery()).get(); assertNotNull(response); assertFalse(response.isExists()); assertFalse(response.isMatch()); assertThat(response.getIndex(), equalTo("test")); - assertThat(response.getType(), equalTo("test")); assertThat(response.getId(), equalTo("2")); } @@ -132,7 +127,7 @@ public void testExplainWithFields() throws Exception { .get(); 
refresh(); - ExplainResponse response = client().prepareExplain(indexOrAlias(), "test", "1") + ExplainResponse response = client().prepareExplain(indexOrAlias(), "1") .setQuery(QueryBuilders.matchAllQuery()) .setStoredFields("obj1.field1") .get(); @@ -149,7 +144,7 @@ public void testExplainWithFields() throws Exception { assertThat(response.getGetResult().isSourceEmpty(), equalTo(true)); refresh(); - response = client().prepareExplain(indexOrAlias(), "test", "1") + response = client().prepareExplain(indexOrAlias(), "1") .setQuery(QueryBuilders.matchAllQuery()) .setStoredFields("obj1.field1") .setFetchSource(true) @@ -166,20 +161,20 @@ public void testExplainWithFields() throws Exception { assertThat(response.getGetResult().getFields().get("obj1.field1").getValue().toString(), equalTo("value1")); assertThat(response.getGetResult().isSourceEmpty(), equalTo(false)); - response = client().prepareExplain(indexOrAlias(), "test", "1") + response = client().prepareExplain(indexOrAlias(), "1") .setQuery(QueryBuilders.matchAllQuery()) .setStoredFields("obj1.field1", "obj1.field2") .get(); assertNotNull(response); assertTrue(response.isMatch()); - String v1 = (String) response.getGetResult().field("obj1.field1").getValue(); - String v2 = (String) response.getGetResult().field("obj1.field2").getValue(); + String v1 = response.getGetResult().field("obj1.field1").getValue(); + String v2 = response.getGetResult().field("obj1.field2").getValue(); assertThat(v1, equalTo("value1")); assertThat(v2, equalTo("value2")); } @SuppressWarnings("unchecked") - public void testExplainWitSource() throws Exception { + public void testExplainWithSource() throws Exception { assertAcked(prepareCreate("test").addAlias(new Alias("alias"))); ensureGreen("test"); @@ -190,7 +185,7 @@ public void testExplainWitSource() throws Exception { .get(); refresh(); - ExplainResponse response = client().prepareExplain(indexOrAlias(), "test", "1") + ExplainResponse response = client().prepareExplain(indexOrAlias(), "1") .setQuery(QueryBuilders.matchAllQuery()) .setFetchSource("obj1.field1", null) .get(); @@ -204,7 +199,7 @@ public void testExplainWitSource() throws Exception { assertThat(response.getGetResult().getSource().size(), equalTo(1)); assertThat(((Map<String, Object>) response.getGetResult().getSource().get("obj1")).get("field1").toString(), equalTo("value1")); - response = client().prepareExplain(indexOrAlias(), "test", "1") + response = client().prepareExplain(indexOrAlias(), "1") .setQuery(QueryBuilders.matchAllQuery()) .setFetchSource(null, "obj1.field2") .get(); @@ -213,7 +208,7 @@ public void testExplainWitSource() throws Exception { assertThat(((Map<String, Object>) response.getGetResult().getSource().get("obj1")).get("field1").toString(), equalTo("value1")); } - public void testExplainWithFilteredAlias() throws Exception { + public void testExplainWithFilteredAlias() { assertAcked( prepareCreate("test").addMapping("test", "field2", "type=text") .addAlias(new Alias("alias1").filter(QueryBuilders.termQuery("field2", "value2"))) @@ -223,7 +218,7 @@ public void testExplainWithFilteredAlias() throws Exception { client().prepareIndex("test", "test", "1").setSource("field1", "value1", "field2", "value1").get(); refresh(); - ExplainResponse response = client().prepareExplain("alias1", "test", "1").setQuery(QueryBuilders.matchAllQuery()).get(); + ExplainResponse response = client().prepareExplain("alias1", "1").setQuery(QueryBuilders.matchAllQuery()).get(); assertNotNull(response); assertTrue(response.isExists()); assertFalse(response.isMatch()); @@ -242,7
+237,7 @@ public void testExplainWithFilteredAliasFetchSource() throws Exception { client().prepareIndex("test", "test", "1").setSource("field1", "value1", "field2", "value1").get(); refresh(); - ExplainResponse response = client().prepareExplain("alias1", "test", "1") + ExplainResponse response = client().prepareExplain("alias1", "1") .setQuery(QueryBuilders.matchAllQuery()) .setFetchSource(true) .get(); @@ -251,11 +246,9 @@ public void testExplainWithFilteredAliasFetchSource() throws Exception { assertTrue(response.isExists()); assertFalse(response.isMatch()); assertThat(response.getIndex(), equalTo("test")); - assertThat(response.getType(), equalTo("test")); assertThat(response.getId(), equalTo("1")); assertThat(response.getGetResult(), notNullValue()); assertThat(response.getGetResult().getIndex(), equalTo("test")); - assertThat(response.getGetResult().getType(), equalTo("test")); assertThat(response.getGetResult().getId(), equalTo("1")); assertThat(response.getGetResult().getSource(), notNullValue()); assertThat((String) response.getGetResult().getSource().get("field1"), equalTo("value1")); @@ -272,9 +265,7 @@ public void testExplainDateRangeInQueryString() { refresh(); - ExplainResponse explainResponse = client().prepareExplain("test", "type", "1") - .setQuery(queryStringQuery("past:[now-2M/d TO now/d]")) - .get(); + ExplainResponse explainResponse = client().prepareExplain("test", "1").setQuery(queryStringQuery("past:[now-2M/d TO now/d]")).get(); assertThat(explainResponse.isExists(), equalTo(true)); assertThat(explainResponse.isMatch(), equalTo(true)); } diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/GatewayIndexStateIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/GatewayIndexStateIT.java index f306425fc9458..4c0fa15a55824 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/GatewayIndexStateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/GatewayIndexStateIT.java @@ -214,7 +214,7 @@ public void testSimpleOpenClose() throws Exception { ); logger.info("--> trying to get the indexed document on the first index"); - GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "1").execute().actionGet(); assertThat(getResponse.isExists(), equalTo(true)); logger.info("--> closing test index..."); @@ -255,7 +255,7 @@ public void testSimpleOpenClose() throws Exception { ); logger.info("--> trying to get the indexed document on the first round (before close and shutdown)"); - getResponse = client().prepareGet("test", "type1", "1").execute().actionGet(); + getResponse = client().prepareGet("test", "1").execute().actionGet(); assertThat(getResponse.isExists(), equalTo(true)); logger.info("--> indexing a simple document"); diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/ReplicaShardAllocatorIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/ReplicaShardAllocatorIT.java index 23432b4bd15b1..e9414fd651ca0 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/ReplicaShardAllocatorIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/ReplicaShardAllocatorIT.java @@ -33,7 +33,6 @@ package org.opensearch.gateway; import org.opensearch.LegacyESVersion; -import org.opensearch.action.admin.indices.flush.SyncedFlushResponse; import org.opensearch.action.admin.indices.stats.ShardStats; import org.opensearch.cluster.metadata.IndexMetadata; import 
org.opensearch.cluster.node.DiscoveryNode; @@ -98,7 +97,6 @@ public void testPreferCopyCanPerformNoopRecovery() throws Exception { .prepareCreate(indexName) .setSettings( Settings.builder() - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) .put(IndexSettings.FILE_BASED_RECOVERY_THRESHOLD_SETTING.getKey(), 1.0f) @@ -197,10 +195,6 @@ public void testRecentPrimaryInformation() throws Exception { .mapToObj(n -> client().prepareIndex(indexName, "_doc").setSource("f", "v")) .collect(Collectors.toList()) ); - assertBusy(() -> { - SyncedFlushResponse syncedFlushResponse = client().admin().indices().prepareSyncedFlush(indexName).get(); - assertThat(syncedFlushResponse.successfulShards(), equalTo(2)); - }); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodeWithReplica)); if (randomBoolean()) { indexRandom( @@ -280,7 +274,6 @@ public void testFullClusterRestartPerformNoopRecovery() throws Exception { .prepareCreate(indexName) .setSettings( Settings.builder() - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), randomIntBetween(10, 100) + "kb") .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numOfReplicas) @@ -342,7 +335,6 @@ public void testPreferCopyWithHighestMatchingOperations() throws Exception { .prepareCreate(indexName) .setSettings( Settings.builder() - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), randomIntBetween(10, 100) + "kb") .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) @@ -473,7 +465,6 @@ public void testPeerRecoveryForClosedIndices() throws Exception { Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()) .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "100ms") .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "100ms") .build() @@ -552,12 +543,6 @@ public void testSimulateRecoverySourceOnOldNode() throws Exception { if (randomBoolean()) { client().admin().indices().prepareFlush(indexName).get(); } - if (randomBoolean()) { - assertBusy(() -> { - SyncedFlushResponse syncedFlushResponse = client().admin().indices().prepareSyncedFlush(indexName).get(); - assertThat(syncedFlushResponse.successfulShards(), equalTo(1)); - }); - } internalCluster().startDataOnlyNode(); MockTransportService transportService = (MockTransportService) internalCluster().getInstance(TransportService.class, source); Semaphore failRecovery = new Semaphore(1); @@ -591,10 +576,11 @@ public void testSimulateRecoverySourceOnOldNode() throws Exception { transportService.clearAllRules(); } - private void ensureActivePeerRecoveryRetentionLeasesAdvanced(String indexName) throws Exception { + public static void ensureActivePeerRecoveryRetentionLeasesAdvanced(String indexName) throws Exception { + final ClusterService clusterService = internalCluster().clusterService(); assertBusy(() -> { Index index = resolveIndex(indexName); - Set<String> activeRetentionLeaseIds = clusterService().state() + Set<String> activeRetentionLeaseIds = clusterService.state() .routingTable() .index(index) .shard(0) diff --git
a/server/src/internalClusterTest/java/org/opensearch/get/GetActionIT.java b/server/src/internalClusterTest/java/org/opensearch/get/GetActionIT.java index f8079aa1d93f3..327e35dbc7d0b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/get/GetActionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/get/GetActionIT.java @@ -90,25 +90,25 @@ public void testSimpleGet() { ); ensureGreen(); - GetResponse response = client().prepareGet(indexOrAlias(), "type1", "1").get(); + GetResponse response = client().prepareGet(indexOrAlias(), "1").get(); assertThat(response.isExists(), equalTo(false)); logger.info("--> index doc 1"); client().prepareIndex("test", "type1", "1").setSource("field1", "value1", "field2", "value2").get(); logger.info("--> non realtime get 1"); - response = client().prepareGet(indexOrAlias(), "type1", "1").setRealtime(false).get(); + response = client().prepareGet(indexOrAlias(), "1").setRealtime(false).get(); assertThat(response.isExists(), equalTo(false)); logger.info("--> realtime get 1"); - response = client().prepareGet(indexOrAlias(), "type1", "1").get(); + response = client().prepareGet(indexOrAlias(), "1").get(); assertThat(response.isExists(), equalTo(true)); assertThat(response.getIndex(), equalTo("test")); assertThat(response.getSourceAsMap().get("field1").toString(), equalTo("value1")); assertThat(response.getSourceAsMap().get("field2").toString(), equalTo("value2")); logger.info("--> realtime get 1 (no source, implicit)"); - response = client().prepareGet(indexOrAlias(), "type1", "1").setStoredFields(Strings.EMPTY_ARRAY).get(); + response = client().prepareGet(indexOrAlias(), "1").setStoredFields(Strings.EMPTY_ARRAY).get(); assertThat(response.isExists(), equalTo(true)); assertThat(response.getIndex(), equalTo("test")); Set<String> fields = new HashSet<>(response.getFields().keySet()); @@ -116,7 +116,7 @@ public void testSimpleGet() { assertThat(response.getSourceAsBytes(), nullValue()); logger.info("--> realtime get 1 (no source, explicit)"); - response = client().prepareGet(indexOrAlias(), "type1", "1").setFetchSource(false).get(); + response = client().prepareGet(indexOrAlias(), "1").setFetchSource(false).get(); assertThat(response.isExists(), equalTo(true)); assertThat(response.getIndex(), equalTo("test")); fields = new HashSet<>(response.getFields().keySet()); @@ -124,14 +124,14 @@ public void testSimpleGet() { assertThat(response.getSourceAsBytes(), nullValue()); logger.info("--> realtime get 1 (no type)"); - response = client().prepareGet(indexOrAlias(), null, "1").get(); + response = client().prepareGet(indexOrAlias(), "1").get(); assertThat(response.isExists(), equalTo(true)); assertThat(response.getIndex(), equalTo("test")); assertThat(response.getSourceAsMap().get("field1").toString(), equalTo("value1")); assertThat(response.getSourceAsMap().get("field2").toString(), equalTo("value2")); logger.info("--> realtime fetch of field"); - response = client().prepareGet(indexOrAlias(), "type1", "1").setStoredFields("field1").get(); + response = client().prepareGet(indexOrAlias(), "1").setStoredFields("field1").get(); assertThat(response.isExists(), equalTo(true)); assertThat(response.getIndex(), equalTo("test")); assertThat(response.getSourceAsBytes(), nullValue()); @@ -139,7 +139,7 @@ public void testSimpleGet() { assertThat(response.getField("field2"), nullValue()); logger.info("--> realtime fetch of field & source"); - response = client().prepareGet(indexOrAlias(), "type1", "1").setStoredFields("field1").setFetchSource("field1", null).get(); +
response = client().prepareGet(indexOrAlias(), "1").setStoredFields("field1").setFetchSource("field1", null).get(); assertThat(response.isExists(), equalTo(true)); assertThat(response.getIndex(), equalTo("test")); assertThat(response.getSourceAsMap(), hasKey("field1")); @@ -148,7 +148,7 @@ public void testSimpleGet() { assertThat(response.getField("field2"), nullValue()); logger.info("--> realtime get 1"); - response = client().prepareGet(indexOrAlias(), "type1", "1").get(); + response = client().prepareGet(indexOrAlias(), "1").get(); assertThat(response.isExists(), equalTo(true)); assertThat(response.getIndex(), equalTo("test")); assertThat(response.getSourceAsMap().get("field1").toString(), equalTo("value1")); @@ -158,14 +158,14 @@ public void testSimpleGet() { refresh(); logger.info("--> non realtime get 1 (loaded from index)"); - response = client().prepareGet(indexOrAlias(), "type1", "1").setRealtime(false).get(); + response = client().prepareGet(indexOrAlias(), "1").setRealtime(false).get(); assertThat(response.isExists(), equalTo(true)); assertThat(response.getIndex(), equalTo("test")); assertThat(response.getSourceAsMap().get("field1").toString(), equalTo("value1")); assertThat(response.getSourceAsMap().get("field2").toString(), equalTo("value2")); logger.info("--> realtime fetch of field (loaded from index)"); - response = client().prepareGet(indexOrAlias(), "type1", "1").setStoredFields("field1").get(); + response = client().prepareGet(indexOrAlias(), "1").setStoredFields("field1").get(); assertThat(response.isExists(), equalTo(true)); assertThat(response.getIndex(), equalTo("test")); assertThat(response.getSourceAsBytes(), nullValue()); @@ -173,7 +173,7 @@ public void testSimpleGet() { assertThat(response.getField("field2"), nullValue()); logger.info("--> realtime fetch of field & source (loaded from index)"); - response = client().prepareGet(indexOrAlias(), "type1", "1").setStoredFields("field1").setFetchSource(true).get(); + response = client().prepareGet(indexOrAlias(), "1").setStoredFields("field1").setFetchSource(true).get(); assertThat(response.isExists(), equalTo(true)); assertThat(response.getIndex(), equalTo("test")); assertThat(response.getSourceAsBytes(), not(nullValue())); @@ -184,7 +184,7 @@ public void testSimpleGet() { client().prepareIndex("test", "type1", "1").setSource("field1", "value1_1", "field2", "value2_1").get(); logger.info("--> realtime get 1"); - response = client().prepareGet(indexOrAlias(), "type1", "1").get(); + response = client().prepareGet(indexOrAlias(), "1").get(); assertThat(response.isExists(), equalTo(true)); assertThat(response.getIndex(), equalTo("test")); assertThat(response.getSourceAsMap().get("field1").toString(), equalTo("value1_1")); @@ -193,7 +193,7 @@ public void testSimpleGet() { logger.info("--> update doc 1 again"); client().prepareIndex("test", "type1", "1").setSource("field1", "value1_2", "field2", "value2_2").get(); - response = client().prepareGet(indexOrAlias(), "type1", "1").get(); + response = client().prepareGet(indexOrAlias(), "1").get(); assertThat(response.isExists(), equalTo(true)); assertThat(response.getIndex(), equalTo("test")); assertThat(response.getSourceAsMap().get("field1").toString(), equalTo("value1_2")); @@ -202,7 +202,7 @@ public void testSimpleGet() { DeleteResponse deleteResponse = client().prepareDelete("test", "type1", "1").get(); assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult()); - response = client().prepareGet(indexOrAlias(), "type1", "1").get(); + response = 
client().prepareGet(indexOrAlias(), "1").get(); assertThat(response.isExists(), equalTo(false)); } @@ -222,7 +222,7 @@ public void testGetWithAliasPointingToMultipleIndices() { IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, - () -> client().prepareGet("alias1", "type", "_alias_id").get() + () -> client().prepareGet("alias1", "_alias_id").get() ); assertThat(exception.getMessage(), endsWith("can't execute a single index op")); } @@ -239,7 +239,7 @@ public void testSimpleMultiGet() throws Exception { ); ensureGreen(); - MultiGetResponse response = client().prepareMultiGet().add(indexOrAlias(), "type1", "1").get(); + MultiGetResponse response = client().prepareMultiGet().add(indexOrAlias(), "1").get(); assertThat(response.getResponses().length, equalTo(1)); assertThat(response.getResponses()[0].getResponse().isExists(), equalTo(false)); @@ -248,11 +248,11 @@ public void testSimpleMultiGet() throws Exception { } response = client().prepareMultiGet() - .add(indexOrAlias(), "type1", "1") - .add(indexOrAlias(), "type1", "15") - .add(indexOrAlias(), "type1", "3") - .add(indexOrAlias(), "type1", "9") - .add(indexOrAlias(), "type1", "11") + .add(indexOrAlias(), "1") + .add(indexOrAlias(), "15") + .add(indexOrAlias(), "3") + .add(indexOrAlias(), "9") + .add(indexOrAlias(), "11") .get(); assertThat(response.getResponses().length, equalTo(5)); assertThat(response.getResponses()[0].getId(), equalTo("1")); @@ -278,8 +278,8 @@ public void testSimpleMultiGet() throws Exception { // multi get with specific field response = client().prepareMultiGet() - .add(new MultiGetRequest.Item(indexOrAlias(), "type1", "1").storedFields("field")) - .add(new MultiGetRequest.Item(indexOrAlias(), "type1", "3").storedFields("field")) + .add(new MultiGetRequest.Item(indexOrAlias(), "1").storedFields("field")) + .add(new MultiGetRequest.Item(indexOrAlias(), "3").storedFields("field")) .get(); assertThat(response.getResponses().length, equalTo(2)); @@ -304,16 +304,15 @@ public void testGetDocWithMultivaluedFields() throws Exception { assertAcked(prepareCreate("test").addMapping("type1", mapping1, XContentType.JSON)); ensureGreen(); - GetResponse response = client().prepareGet("test", "type1", "1").get(); + GetResponse response = client().prepareGet("test", "1").get(); assertThat(response.isExists(), equalTo(false)); assertThat(response.isExists(), equalTo(false)); client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().array("field", "1", "2").endObject()).get(); - response = client().prepareGet("test", "type1", "1").setStoredFields("field").get(); + response = client().prepareGet("test", "1").setStoredFields("field").get(); assertThat(response.isExists(), equalTo(true)); assertThat(response.getId(), equalTo("1")); - assertThat(response.getType(), equalTo("type1")); Set<String> fields = new HashSet<>(response.getFields().keySet()); assertThat(fields, equalTo(singleton("field"))); assertThat(response.getFields().get("field").getValues().size(), equalTo(2)); @@ -322,7 +321,7 @@ public void testGetDocWithMultivaluedFields() throws Exception { // Now test values being fetched from stored fields.
refresh(); - response = client().prepareGet("test", "type1", "1").setStoredFields("field").get(); + response = client().prepareGet("test", "1").setStoredFields("field").get(); assertThat(response.isExists(), equalTo(true)); assertThat(response.getId(), equalTo("1")); fields = new HashSet<>(response.getFields().keySet()); @@ -336,7 +335,7 @@ public void testGetWithVersion() { assertAcked(prepareCreate("test").addAlias(new Alias("alias")).setSettings(Settings.builder().put("index.refresh_interval", -1))); ensureGreen(); - GetResponse response = client().prepareGet("test", "type1", "1").get(); + GetResponse response = client().prepareGet("test", "1").get(); assertThat(response.isExists(), equalTo(false)); logger.info("--> index doc 1"); @@ -344,18 +343,18 @@ public void testGetWithVersion() { // From translog: - response = client().prepareGet(indexOrAlias(), "type1", "1").setVersion(Versions.MATCH_ANY).get(); + response = client().prepareGet(indexOrAlias(), "1").setVersion(Versions.MATCH_ANY).get(); assertThat(response.isExists(), equalTo(true)); assertThat(response.getId(), equalTo("1")); assertThat(response.getVersion(), equalTo(1L)); - response = client().prepareGet(indexOrAlias(), "type1", "1").setVersion(1).get(); + response = client().prepareGet(indexOrAlias(), "1").setVersion(1).get(); assertThat(response.isExists(), equalTo(true)); assertThat(response.getId(), equalTo("1")); assertThat(response.getVersion(), equalTo(1L)); try { - client().prepareGet(indexOrAlias(), "type1", "1").setVersion(2).get(); + client().prepareGet(indexOrAlias(), "1").setVersion(2).get(); fail(); } catch (VersionConflictEngineException e) { // all good @@ -364,20 +363,20 @@ public void testGetWithVersion() { // From Lucene index: refresh(); - response = client().prepareGet(indexOrAlias(), "type1", "1").setVersion(Versions.MATCH_ANY).setRealtime(false).get(); + response = client().prepareGet(indexOrAlias(), "1").setVersion(Versions.MATCH_ANY).setRealtime(false).get(); assertThat(response.isExists(), equalTo(true)); assertThat(response.getId(), equalTo("1")); assertThat(response.getIndex(), equalTo("test")); assertThat(response.getVersion(), equalTo(1L)); - response = client().prepareGet(indexOrAlias(), "type1", "1").setVersion(1).setRealtime(false).get(); + response = client().prepareGet(indexOrAlias(), "1").setVersion(1).setRealtime(false).get(); assertThat(response.isExists(), equalTo(true)); assertThat(response.getId(), equalTo("1")); assertThat(response.getIndex(), equalTo("test")); assertThat(response.getVersion(), equalTo(1L)); try { - client().prepareGet(indexOrAlias(), "type1", "1").setVersion(2).setRealtime(false).get(); + client().prepareGet(indexOrAlias(), "1").setVersion(2).setRealtime(false).get(); fail(); } catch (VersionConflictEngineException e) { // all good @@ -388,20 +387,20 @@ public void testGetWithVersion() { // From translog: - response = client().prepareGet(indexOrAlias(), "type1", "1").setVersion(Versions.MATCH_ANY).get(); + response = client().prepareGet(indexOrAlias(), "1").setVersion(Versions.MATCH_ANY).get(); assertThat(response.isExists(), equalTo(true)); assertThat(response.getId(), equalTo("1")); assertThat(response.getIndex(), equalTo("test")); assertThat(response.getVersion(), equalTo(2L)); try { - client().prepareGet(indexOrAlias(), "type1", "1").setVersion(1).get(); + client().prepareGet(indexOrAlias(), "1").setVersion(1).get(); fail(); } catch (VersionConflictEngineException e) { // all good } - response = client().prepareGet(indexOrAlias(), "type1", "1").setVersion(2).get(); 
+ response = client().prepareGet(indexOrAlias(), "1").setVersion(2).get(); assertThat(response.isExists(), equalTo(true)); assertThat(response.getId(), equalTo("1")); assertThat(response.getIndex(), equalTo("test")); @@ -410,20 +409,20 @@ public void testGetWithVersion() { // From Lucene index: refresh(); - response = client().prepareGet(indexOrAlias(), "type1", "1").setVersion(Versions.MATCH_ANY).setRealtime(false).get(); + response = client().prepareGet(indexOrAlias(), "1").setVersion(Versions.MATCH_ANY).setRealtime(false).get(); assertThat(response.isExists(), equalTo(true)); assertThat(response.getId(), equalTo("1")); assertThat(response.getIndex(), equalTo("test")); assertThat(response.getVersion(), equalTo(2L)); try { - client().prepareGet(indexOrAlias(), "type1", "1").setVersion(1).setRealtime(false).get(); + client().prepareGet(indexOrAlias(), "1").setVersion(1).setRealtime(false).get(); fail(); } catch (VersionConflictEngineException e) { // all good } - response = client().prepareGet(indexOrAlias(), "type1", "1").setVersion(2).setRealtime(false).get(); + response = client().prepareGet(indexOrAlias(), "1").setVersion(2).setRealtime(false).get(); assertThat(response.isExists(), equalTo(true)); assertThat(response.getId(), equalTo("1")); assertThat(response.getIndex(), equalTo("test")); @@ -434,7 +433,7 @@ public void testMultiGetWithVersion() throws Exception { assertAcked(prepareCreate("test").addAlias(new Alias("alias")).setSettings(Settings.builder().put("index.refresh_interval", -1))); ensureGreen(); - MultiGetResponse response = client().prepareMultiGet().add(indexOrAlias(), "type1", "1").get(); + MultiGetResponse response = client().prepareMultiGet().add(indexOrAlias(), "1").get(); assertThat(response.getResponses().length, equalTo(1)); assertThat(response.getResponses()[0].getResponse().isExists(), equalTo(false)); @@ -444,9 +443,9 @@ public void testMultiGetWithVersion() throws Exception { // Version from translog response = client().prepareMultiGet() - .add(new MultiGetRequest.Item(indexOrAlias(), "type1", "1").version(Versions.MATCH_ANY)) - .add(new MultiGetRequest.Item(indexOrAlias(), "type1", "1").version(1)) - .add(new MultiGetRequest.Item(indexOrAlias(), "type1", "1").version(2)) + .add(new MultiGetRequest.Item(indexOrAlias(), "1").version(Versions.MATCH_ANY)) + .add(new MultiGetRequest.Item(indexOrAlias(), "1").version(1)) + .add(new MultiGetRequest.Item(indexOrAlias(), "1").version(2)) .get(); assertThat(response.getResponses().length, equalTo(3)); // [0] version doesn't matter, which is the default @@ -468,9 +467,9 @@ public void testMultiGetWithVersion() throws Exception { // Version from Lucene index refresh(); response = client().prepareMultiGet() - .add(new MultiGetRequest.Item(indexOrAlias(), "type1", "1").version(Versions.MATCH_ANY)) - .add(new MultiGetRequest.Item(indexOrAlias(), "type1", "1").version(1)) - .add(new MultiGetRequest.Item(indexOrAlias(), "type1", "1").version(2)) + .add(new MultiGetRequest.Item(indexOrAlias(), "1").version(Versions.MATCH_ANY)) + .add(new MultiGetRequest.Item(indexOrAlias(), "1").version(1)) + .add(new MultiGetRequest.Item(indexOrAlias(), "1").version(2)) .setRealtime(false) .get(); assertThat(response.getResponses().length, equalTo(3)); @@ -494,9 +493,9 @@ public void testMultiGetWithVersion() throws Exception { // Version from translog response = client().prepareMultiGet() - .add(new MultiGetRequest.Item(indexOrAlias(), "type1", "2").version(Versions.MATCH_ANY)) - .add(new MultiGetRequest.Item(indexOrAlias(), "type1", 
"2").version(1)) - .add(new MultiGetRequest.Item(indexOrAlias(), "type1", "2").version(2)) + .add(new MultiGetRequest.Item(indexOrAlias(), "2").version(Versions.MATCH_ANY)) + .add(new MultiGetRequest.Item(indexOrAlias(), "2").version(1)) + .add(new MultiGetRequest.Item(indexOrAlias(), "2").version(2)) .get(); assertThat(response.getResponses().length, equalTo(3)); // [0] version doesn't matter, which is the default @@ -518,9 +517,9 @@ public void testMultiGetWithVersion() throws Exception { // Version from Lucene index refresh(); response = client().prepareMultiGet() - .add(new MultiGetRequest.Item(indexOrAlias(), "type1", "2").version(Versions.MATCH_ANY)) - .add(new MultiGetRequest.Item(indexOrAlias(), "type1", "2").version(1)) - .add(new MultiGetRequest.Item(indexOrAlias(), "type1", "2").version(2)) + .add(new MultiGetRequest.Item(indexOrAlias(), "2").version(Versions.MATCH_ANY)) + .add(new MultiGetRequest.Item(indexOrAlias(), "2").version(1)) + .add(new MultiGetRequest.Item(indexOrAlias(), "2").version(2)) .setRealtime(false) .get(); assertThat(response.getResponses().length, equalTo(3)); @@ -569,16 +568,13 @@ public void testGetFieldsNonLeafField() throws Exception { IllegalArgumentException exc = expectThrows( IllegalArgumentException.class, - () -> client().prepareGet(indexOrAlias(), "my-type1", "1").setStoredFields("field1").get() + () -> client().prepareGet(indexOrAlias(), "1").setStoredFields("field1").get() ); assertThat(exc.getMessage(), equalTo("field [field1] isn't a leaf field")); flush(); - exc = expectThrows( - IllegalArgumentException.class, - () -> client().prepareGet(indexOrAlias(), "my-type1", "1").setStoredFields("field1").get() - ); + exc = expectThrows(IllegalArgumentException.class, () -> client().prepareGet(indexOrAlias(), "1").setStoredFields("field1").get()); assertThat(exc.getMessage(), equalTo("field [field1] isn't a leaf field")); } @@ -649,13 +645,13 @@ public void testGetFieldsComplexField() throws Exception { logger.info("checking real time retrieval"); String field = "field1.field2.field3.field4"; - GetResponse getResponse = client().prepareGet("my-index", "my-type", "1").setStoredFields(field).get(); + GetResponse getResponse = client().prepareGet("my-index", "1").setStoredFields(field).get(); assertThat(getResponse.isExists(), equalTo(true)); assertThat(getResponse.getField(field).getValues().size(), equalTo(2)); assertThat(getResponse.getField(field).getValues().get(0).toString(), equalTo("value1")); assertThat(getResponse.getField(field).getValues().get(1).toString(), equalTo("value2")); - getResponse = client().prepareGet("my-index", "my-type", "1").setStoredFields(field).get(); + getResponse = client().prepareGet("my-index", "1").setStoredFields(field).get(); assertThat(getResponse.isExists(), equalTo(true)); assertThat(getResponse.getField(field).getValues().size(), equalTo(2)); assertThat(getResponse.getField(field).getValues().get(0).toString(), equalTo("value1")); @@ -680,7 +676,7 @@ public void testGetFieldsComplexField() throws Exception { logger.info("checking post-flush retrieval"); - getResponse = client().prepareGet("my-index", "my-type", "1").setStoredFields(field).get(); + getResponse = client().prepareGet("my-index", "1").setStoredFields(field).get(); assertThat(getResponse.isExists(), equalTo(true)); assertThat(getResponse.getField(field).getValues().size(), equalTo(2)); assertThat(getResponse.getField(field).getValues().get(0).toString(), equalTo("value1")); @@ -891,7 +887,7 @@ protected void assertGetFieldNull(String index, String 
type, String docId, Strin } private GetResponse multiGetDocument(String index, String type, String docId, String field, @Nullable String routing) { - MultiGetRequest.Item getItem = new MultiGetRequest.Item(index, type, docId).storedFields(field); + MultiGetRequest.Item getItem = new MultiGetRequest.Item(index, docId).storedFields(field); if (routing != null) { getItem.routing(routing); } @@ -902,7 +898,7 @@ private GetResponse multiGetDocument(String index, String type, String docId, St } private GetResponse getDocument(String index, String type, String docId, String field, @Nullable String routing) { - GetRequestBuilder getRequestBuilder = client().prepareGet().setIndex(index).setType(type).setId(docId).setStoredFields(field); + GetRequestBuilder getRequestBuilder = client().prepareGet().setIndex(index).setId(docId).setStoredFields(field); if (routing != null) { getRequestBuilder.setRouting(routing); } diff --git a/server/src/internalClusterTest/java/org/opensearch/index/FinalPipelineIT.java b/server/src/internalClusterTest/java/org/opensearch/index/FinalPipelineIT.java index 84e1231a7b8b4..359d40e3b7b9f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/FinalPipelineIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/FinalPipelineIT.java @@ -224,7 +224,7 @@ public void testRequestPipelineAndFinalPipeline() { index.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); final IndexResponse response = index.get(); assertThat(response.status(), equalTo(RestStatus.CREATED)); - final GetRequestBuilder get = client().prepareGet("index", "_doc", "1"); + final GetRequestBuilder get = client().prepareGet("index", "1"); final GetResponse getResponse = get.get(); assertTrue(getResponse.isExists()); final Map<String, Object> source = getResponse.getSourceAsMap(); @@ -252,7 +252,7 @@ public void testDefaultAndFinalPipeline() { index.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); final IndexResponse response = index.get(); assertThat(response.status(), equalTo(RestStatus.CREATED)); - final GetRequestBuilder get = client().prepareGet("index", "_doc", "1"); + final GetRequestBuilder get = client().prepareGet("index", "1"); final GetResponse getResponse = get.get(); assertTrue(getResponse.isExists()); final Map<String, Object> source = getResponse.getSourceAsMap(); @@ -302,7 +302,7 @@ public void testDefaultAndFinalPipelineFromTemplates() { index.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); final IndexResponse response = index.get(); assertThat(response.status(), equalTo(RestStatus.CREATED)); - final GetRequestBuilder get = client().prepareGet("index", "_doc", "1"); + final GetRequestBuilder get = client().prepareGet("index", "1"); final GetResponse getResponse = get.get(); assertTrue(getResponse.isExists()); final Map<String, Object> source = getResponse.getSourceAsMap(); diff --git a/server/src/internalClusterTest/java/org/opensearch/index/ShardIndexingPressureSettingsIT.java b/server/src/internalClusterTest/java/org/opensearch/index/ShardIndexingPressureSettingsIT.java index 5c0abf72c3bf0..1cdf8e702aafa 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/ShardIndexingPressureSettingsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/ShardIndexingPressureSettingsIT.java @@ -403,6 +403,8 @@ public void testShardIndexingPressureEnforcedEnabledDisabledSetting() throws Exc secondSuccessFuture = client(coordinatingOnlyNode).bulk(bulkRequest); Thread.sleep(25); + waitForTwoOutstandingRequests(coordinatingShardTracker); + // This request breaches the threshold
and hence will be rejected expectThrows(OpenSearchRejectedExecutionException.class, () -> client(coordinatingOnlyNode).bulk(bulkRequest).actionGet()); @@ -636,6 +638,7 @@ public void testShardIndexingPressureLastSuccessfulSettingsUpdate() throws Excep IndexingPressureService.class, coordinatingOnlyNode ).getShardIndexingPressure().getShardIndexingPressureTracker(shardId); + waitForTwoOutstandingRequests(coordinatingShardTracker); expectThrows(OpenSearchRejectedExecutionException.class, () -> client(coordinatingOnlyNode).bulk(bulkRequest).actionGet()); assertEquals(1, coordinatingShardTracker.getCoordinatingOperationTracker().getRejectionTracker().getTotalRejections()); assertEquals( @@ -648,6 +651,7 @@ public void testShardIndexingPressureLastSuccessfulSettingsUpdate() throws Excep ShardIndexingPressureTracker primaryShardTracker = internalCluster().getInstance(IndexingPressureService.class, primaryName) .getShardIndexingPressure() .getShardIndexingPressureTracker(shardId); + waitForTwoOutstandingRequests(primaryShardTracker); expectThrows(OpenSearchRejectedExecutionException.class, () -> client(primaryName).bulk(bulkRequest).actionGet()); assertEquals(1, primaryShardTracker.getCoordinatingOperationTracker().getRejectionTracker().getTotalRejections()); assertEquals( @@ -920,6 +924,12 @@ private String getCoordinatingOnlyNode() { .getName(); } + private static void waitForTwoOutstandingRequests(ShardIndexingPressureTracker tracker) throws Exception { + assertBusy( + () -> { assertEquals(tracker.getCoordinatingOperationTracker().getPerformanceTracker().getTotalOutstandingRequests(), 2); } + ); + } + private void restartCluster(Settings settings) throws Exception { internalCluster().fullRestart(new InternalTestCluster.RestartCallback() { @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/index/engine/InternalEngineMergeIT.java b/server/src/internalClusterTest/java/org/opensearch/index/engine/InternalEngineMergeIT.java index 47d7e974357d8..06ec4dc6d2812 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/engine/InternalEngineMergeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/engine/InternalEngineMergeIT.java @@ -71,7 +71,6 @@ public void testMergesHappening() throws Exception { for (int j = 0; j < numDocs; ++j) { request.add( Requests.indexRequest("test") - .type("type1") .id(Long.toString(id++)) .source(jsonBuilder().startObject().field("l", randomLong()).endObject()) ); diff --git a/server/src/internalClusterTest/java/org/opensearch/index/mapper/DynamicMappingIT.java b/server/src/internalClusterTest/java/org/opensearch/index/mapper/DynamicMappingIT.java index 28d92909a7f93..cb01295ae734c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/mapper/DynamicMappingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/mapper/DynamicMappingIT.java @@ -93,10 +93,10 @@ public void testConflictingDynamicMappingsBulk() { assertTrue(bulkResponse.hasFailures()); } - private static void assertMappingsHaveField(GetMappingsResponse mappings, String index, String type, String field) throws IOException { + private static void assertMappingsHaveField(GetMappingsResponse mappings, String index, String field) throws IOException { ImmutableOpenMap<String, MappingMetadata> indexMappings = mappings.getMappings().get("index"); assertNotNull(indexMappings); - MappingMetadata typeMappings = indexMappings.get(type); + MappingMetadata typeMappings = indexMappings.get(MapperService.SINGLE_MAPPING_NAME); assertNotNull(typeMappings); Map<String, Object>
typeMappingsMap = typeMappings.getSourceAsMap(); Map<String, Object> properties = (Map<String, Object>) typeMappingsMap.get("properties"); @@ -134,12 +134,12 @@ public void run() { throw error.get(); } Thread.sleep(2000); - GetMappingsResponse mappings = client().admin().indices().prepareGetMappings("index").setTypes("type").get(); + GetMappingsResponse mappings = client().admin().indices().prepareGetMappings("index").get(); for (int i = 0; i < indexThreads.length; ++i) { - assertMappingsHaveField(mappings, "index", "type", "field" + i); + assertMappingsHaveField(mappings, "index", "field" + i); } for (int i = 0; i < indexThreads.length; ++i) { - assertTrue(client().prepareGet("index", "type", Integer.toString(i)).get().isExists()); + assertTrue(client().prepareGet("index", Integer.toString(i)).get().isExists()); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/index/seqno/RetentionLeaseIT.java b/server/src/internalClusterTest/java/org/opensearch/index/seqno/RetentionLeaseIT.java index e1c56129c9f4b..ed6074b39c8a7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/seqno/RetentionLeaseIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/seqno/RetentionLeaseIT.java @@ -43,7 +43,6 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; -import org.opensearch.index.engine.Engine; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.ShardId; import org.opensearch.indices.IndicesService; @@ -122,7 +121,7 @@ public void testRetentionLeasesSyncedOnAdd() throws Exception { final CountDownLatch latch = new CountDownLatch(1); final ActionListener<ReplicationResponse> listener = countDownLatchListener(latch); // simulate a peer recovery which locks the soft deletes policy on the primary - final Closeable retentionLock = randomBoolean() ? primary.acquireHistoryRetentionLock(Engine.HistorySource.INDEX) : () -> {}; + final Closeable retentionLock = randomBoolean() ? primary.acquireHistoryRetentionLock() : () -> {}; currentRetentionLeases.put(id, primary.addRetentionLease(id, retainingSequenceNumber, source, listener)); latch.await(); retentionLock.close(); @@ -175,7 +174,7 @@ public void testRetentionLeaseSyncedOnRemove() throws Exception { final CountDownLatch latch = new CountDownLatch(1); final ActionListener<ReplicationResponse> listener = countDownLatchListener(latch); // simulate a peer recovery which locks the soft deletes policy on the primary - final Closeable retentionLock = randomBoolean() ? primary.acquireHistoryRetentionLock(Engine.HistorySource.INDEX) : () -> {}; + final Closeable retentionLock = randomBoolean() ? primary.acquireHistoryRetentionLock() : () -> {}; currentRetentionLeases.put(id, primary.addRetentionLease(id, retainingSequenceNumber, source, listener)); latch.await(); retentionLock.close(); @@ -186,7 +185,7 @@ public void testRetentionLeaseSyncedOnRemove() throws Exception { final CountDownLatch latch = new CountDownLatch(1); primary.removeRetentionLease(id, countDownLatchListener(latch)); // simulate a peer recovery which locks the soft deletes policy on the primary - final Closeable retentionLock = randomBoolean() ?
primary.acquireHistoryRetentionLock() : () -> {}; currentRetentionLeases.remove(id); latch.await(); retentionLock.close(); @@ -346,22 +345,16 @@ public void testBackgroundRetentionLeaseSync() throws Exception { ) ); } - assertBusy( - () -> { - // check all retention leases have been synced to all replicas - for (final ShardRouting replicaShard : clusterService().state() - .routingTable() - .index("index") - .shard(0) - .replicaShards()) { - final String replicaShardNodeId = replicaShard.currentNodeId(); - final String replicaShardNodeName = clusterService().state().nodes().get(replicaShardNodeId).getName(); - final IndexShard replica = internalCluster().getInstance(IndicesService.class, replicaShardNodeName) - .getShardOrNull(new ShardId(resolveIndex("index"), 0)); - assertThat(replica.getRetentionLeases(), equalTo(primary.getRetentionLeases())); - } + assertBusy(() -> { + // check all retention leases have been synced to all replicas + for (final ShardRouting replicaShard : clusterService().state().routingTable().index("index").shard(0).replicaShards()) { + final String replicaShardNodeId = replicaShard.currentNodeId(); + final String replicaShardNodeName = clusterService().state().nodes().get(replicaShardNodeId).getName(); + final IndexShard replica = internalCluster().getInstance(IndicesService.class, replicaShardNodeName) + .getShardOrNull(new ShardId(resolveIndex("index"), 0)); + assertThat(replica.getRetentionLeases(), equalTo(primary.getRetentionLeases())); } - ); + }); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java index ff234694a441c..fa0fcfdbea628 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java @@ -36,9 +36,6 @@ import org.opensearch.ExceptionsHelper; import org.opensearch.Version; import org.opensearch.action.ActionListener; -import org.opensearch.action.admin.cluster.node.stats.NodeStats; -import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; -import org.opensearch.action.admin.indices.stats.IndexStats; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchResponse; @@ -58,7 +55,6 @@ import org.opensearch.common.CheckedRunnable; import org.opensearch.common.Strings; import org.opensearch.common.UUIDs; -import org.opensearch.common.breaker.CircuitBreaker; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.lucene.uid.Versions; import org.opensearch.common.settings.Settings; @@ -77,7 +73,6 @@ import org.opensearch.index.engine.CommitStats; import org.opensearch.index.engine.Engine; import org.opensearch.index.engine.NoOpEngine; -import org.opensearch.index.engine.SegmentsStats; import org.opensearch.index.flush.FlushStats; import org.opensearch.index.mapper.SourceToParse; import org.opensearch.index.seqno.RetentionLeaseSyncer; @@ -87,11 +82,9 @@ import org.opensearch.index.translog.TranslogStats; import org.opensearch.indices.IndicesService; import org.opensearch.indices.breaker.CircuitBreakerService; -import org.opensearch.indices.breaker.CircuitBreakerStats; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.replication.checkpoint.TransportCheckpointPublisher; import org.opensearch.plugins.Plugin; -import 
org.opensearch.search.aggregations.AggregationBuilders; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.test.DummyShardLock; import org.opensearch.test.OpenSearchSingleNodeTestCase; @@ -124,7 +117,6 @@ import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiLettersOfLength; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; -import static org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.BREAKER; import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.opensearch.action.support.WriteRequest.RefreshPolicy.NONE; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; @@ -137,13 +129,11 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.allOf; -import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThanOrEqualTo; -import static org.hamcrest.Matchers.notNullValue; public class IndexShardIT extends OpenSearchSingleNodeTestCase { @@ -180,24 +170,6 @@ public void testLockTryingToDelete() throws Exception { } } - public void testMarkAsInactiveTriggersSyncedFlush() throws Exception { - assertAcked( - client().admin() - .indices() - .prepareCreate("test") - .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)) - ); - client().prepareIndex("test", "test").setSource("{}", XContentType.JSON).get(); - ensureGreen("test"); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - indicesService.indexService(resolveIndex("test")).getShardOrNull(0).checkIdle(0); - assertBusy(() -> { - IndexStats indexStats = client().admin().indices().prepareStats("test").clear().setTranslog(true).get().getIndex("test"); - assertThat(indexStats.getTotal().translog.getUncommittedOperations(), equalTo(0)); - indicesService.indexService(resolveIndex("test")).getShardOrNull(0).checkIdle(0); - }); - } - public void testDurableFlagHasEffect() throws Exception { createIndex("test"); ensureGreen(); @@ -663,86 +635,6 @@ public void postDelete(ShardId shardId, Engine.Delete delete, Engine.DeleteResul } } - /** Check that the accounting breaker correctly matches the segments API for memory usage */ - private void checkAccountingBreaker() { - CircuitBreakerService breakerService = getInstanceFromNode(CircuitBreakerService.class); - CircuitBreaker acctBreaker = breakerService.getBreaker(CircuitBreaker.ACCOUNTING); - long usedMem = acctBreaker.getUsed(); - assertThat(usedMem, greaterThan(0L)); - NodesStatsResponse response = client().admin().cluster().prepareNodesStats().setIndices(true).addMetric(BREAKER.metricName()).get(); - NodeStats stats = response.getNodes().get(0); - assertNotNull(stats); - SegmentsStats segmentsStats = stats.getIndices().getSegments(); - CircuitBreakerStats breakerStats = stats.getBreaker().getStats(CircuitBreaker.ACCOUNTING); - assertEquals(usedMem, segmentsStats.getMemoryInBytes()); - assertEquals(usedMem, breakerStats.getEstimated()); - } - - public void testCircuitBreakerIncrementedByIndexShard() throws Exception { - client().admin() - .cluster() - .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put("network.breaker.inflight_requests.overhead", 
0.0)) - .get(); - - // Generate a couple of segments - client().prepareIndex("test", "_doc", "1") - .setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON) - .setRefreshPolicy(IMMEDIATE) - .get(); - // Use routing so 2 documents are guaranteed to be on the same shard - String routing = randomAlphaOfLength(5); - client().prepareIndex("test", "_doc", "2") - .setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON) - .setRefreshPolicy(IMMEDIATE) - .setRouting(routing) - .get(); - client().prepareIndex("test", "_doc", "3") - .setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON) - .setRefreshPolicy(IMMEDIATE) - .setRouting(routing) - .get(); - - checkAccountingBreaker(); - // Test that force merging causes the breaker to be correctly adjusted - logger.info("--> force merging to a single segment"); - client().admin().indices().prepareForceMerge("test").setMaxNumSegments(1).setFlush(randomBoolean()).get(); - client().admin().indices().prepareRefresh().get(); - checkAccountingBreaker(); - - client().admin() - .cluster() - .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put("indices.breaker.total.limit", "1kb")) - .get(); - - // Test that we're now above the parent limit due to the segments - Exception e = expectThrows( - Exception.class, - () -> client().prepareSearch("test").addAggregation(AggregationBuilders.terms("foo_terms").field("foo.keyword")).get() - ); - logger.info("--> got an expected exception", e); - assertThat(e.getCause(), notNullValue()); - assertThat(e.getCause().getMessage(), containsString("[parent] Data too large, data for []")); - - client().admin() - .cluster() - .prepareUpdateSettings() - .setTransientSettings( - Settings.builder().putNull("indices.breaker.total.limit").putNull("network.breaker.inflight_requests.overhead") - ) - .get(); - - // Test that deleting the index causes the breaker to correctly be decremented - logger.info("--> deleting index"); - client().admin().indices().prepareDelete("test").get(); - - // Accounting breaker should now be 0 - CircuitBreakerService breakerService = getInstanceFromNode(CircuitBreakerService.class); - CircuitBreaker acctBreaker = breakerService.getBreaker(CircuitBreaker.ACCOUNTING); - assertThat(acctBreaker.getUsed(), equalTo(0L)); - } - public static final IndexShard recoverShard(IndexShard newShard) throws IOException { DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null)); @@ -912,7 +804,6 @@ public void testLimitNumberOfRetainedTranslogFiles() throws Exception { Settings.Builder settings = Settings.builder() .put(SETTING_NUMBER_OF_SHARDS, 1) .put(SETTING_NUMBER_OF_REPLICAS, 0) - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false) .put(IndexSettings.INDEX_TRANSLOG_RETENTION_TOTAL_FILES_SETTING.getKey(), translogRetentionTotalFiles); if (randomBoolean()) { settings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), new ByteSizeValue(between(1, 1024 * 1024))); diff --git a/server/src/internalClusterTest/java/org/opensearch/indexing/IndexActionIT.java b/server/src/internalClusterTest/java/org/opensearch/indexing/IndexActionIT.java index ffc8e74875c58..3819b42e799ed 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indexing/IndexActionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indexing/IndexActionIT.java @@ -99,7 +99,7 
@@ public void testAutoGenerateIdNoDuplicates() throws Exception { } try { logger.debug("running search with a specific type"); - SearchResponse response = client().prepareSearch("test").setTypes("type").get(); + SearchResponse response = client().prepareSearch("test").get(); if (response.getHits().getTotalHits().value != numOfDocs) { final String message = "Count is " + response.getHits().getTotalHits().value diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/DateMathIndexExpressionsIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/DateMathIndexExpressionsIntegrationIT.java index ec90d271b9127..19e1e196daad0 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/DateMathIndexExpressionsIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/DateMathIndexExpressionsIntegrationIT.java @@ -80,22 +80,22 @@ public void testIndexNameDateMathExpressions() { assertHitCount(searchResponse, 3); assertSearchHits(searchResponse, "1", "2", "3"); - GetResponse getResponse = client().prepareGet(dateMathExp1, "type", "1").get(); + GetResponse getResponse = client().prepareGet(dateMathExp1, "1").get(); assertThat(getResponse.isExists(), is(true)); assertThat(getResponse.getId(), equalTo("1")); - getResponse = client().prepareGet(dateMathExp2, "type", "2").get(); + getResponse = client().prepareGet(dateMathExp2, "2").get(); assertThat(getResponse.isExists(), is(true)); assertThat(getResponse.getId(), equalTo("2")); - getResponse = client().prepareGet(dateMathExp3, "type", "3").get(); + getResponse = client().prepareGet(dateMathExp3, "3").get(); assertThat(getResponse.isExists(), is(true)); assertThat(getResponse.getId(), equalTo("3")); MultiGetResponse mgetResponse = client().prepareMultiGet() - .add(dateMathExp1, "type", "1") - .add(dateMathExp2, "type", "2") - .add(dateMathExp3, "type", "3") + .add(dateMathExp1, "1") + .add(dateMathExp2, "2") + .add(dateMathExp3, "3") .get(); assertThat(mgetResponse.getResponses()[0].getResponse().isExists(), is(true)); assertThat(mgetResponse.getResponses()[0].getResponse().getId(), equalTo("1")); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/flush/FlushIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/flush/FlushIT.java deleted file mode 100644 index 277e83fa51379..0000000000000 --- a/server/src/internalClusterTest/java/org/opensearch/indices/flush/FlushIT.java +++ /dev/null @@ -1,495 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.indices.flush; - -import org.apache.lucene.index.Term; -import org.opensearch.action.ActionListener; -import org.opensearch.action.admin.indices.flush.FlushRequest; -import org.opensearch.action.admin.indices.flush.FlushResponse; -import org.opensearch.action.admin.indices.flush.SyncedFlushResponse; -import org.opensearch.action.admin.indices.stats.IndexStats; -import org.opensearch.action.admin.indices.stats.ShardStats; -import org.opensearch.action.support.ActiveShardCount; -import org.opensearch.cluster.ClusterState; -import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.cluster.routing.ShardRouting; -import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; -import org.opensearch.common.UUIDs; -import org.opensearch.common.ValidationException; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.index.Index; -import org.opensearch.index.IndexService; -import org.opensearch.index.IndexSettings; -import org.opensearch.index.engine.Engine; -import org.opensearch.index.engine.InternalEngine; -import org.opensearch.index.engine.InternalEngineTests; -import org.opensearch.index.mapper.ParsedDocument; -import org.opensearch.index.mapper.Uid; -import org.opensearch.index.seqno.SequenceNumbers; -import org.opensearch.index.shard.IndexShard; -import org.opensearch.index.shard.IndexShardTestCase; -import org.opensearch.index.shard.ShardId; -import org.opensearch.indices.IndexingMemoryController; -import org.opensearch.indices.IndicesService; -import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.InternalSettingsPlugin; -import org.opensearch.test.InternalTestCluster; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.stream.Collectors; - -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.emptyArray; -import static org.hamcrest.Matchers.emptyIterable; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.nullValue; - -@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) -public class FlushIT extends OpenSearchIntegTestCase { - - @Override - protected Collection<Class<? extends Plugin>> nodePlugins() { - return Collections.singletonList(InternalSettingsPlugin.class); - } - - public void testWaitIfOngoing() throws InterruptedException { - createIndex("test"); - ensureGreen("test"); - final int numIters = scaledRandomIntBetween(10, 30); - for (int i = 0; i < numIters; i++) { - for (int j = 0; j < 10; j++) { - client().prepareIndex("test", "test").setSource("{}", XContentType.JSON).get(); - } - final CountDownLatch latch = new CountDownLatch(10); - final CopyOnWriteArrayList<Throwable> errors = new CopyOnWriteArrayList<>(); - for (int j = 0; j < 10; j++) { - 
client().admin().indices().prepareFlush("test").execute(new ActionListener<FlushResponse>() { - @Override - public void onResponse(FlushResponse flushResponse) { - try { - // don't use assertAllSuccessful it uses a randomized context that belongs to a different thread - assertThat( - "Unexpected ShardFailures: " + Arrays.toString(flushResponse.getShardFailures()), - flushResponse.getFailedShards(), - equalTo(0) - ); - latch.countDown(); - } catch (Exception ex) { - onFailure(ex); - } - - } - - @Override - public void onFailure(Exception e) { - errors.add(e); - latch.countDown(); - } - }); - } - latch.await(); - assertThat(errors, emptyIterable()); - } - } - - public void testRejectIllegalFlushParameters() { - createIndex("test"); - int numDocs = randomIntBetween(0, 10); - for (int i = 0; i < numDocs; i++) { - client().prepareIndex("test", "_doc").setSource("{}", XContentType.JSON).get(); - } - assertThat( - expectThrows( - ValidationException.class, - () -> client().admin().indices().flush(new FlushRequest().force(true).waitIfOngoing(false)).actionGet() - ).getMessage(), - containsString("wait_if_ongoing must be true for a force flush") - ); - assertThat( - client().admin().indices().flush(new FlushRequest().force(true).waitIfOngoing(true)).actionGet().getShardFailures(), - emptyArray() - ); - assertThat( - client().admin().indices().flush(new FlushRequest().force(false).waitIfOngoing(randomBoolean())).actionGet().getShardFailures(), - emptyArray() - ); - } - - public void testSyncedFlush() throws Exception { - internalCluster().ensureAtLeastNumDataNodes(2); - prepareCreate("test").setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)).get(); - ensureGreen(); - - final Index index = client().admin().cluster().prepareState().get().getState().metadata().index("test").getIndex(); - - IndexStats indexStats = client().admin().indices().prepareStats("test").get().getIndex("test"); - for (ShardStats shardStats : indexStats.getShards()) { - assertNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID)); - } - - ShardsSyncedFlushResult result; - if (randomBoolean()) { - logger.info("--> sync flushing shard 0"); - result = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), new ShardId(index, 0)); - } else { - logger.info("--> sync flushing index [test]"); - SyncedFlushResponse indicesResult = client().admin().indices().prepareSyncedFlush("test").get(); - result = indicesResult.getShardsResultPerIndex().get("test").get(0); - } - assertFalse(result.failed()); - assertThat(result.totalShards(), equalTo(indexStats.getShards().length)); - assertThat(result.successfulShards(), equalTo(indexStats.getShards().length)); - - indexStats = client().admin().indices().prepareStats("test").get().getIndex("test"); - String syncId = result.syncId(); - for (ShardStats shardStats : indexStats.getShards()) { - final String shardSyncId = shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID); - assertThat(shardSyncId, equalTo(syncId)); - } - - // now, start new node and relocate a shard there and see if sync id still there - String newNodeName = internalCluster().startNode(); - ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); - ShardRouting shardRouting = clusterState.getRoutingTable().index("test").shard(0).iterator().next(); - String currentNodeName = clusterState.nodes().resolveNode(shardRouting.currentNodeId()).getName(); - assertFalse(currentNodeName.equals(newNodeName)); - internalCluster().client() - .admin() - .cluster() - 
.prepareReroute() - .add(new MoveAllocationCommand("test", 0, currentNodeName, newNodeName)) - .get(); - - client().admin().cluster().prepareHealth().setWaitForNoRelocatingShards(true).get(); - indexStats = client().admin().indices().prepareStats("test").get().getIndex("test"); - for (ShardStats shardStats : indexStats.getShards()) { - assertNotNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID)); - } - - client().admin() - .indices() - .prepareUpdateSettings("test") - .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build()) - .get(); - ensureGreen("test"); - indexStats = client().admin().indices().prepareStats("test").get().getIndex("test"); - for (ShardStats shardStats : indexStats.getShards()) { - assertNotNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID)); - } - client().admin() - .indices() - .prepareUpdateSettings("test") - .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, internalCluster().numDataNodes() - 1).build()) - .get(); - ensureGreen("test"); - indexStats = client().admin().indices().prepareStats("test").get().getIndex("test"); - for (ShardStats shardStats : indexStats.getShards()) { - assertNotNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID)); - } - } - - public void testSyncedFlushWithConcurrentIndexing() throws Exception { - - internalCluster().ensureAtLeastNumDataNodes(3); - createIndex("test"); - - client().admin() - .indices() - .prepareUpdateSettings("test") - .setSettings( - Settings.builder() - .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)) - .put("index.refresh_interval", -1) - .put("index.number_of_replicas", internalCluster().numDataNodes() - 1) - ) - .get(); - ensureGreen(); - final AtomicBoolean stop = new AtomicBoolean(false); - final AtomicInteger numDocs = new AtomicInteger(0); - Thread indexingThread = new Thread() { - @Override - public void run() { - while (stop.get() == false) { - client().prepareIndex().setIndex("test").setType("_doc").setSource("{}", XContentType.JSON).get(); - numDocs.incrementAndGet(); - } - } - }; - indexingThread.start(); - - IndexStats indexStats = client().admin().indices().prepareStats("test").get().getIndex("test"); - for (ShardStats shardStats : indexStats.getShards()) { - assertNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID)); - } - logger.info("--> trying sync flush"); - SyncedFlushResponse syncedFlushResult = client().admin().indices().prepareSyncedFlush("test").get(); - logger.info("--> sync flush done"); - stop.set(true); - indexingThread.join(); - indexStats = client().admin().indices().prepareStats("test").get().getIndex("test"); - assertFlushResponseEqualsShardStats(indexStats.getShards(), syncedFlushResult.getShardsResultPerIndex().get("test")); - refresh(); - assertThat(client().prepareSearch().setSize(0).get().getHits().getTotalHits().value, equalTo((long) numDocs.get())); - logger.info("indexed {} docs", client().prepareSearch().setSize(0).get().getHits().getTotalHits().value); - logClusterState(); - internalCluster().fullRestart(); - ensureGreen(); - assertThat(client().prepareSearch().setSize(0).get().getHits().getTotalHits().value, equalTo((long) numDocs.get())); - } - - private void assertFlushResponseEqualsShardStats(ShardStats[] shardsStats, List<ShardsSyncedFlushResult> syncedFlushResults) { - - for (final ShardStats shardStats : shardsStats) { - for (final ShardsSyncedFlushResult shardResult : syncedFlushResults) 
{ - if (shardStats.getShardRouting().getId() == shardResult.shardId().getId()) { - for (Map.Entry<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> singleResponse : shardResult.shardResponses() - .entrySet()) { - if (singleResponse.getKey().currentNodeId().equals(shardStats.getShardRouting().currentNodeId())) { - if (singleResponse.getValue().success()) { - logger.info( - "{} sync flushed on node {}", - singleResponse.getKey().shardId(), - singleResponse.getKey().currentNodeId() - ); - assertNotNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID)); - } else { - logger.info( - "{} sync flush failed for on node {}", - singleResponse.getKey().shardId(), - singleResponse.getKey().currentNodeId() - ); - assertNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID)); - } - } - } - } - } - } - - public void testUnallocatedShardsDoesNotHang() throws InterruptedException { - // create an index but disallow allocation - prepareCreate("test").setWaitForActiveShards(ActiveShardCount.NONE) - .setSettings(Settings.builder().put("index.routing.allocation.include._name", "nonexistent")) - .get(); - - // this should not hang but instead immediately return with empty result set - List<ShardsSyncedFlushResult> shardsResult = client().admin() - .indices() - .prepareSyncedFlush("test") - .get() - .getShardsResultPerIndex() - .get("test"); - // just to make sure the test actually tests the right thing - int numShards = client().admin() - .indices() - .prepareGetSettings("test") - .get() - .getIndexToSettings() - .get("test") - .getAsInt(IndexMetadata.SETTING_NUMBER_OF_SHARDS, -1); - assertThat(shardsResult.size(), equalTo(numShards)); - assertThat(shardsResult.get(0).failureReason(), equalTo("no active shards")); - } - - private void indexDoc(Engine engine, String id) throws IOException { - final ParsedDocument doc = InternalEngineTests.createParsedDoc(id, null); - final Engine.IndexResult indexResult = engine.index( - new Engine.Index( - new Term("_id", Uid.encodeId(doc.id())), - doc, - ((InternalEngine) engine).getProcessedLocalCheckpoint() + 1, - 1L, - 1L, - null, - Engine.Operation.Origin.REPLICA, - System.nanoTime(), - -1L, - false, - SequenceNumbers.UNASSIGNED_SEQ_NO, - 0 - ) - ); - assertThat(indexResult.getFailure(), nullValue()); - engine.syncTranslog(); - } - - public void testSyncedFlushSkipOutOfSyncReplicas() throws Exception { - internalCluster().ensureAtLeastNumDataNodes(between(2, 3)); - final int numberOfReplicas = internalCluster().numDataNodes() - 1; - assertAcked( - prepareCreate("test").setSettings( - Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numberOfReplicas) - ).get() - ); - ensureGreen(); - final Index index = clusterService().state().metadata().index("test").getIndex(); - final ShardId shardId = new ShardId(index, 0); - final int numDocs = between(1, 10); - for (int i = 0; i < numDocs; i++) { - index("test", "doc", Integer.toString(i)); - } - final List<IndexShard> indexShards = internalCluster().nodesInclude("test") - .stream() - .map(node -> internalCluster().getInstance(IndicesService.class, node).getShardOrNull(shardId)) - .collect(Collectors.toList()); - // Index extra documents to one replica - synced-flush should fail on that replica. 
- final IndexShard outOfSyncReplica = randomValueOtherThanMany(s -> s.routingEntry().primary(), () -> randomFrom(indexShards)); - final int extraDocs = between(1, 10); - for (int i = 0; i < extraDocs; i++) { - indexDoc(IndexShardTestCase.getEngine(outOfSyncReplica), "extra_" + i); - } - final ShardsSyncedFlushResult partialResult = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); - assertThat(partialResult.totalShards(), equalTo(numberOfReplicas + 1)); - assertThat(partialResult.successfulShards(), equalTo(numberOfReplicas)); - assertThat( - partialResult.shardResponses().get(outOfSyncReplica.routingEntry()).failureReason, - equalTo( - "ongoing indexing operations: num docs on replica [" + (numDocs + extraDocs) + "]; num docs on primary [" + numDocs + "]" - ) - ); - // Index extra documents to all shards - synced-flush should be ok. - for (IndexShard indexShard : indexShards) { - // Do reindex documents to the out of sync replica to avoid trigger merges - if (indexShard != outOfSyncReplica) { - for (int i = 0; i < extraDocs; i++) { - indexDoc(IndexShardTestCase.getEngine(indexShard), "extra_" + i); - } - } - } - final ShardsSyncedFlushResult fullResult = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); - assertThat(fullResult.totalShards(), equalTo(numberOfReplicas + 1)); - assertThat(fullResult.successfulShards(), equalTo(numberOfReplicas + 1)); - } - - public void testDoNotRenewSyncedFlushWhenAllSealed() throws Exception { - internalCluster().ensureAtLeastNumDataNodes(between(2, 3)); - final int numberOfReplicas = internalCluster().numDataNodes() - 1; - assertAcked( - prepareCreate("test").setSettings( - Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numberOfReplicas) - ).get() - ); - ensureGreen(); - final Index index = clusterService().state().metadata().index("test").getIndex(); - final ShardId shardId = new ShardId(index, 0); - final int numDocs = between(1, 10); - for (int i = 0; i < numDocs; i++) { - index("test", "doc", Integer.toString(i)); - } - final ShardsSyncedFlushResult firstSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); - assertThat(firstSeal.successfulShards(), equalTo(numberOfReplicas + 1)); - // Do not renew synced-flush - final ShardsSyncedFlushResult secondSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); - assertThat(secondSeal.successfulShards(), equalTo(numberOfReplicas + 1)); - assertThat(secondSeal.syncId(), equalTo(firstSeal.syncId())); - // Shards were updated, renew synced flush. - final int moreDocs = between(1, 10); - for (int i = 0; i < moreDocs; i++) { - index("test", "doc", "more-" + i); - } - final ShardsSyncedFlushResult thirdSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); - assertThat(thirdSeal.successfulShards(), equalTo(numberOfReplicas + 1)); - assertThat(thirdSeal.syncId(), not(equalTo(firstSeal.syncId()))); - // Manually remove or change sync-id, renew synced flush. - IndexShard shard = internalCluster().getInstance(IndicesService.class, randomFrom(internalCluster().nodesInclude("test"))) - .getShardOrNull(shardId); - if (randomBoolean()) { - // Change the existing sync-id of a single shard. 
- shard.syncFlush(UUIDs.randomBase64UUID(random()), shard.commitStats().getRawCommitId()); - assertThat(shard.commitStats().syncId(), not(equalTo(thirdSeal.syncId()))); - } else { - // Flush will create a new commit without sync-id - shard.flush(new FlushRequest(shardId.getIndexName()).force(true).waitIfOngoing(true)); - assertThat(shard.commitStats().syncId(), nullValue()); - } - final ShardsSyncedFlushResult forthSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); - assertThat(forthSeal.successfulShards(), equalTo(numberOfReplicas + 1)); - assertThat(forthSeal.syncId(), not(equalTo(thirdSeal.syncId()))); - } - - public void testFlushOnInactive() throws Exception { - final String indexName = "flush_on_inactive"; - List<String> dataNodes = internalCluster().startDataOnlyNodes( - 2, - Settings.builder().put(IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING.getKey(), randomTimeValue(10, 1000, "ms")).build() - ); - assertAcked( - client().admin() - .indices() - .prepareCreate(indexName) - .setSettings( - Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) - .put(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey(), randomTimeValue(200, 500, "ms")) - .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), randomTimeValue(50, 200, "ms")) - .put("index.routing.allocation.include._name", String.join(",", dataNodes)) - .build() - ) - ); - ensureGreen(indexName); - int numDocs = randomIntBetween(1, 10); - for (int i = 0; i < numDocs; i++) { - client().prepareIndex(indexName, "_doc").setSource("f", "v").get(); - } - if (randomBoolean()) { - internalCluster().restartNode(randomFrom(dataNodes), new InternalTestCluster.RestartCallback()); - ensureGreen(indexName); - } - assertBusy(() -> { - for (ShardStats shardStats : client().admin().indices().prepareStats(indexName).get().getShards()) { - assertThat(shardStats.getStats().getTranslog().getUncommittedOperations(), equalTo(0)); - } - }, 30, TimeUnit.SECONDS); - } -} diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/mapping/UpdateMappingIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/mapping/UpdateMappingIntegrationIT.java index 5ad516a6514fb..0afe067afb686 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/mapping/UpdateMappingIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/mapping/UpdateMappingIntegrationIT.java @@ -130,7 +130,7 @@ public void testDynamicUpdates() throws Exception { for (int rec = 0; rec < recCount; rec++) { String type = "type"; String fieldName = "field_" + type + "_" + rec; - assertConcreteMappingsOnAll("test", type, fieldName); + assertConcreteMappingsOnAll("test", fieldName); } client().admin() @@ -377,7 +377,7 @@ public void testPutMappingsWithBlocks() { * Waits until mappings for the provided fields exist on all nodes. Note, this waits for the current * started shards and checks for concrete mappings. */ - private void assertConcreteMappingsOnAll(final String index, final String type, final String... fieldNames) { + private void assertConcreteMappingsOnAll(final String index, final String... 
fieldNames) { Set<String> nodes = internalCluster().nodesInclude(index); assertThat(nodes, Matchers.not(Matchers.emptyIterable())); for (String node : nodes) { @@ -390,17 +390,17 @@ private void assertConcreteMappingsOnAll(final String index, final String type, assertNotNull("field " + fieldName + " doesn't exists on " + node, fieldType); } } - assertMappingOnMaster(index, type, fieldNames); + assertMappingOnMaster(index, fieldNames); } /** * Waits for the given mapping type to exists on the master node. */ - private void assertMappingOnMaster(final String index, final String type, final String... fieldNames) { - GetMappingsResponse response = client().admin().indices().prepareGetMappings(index).setTypes(type).get(); + private void assertMappingOnMaster(final String index, final String... fieldNames) { + GetMappingsResponse response = client().admin().indices().prepareGetMappings(index).get(); ImmutableOpenMap<String, MappingMetadata> mappings = response.getMappings().get(index); assertThat(mappings, notNullValue()); - MappingMetadata mappingMetadata = mappings.get(type); + MappingMetadata mappingMetadata = mappings.get(MapperService.SINGLE_MAPPING_NAME); assertThat(mappingMetadata, notNullValue()); Map<String, Object> mappingSource = mappingMetadata.getSourceAsMap(); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/memory/breaker/CircuitBreakerServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/memory/breaker/CircuitBreakerServiceIT.java index 2c72379e6b711..0772bc2965c4c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/memory/breaker/CircuitBreakerServiceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/memory/breaker/CircuitBreakerServiceIT.java @@ -102,8 +102,6 @@ private void reset() { HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, - HierarchyCircuitBreakerService.ACCOUNTING_CIRCUIT_BREAKER_LIMIT_SETTING, - HierarchyCircuitBreakerService.ACCOUNTING_CIRCUIT_BREAKER_OVERHEAD_SETTING, HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING, HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_OVERHEAD_SETTING, HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING @@ -405,7 +403,7 @@ public void testLimitsRequestSize() { int numRequests = inFlightRequestsLimit.bytesAsInt(); BulkRequest bulkRequest = new BulkRequest(); for (int i = 0; i < numRequests; i++) { - IndexRequest indexRequest = new IndexRequest("index", "type", Integer.toString(i)); + IndexRequest indexRequest = new IndexRequest("index").id(Integer.toString(i)); indexRequest.source(Requests.INDEX_CONTENT_TYPE, "field", "value", "num", i); bulkRequest.add(indexRequest); } diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java index 52e7e7b55bbd6..3bab909d3b7f3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java @@ -81,6 +81,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.OpenSearchRejectedExecutionException; import org.opensearch.common.xcontent.XContentType; +import org.opensearch.gateway.ReplicaShardAllocatorIT; import 
org.opensearch.index.Index; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; @@ -100,7 +101,6 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.NodeIndicesStats; import org.opensearch.indices.analysis.AnalysisModule; -import org.opensearch.indices.flush.SyncedFlushUtil; import org.opensearch.indices.recovery.RecoveryState.Stage; import org.opensearch.node.NodeClosedException; import org.opensearch.node.RecoverySettingsChunkSizePlugin; @@ -138,7 +138,6 @@ import java.util.Collections; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.Semaphore; @@ -148,7 +147,6 @@ import java.util.function.Consumer; import java.util.stream.Collectors; import java.util.stream.IntStream; -import java.util.stream.Stream; import java.util.stream.StreamSupport; import static java.util.Collections.singletonMap; @@ -403,7 +401,23 @@ public void testCancelNewShardRecoveryAndUsesExistingShardCopy() throws Exceptio final String nodeA = internalCluster().startNode(); logger.info("--> create index on node: {}", nodeA); - createAndPopulateIndex(INDEX_NAME, 1, SHARD_COUNT, REPLICA_COUNT).getShards()[0].getStats().getStore().size(); + createIndex( + INDEX_NAME, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "100ms") + .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "100ms") + .build() + ); + + int numDocs = randomIntBetween(10, 200); + final IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; + for (int i = 0; i < numDocs; i++) { + docs[i] = client().prepareIndex(INDEX_NAME, INDEX_TYPE) + .setSource("foo-int", randomInt(), "foo-string", randomAlphaOfLength(32), "foo-float", randomFloat()); + } + indexRandom(randomBoolean(), docs); logger.info("--> start node B"); // force a shard recovery from nodeA to nodeB @@ -425,8 +439,7 @@ public void testCancelNewShardRecoveryAndUsesExistingShardCopy() throws Exceptio logger.info("--> start node C"); final String nodeC = internalCluster().startNode(); - // do sync flush to gen sync id - assertThat(client().admin().indices().prepareSyncedFlush(INDEX_NAME).get().failedShards(), equalTo(0)); + ReplicaShardAllocatorIT.ensureActivePeerRecoveryRetentionLeasesAdvanced(INDEX_NAME); // hold peer recovery on phase 2 after nodeB down CountDownLatch phase1ReadyBlocked = new CountDownLatch(1); @@ -1524,93 +1537,6 @@ public void testOngoingRecoveryAndMasterFailOver() throws Exception { ensureGreen(indexName); } - public void testRecoveryFlushReplica() throws Exception { - internalCluster().ensureAtLeastNumDataNodes(3); - String indexName = "test-index"; - createIndex(indexName, Settings.builder().put("index.number_of_replicas", 0).put("index.number_of_shards", 1).build()); - int numDocs = randomIntBetween(0, 10); - indexRandom( - randomBoolean(), - false, - randomBoolean(), - IntStream.range(0, numDocs).mapToObj(n -> client().prepareIndex(indexName, "_doc").setSource("num", n)).collect(toList()) - ); - assertAcked( - client().admin().indices().prepareUpdateSettings(indexName).setSettings(Settings.builder().put("index.number_of_replicas", 1)) - ); - ensureGreen(indexName); - ShardId shardId = null; - for (ShardStats shardStats : client().admin().indices().prepareStats(indexName).get().getIndex(indexName).getShards()) { - 
shardId = shardStats.getShardRouting().shardId(); - if (shardStats.getShardRouting().primary() == false) { - assertThat(shardStats.getCommitStats().getNumDocs(), equalTo(numDocs)); - SequenceNumbers.CommitInfo commitInfo = SequenceNumbers.loadSeqNoInfoFromLuceneCommit( - shardStats.getCommitStats().getUserData().entrySet() - ); - assertThat(commitInfo.localCheckpoint, equalTo(shardStats.getSeqNoStats().getLocalCheckpoint())); - assertThat(commitInfo.maxSeqNo, equalTo(shardStats.getSeqNoStats().getMaxSeqNo())); - } - } - SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); - assertBusy(() -> assertThat(client().admin().indices().prepareSyncedFlush(indexName).get().failedShards(), equalTo(0))); - assertAcked( - client().admin().indices().prepareUpdateSettings(indexName).setSettings(Settings.builder().put("index.number_of_replicas", 2)) - ); - ensureGreen(indexName); - // Recovery should keep syncId if no indexing activity on the primary after synced-flush. - Set<String> syncIds = Stream.of(client().admin().indices().prepareStats(indexName).get().getIndex(indexName).getShards()) - .map(shardStats -> shardStats.getCommitStats().syncId()) - .collect(Collectors.toSet()); - assertThat(syncIds, hasSize(1)); - } - - public void testRecoveryUsingSyncedFlushWithoutRetentionLease() throws Exception { - internalCluster().ensureAtLeastNumDataNodes(2); - String indexName = "test-index"; - createIndex( - indexName, - Settings.builder() - .put("index.number_of_shards", 1) - .put("index.number_of_replicas", 1) - .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "24h") // do not reallocate the lost shard - .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING.getKey(), "100ms") // expire leases quickly - .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "100ms") // sync frequently - .build() - ); - int numDocs = randomIntBetween(0, 10); - indexRandom( - randomBoolean(), - false, - randomBoolean(), - IntStream.range(0, numDocs).mapToObj(n -> client().prepareIndex(indexName, "_doc").setSource("num", n)).collect(toList()) - ); - ensureGreen(indexName); - - final ShardId shardId = new ShardId(resolveIndex(indexName), 0); - assertThat(SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId).successfulShards(), equalTo(2)); - - final ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); - final ShardRouting shardToResync = randomFrom(clusterState.routingTable().shardRoutingTable(shardId).activeShards()); - internalCluster().restartNode( - clusterState.nodes().get(shardToResync.currentNodeId()).getName(), - new InternalTestCluster.RestartCallback() { - @Override - public Settings onNodeStopped(String nodeName) throws Exception { - assertBusy( - () -> assertFalse( - client().admin().indices().prepareStats(indexName).get().getShards()[0].getRetentionLeaseStats() - .retentionLeases() - .contains(ReplicationTracker.getPeerRecoveryRetentionLeaseId(shardToResync)) - ) - ); - return super.onNodeStopped(nodeName); - } - } - ); - - ensureGreen(indexName); - } - public void testRecoverLocallyUpToGlobalCheckpoint() throws Exception { internalCluster().ensureAtLeastNumDataNodes(2); List<String> nodes = randomSubsetOf( diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexIT.java index db8f9ea360598..aebb891ae784b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexIT.java 
+++ b/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexIT.java @@ -484,11 +484,7 @@ public void testRecoverExistingReplica() throws Exception { .collect(toList()) ); ensureGreen(indexName); - if (randomBoolean()) { - client().admin().indices().prepareFlush(indexName).get(); - } else { - client().admin().indices().prepareSyncedFlush(indexName).get(); - } + client().admin().indices().prepareFlush(indexName).get(); // index more documents while one shard copy is offline internalCluster().restartNode(dataNodes.get(1), new InternalTestCluster.RestartCallback() { @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/state/OpenCloseIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/state/OpenCloseIndexIT.java index ae99949c62b3e..95a421b126bae 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/state/OpenCloseIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/state/OpenCloseIndexIT.java @@ -49,7 +49,6 @@ import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.IndexNotFoundException; -import org.opensearch.index.IndexSettings; import org.opensearch.index.query.QueryBuilders; import org.opensearch.rest.RestStatus; import org.opensearch.test.OpenSearchIntegTestCase; @@ -332,7 +331,7 @@ public void testOpenCloseWithDocs() throws IOException, ExecutionException, Inte // check the index still contains the records that we indexed client().admin().indices().prepareOpen("test").execute().get(); ensureGreen(); - SearchResponse searchResponse = client().prepareSearch().setTypes("type").setQuery(QueryBuilders.matchQuery("test", "init")).get(); + SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.matchQuery("test", "init")).get(); assertNoFailures(searchResponse); assertHitCount(searchResponse, docs); } @@ -395,9 +394,6 @@ public void testOpenCloseIndexWithBlocks() { public void testTranslogStats() throws Exception { final String indexName = "test"; createIndex(indexName, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build()); - boolean softDeletesEnabled = IndexSettings.INDEX_SOFT_DELETES_SETTING.get( - client().admin().indices().prepareGetSettings(indexName).get().getIndexToSettings().get(indexName) - ); final int nbDocs = randomIntBetween(0, 50); int uncommittedOps = 0; @@ -419,7 +415,7 @@ public void testTranslogStats() throws Exception { assertThat(stats.getIndex(indexName), notNullValue()); assertThat( stats.getIndex(indexName).getPrimaries().getTranslog().estimatedNumberOfOperations(), - equalTo(softDeletesEnabled ? uncommittedTranslogOps : nbDocs) + equalTo(uncommittedTranslogOps) ); assertThat(stats.getIndex(indexName).getPrimaries().getTranslog().getUncommittedOperations(), equalTo(uncommittedTranslogOps)); }); @@ -435,10 +431,7 @@ public void testTranslogStats() throws Exception { .setTranslog(true) .get(); assertThat(stats.getIndex(indexName), notNullValue()); - assertThat( - stats.getIndex(indexName).getPrimaries().getTranslog().estimatedNumberOfOperations(), - equalTo(softDeletesEnabled ? 
0 : nbDocs) - ); + assertThat(stats.getIndex(indexName).getPrimaries().getTranslog().estimatedNumberOfOperations(), equalTo(0)); assertThat(stats.getIndex(indexName).getPrimaries().getTranslog().getUncommittedOperations(), equalTo(0)); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java index 4dd6646670bd0..cca01a9ec6dcb 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java @@ -694,7 +694,7 @@ public void testSimpleStats() throws Exception { assertThat(stats.getTotal().getRefresh(), notNullValue()); // check get - GetResponse getResponse = client().prepareGet("test2", "type", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test2", "1").execute().actionGet(); assertThat(getResponse.isExists(), equalTo(true)); stats = client().admin().indices().prepareStats().execute().actionGet(); @@ -703,7 +703,7 @@ public void testSimpleStats() throws Exception { assertThat(stats.getTotal().getGet().getMissingCount(), equalTo(0L)); // missing get - getResponse = client().prepareGet("test2", "type", "2").execute().actionGet(); + getResponse = client().prepareGet("test2", "2").execute().actionGet(); assertThat(getResponse.isExists(), equalTo(false)); stats = client().admin().indices().prepareStats().execute().actionGet(); @@ -828,7 +828,6 @@ public void testSegmentsStats() { assertThat(stats.getTotal().getSegments(), notNullValue()); assertThat(stats.getTotal().getSegments().getCount(), equalTo((long) test1.totalNumShards)); - assertThat(stats.getTotal().getSegments().getMemoryInBytes(), greaterThan(0L)); } public void testAllFlags() throws Exception { @@ -1080,40 +1079,6 @@ public void testGroupsParam() throws Exception { } - public void testTypesParam() throws Exception { - createIndex("test1"); - createIndex("test2"); - - ensureGreen(); - - client().prepareIndex("test1", "bar", Integer.toString(1)).setSource("foo", "bar").execute().actionGet(); - client().prepareIndex("test2", "baz", Integer.toString(1)).setSource("foo", "bar").execute().actionGet(); - refresh(); - - IndicesStatsRequestBuilder builder = client().admin().indices().prepareStats(); - IndicesStatsResponse stats = builder.execute().actionGet(); - - assertThat(stats.getTotal().indexing.getTotal().getIndexCount(), greaterThan(0L)); - assertThat(stats.getTotal().indexing.getTypeStats(), is(nullValue())); - - stats = builder.setTypes("bar").execute().actionGet(); - assertThat(stats.getTotal().indexing.getTypeStats().get("bar").getIndexCount(), greaterThan(0L)); - assertThat(stats.getTotal().indexing.getTypeStats().containsKey("baz"), is(false)); - - stats = builder.setTypes("bar", "baz").execute().actionGet(); - assertThat(stats.getTotal().indexing.getTypeStats().get("bar").getIndexCount(), greaterThan(0L)); - assertThat(stats.getTotal().indexing.getTypeStats().get("baz").getIndexCount(), greaterThan(0L)); - - stats = builder.setTypes("*").execute().actionGet(); - assertThat(stats.getTotal().indexing.getTypeStats().get("bar").getIndexCount(), greaterThan(0L)); - assertThat(stats.getTotal().indexing.getTypeStats().get("baz").getIndexCount(), greaterThan(0L)); - - stats = builder.setTypes("*r").execute().actionGet(); - assertThat(stats.getTotal().indexing.getTypeStats().get("bar").getIndexCount(), greaterThan(0L)); - 
assertThat(stats.getTotal().indexing.getTypeStats().containsKey("baz"), is(false)); - - } - private static void set(Flag flag, IndicesStatsRequestBuilder builder, boolean set) { switch (flag) { case Docs: @@ -1248,9 +1213,7 @@ public void testFilterCacheStats() throws Exception { client().prepareIndex("index", "type", "1").setSource("foo", "bar"), client().prepareIndex("index", "type", "2").setSource("foo", "baz") ); - if (IndexSettings.INDEX_SOFT_DELETES_SETTING.get(settings)) { - persistGlobalCheckpoint("index"); // Need to persist the global checkpoint for the soft-deletes retention MP. - } + persistGlobalCheckpoint("index"); // Need to persist the global checkpoint for the soft-deletes retention MP. refresh(); ensureGreen(); @@ -1287,22 +1250,20 @@ public void testFilterCacheStats() throws Exception { // Here we are testing that a fully deleted segment should be dropped and its cached is evicted. // In order to instruct the merge policy not to keep a fully deleted segment, // we need to flush and make that commit safe so that the SoftDeletesPolicy can drop everything. - if (IndexSettings.INDEX_SOFT_DELETES_SETTING.get(settings)) { - persistGlobalCheckpoint("index"); - assertBusy(() -> { - for (final ShardStats shardStats : client().admin().indices().prepareStats("index").get().getIndex("index").getShards()) { - final long maxSeqNo = shardStats.getSeqNoStats().getMaxSeqNo(); - assertTrue( - shardStats.getRetentionLeaseStats() - .retentionLeases() - .leases() - .stream() - .allMatch(retentionLease -> retentionLease.retainingSequenceNumber() == maxSeqNo + 1) - ); - } - }); - flush("index"); - } + persistGlobalCheckpoint("index"); + assertBusy(() -> { + for (final ShardStats shardStats : client().admin().indices().prepareStats("index").get().getIndex("index").getShards()) { + final long maxSeqNo = shardStats.getSeqNoStats().getMaxSeqNo(); + assertTrue( + shardStats.getRetentionLeaseStats() + .retentionLeases() + .leases() + .stream() + .allMatch(retentionLease -> retentionLease.retainingSequenceNumber() == maxSeqNo + 1) + ); + } + }); + flush("index"); logger.info("--> force merging to a single segment"); ForceMergeResponse forceMergeResponse = client().admin() .indices() diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/template/SimpleIndexTemplateIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/template/SimpleIndexTemplateIT.java index f6a8f5fdfee90..c4a4227c0bc9c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/template/SimpleIndexTemplateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/template/SimpleIndexTemplateIT.java @@ -839,11 +839,10 @@ public void testStrictAliasParsingInIndicesCreatedViaTemplates() throws Exceptio .get(); client().prepareIndex("a1", "test", "test").setSource("{}", XContentType.JSON).get(); - BulkResponse response = client().prepareBulk().add(new IndexRequest("a2", "test", "test").source("{}", XContentType.JSON)).get(); + BulkResponse response = client().prepareBulk().add(new IndexRequest("a2").id("test").source("{}", XContentType.JSON)).get(); assertThat(response.hasFailures(), is(false)); assertThat(response.getItems()[0].isFailed(), equalTo(false)); assertThat(response.getItems()[0].getIndex(), equalTo("a2")); - assertThat(response.getItems()[0].getType(), equalTo("test")); assertThat(response.getItems()[0].getId(), equalTo("test")); assertThat(response.getItems()[0].getVersion(), equalTo(1L)); @@ -857,7 +856,7 @@ public void 
testStrictAliasParsingInIndicesCreatedViaTemplates() throws Exceptio // an index that doesn't exist yet will succeed client().prepareIndex("b1", "test", "test").setSource("{}", XContentType.JSON).get(); - response = client().prepareBulk().add(new IndexRequest("b2", "test", "test").source("{}", XContentType.JSON)).get(); + response = client().prepareBulk().add(new IndexRequest("b2").id("test").source("{}", XContentType.JSON)).get(); assertThat(response.hasFailures(), is(false)); assertThat(response.getItems()[0].isFailed(), equalTo(false)); assertThat(response.getItems()[0].getId(), equalTo("test")); diff --git a/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java b/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java index 4ef8b2ba38e67..6317dd62418f3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java @@ -138,7 +138,7 @@ public void testSimulate() throws Exception { source.put("foo", "bar"); source.put("fail", false); source.put("processed", true); - IngestDocument ingestDocument = new IngestDocument("index", "type", "id", null, null, null, source); + IngestDocument ingestDocument = new IngestDocument("index", "id", null, null, null, source); assertThat(simulateDocumentBaseResult.getIngestDocument().getSourceAndMetadata(), equalTo(ingestDocument.getSourceAndMetadata())); assertThat(simulateDocumentBaseResult.getFailure(), nullValue()); @@ -167,7 +167,7 @@ public void testBulkWithIngestFailures() throws Exception { int numRequests = scaledRandomIntBetween(32, 128); BulkRequest bulkRequest = new BulkRequest(); for (int i = 0; i < numRequests; i++) { - IndexRequest indexRequest = new IndexRequest("index", "type", Integer.toString(i)).setPipeline("_id"); + IndexRequest indexRequest = new IndexRequest("index").id(Integer.toString(i)).setPipeline("_id"); indexRequest.source(Requests.INDEX_CONTENT_TYPE, "field", "value", "fail", i % 2 == 0); bulkRequest.add(indexRequest); } @@ -216,10 +216,10 @@ public void testBulkWithUpsert() throws Exception { client().admin().cluster().putPipeline(putPipelineRequest).get(); BulkRequest bulkRequest = new BulkRequest(); - IndexRequest indexRequest = new IndexRequest("index", "type", "1").setPipeline("_id"); + IndexRequest indexRequest = new IndexRequest("index").id("1").setPipeline("_id"); indexRequest.source(Requests.INDEX_CONTENT_TYPE, "field1", "val1"); bulkRequest.add(indexRequest); - UpdateRequest updateRequest = new UpdateRequest("index", "type", "2"); + UpdateRequest updateRequest = new UpdateRequest("index", "2"); updateRequest.doc("{}", Requests.INDEX_CONTENT_TYPE); updateRequest.upsert("{\"field1\":\"upserted_val\"}", XContentType.JSON).upsertRequest().setPipeline("_id"); bulkRequest.add(updateRequest); @@ -227,10 +227,10 @@ public void testBulkWithUpsert() throws Exception { BulkResponse response = client().bulk(bulkRequest).actionGet(); assertThat(response.getItems().length, equalTo(bulkRequest.requests().size())); - Map<String, Object> inserted = client().prepareGet("index", "type", "1").get().getSourceAsMap(); + Map<String, Object> inserted = client().prepareGet("index", "1").get().getSourceAsMap(); assertThat(inserted.get("field1"), equalTo("val1")); assertThat(inserted.get("processed"), equalTo(true)); - Map<String, Object> upserted = client().prepareGet("index", "type", "2").get().getSourceAsMap(); + Map<String, Object> upserted = client().prepareGet("index", "2").get().getSourceAsMap(); assertThat(upserted.get("field1"), 
equalTo("upserted_val")); assertThat(upserted.get("processed"), equalTo(true)); } @@ -258,14 +258,14 @@ public void test() throws Exception { client().prepareIndex("test", "type", "1").setPipeline("_id").setSource("field", "value", "fail", false).get(); - Map<String, Object> doc = client().prepareGet("test", "type", "1").get().getSourceAsMap(); + Map<String, Object> doc = client().prepareGet("test", "1").get().getSourceAsMap(); assertThat(doc.get("field"), equalTo("value")); assertThat(doc.get("processed"), equalTo(true)); client().prepareBulk() .add(client().prepareIndex("test", "type", "2").setSource("field", "value2", "fail", false).setPipeline("_id")) .get(); - doc = client().prepareGet("test", "type", "2").get().getSourceAsMap(); + doc = client().prepareGet("test", "2").get().getSourceAsMap(); assertThat(doc.get("field"), equalTo("value2")); assertThat(doc.get("processed"), equalTo(true)); @@ -452,7 +452,7 @@ public void testPipelineProcessorOnFailure() throws Exception { } client().prepareIndex("test", "_doc").setId("1").setSource("{}", XContentType.JSON).setPipeline("1").get(); - Map<String, Object> inserted = client().prepareGet("test", "_doc", "1").get().getSourceAsMap(); + Map<String, Object> inserted = client().prepareGet("test", "1").get().getSourceAsMap(); assertThat(inserted.get("readme"), equalTo("pipeline with id [3] is a bad pipeline")); } diff --git a/server/src/internalClusterTest/java/org/opensearch/mget/SimpleMgetIT.java b/server/src/internalClusterTest/java/org/opensearch/mget/SimpleMgetIT.java index e2480e0705ae3..3967f93f3a9b8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/mget/SimpleMgetIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/mget/SimpleMgetIT.java @@ -68,8 +68,8 @@ public void testThatMgetShouldWorkWithOneIndexMissing() throws IOException { .get(); MultiGetResponse mgetResponse = client().prepareMultiGet() - .add(new MultiGetRequest.Item("test", "test", "1")) - .add(new MultiGetRequest.Item("nonExistingIndex", "test", "1")) + .add(new MultiGetRequest.Item("test", "1")) + .add(new MultiGetRequest.Item("nonExistingIndex", "1")) .get(); assertThat(mgetResponse.getResponses().length, is(2)); @@ -84,7 +84,7 @@ public void testThatMgetShouldWorkWithOneIndexMissing() throws IOException { is("nonExistingIndex") ); - mgetResponse = client().prepareMultiGet().add(new MultiGetRequest.Item("nonExistingIndex", "test", "1")).get(); + mgetResponse = client().prepareMultiGet().add(new MultiGetRequest.Item("nonExistingIndex", "1")).get(); assertThat(mgetResponse.getResponses().length, is(1)); assertThat(mgetResponse.getResponses()[0].getIndex(), is("nonExistingIndex")); assertThat(mgetResponse.getResponses()[0].isFailed(), is(true)); @@ -105,8 +105,8 @@ public void testThatMgetShouldWorkWithMultiIndexAlias() throws IOException { .get(); MultiGetResponse mgetResponse = client().prepareMultiGet() - .add(new MultiGetRequest.Item("test", "test", "1")) - .add(new MultiGetRequest.Item("multiIndexAlias", "test", "1")) + .add(new MultiGetRequest.Item("test", "1")) + .add(new MultiGetRequest.Item("multiIndexAlias", "1")) .get(); assertThat(mgetResponse.getResponses().length, is(2)); @@ -117,7 +117,7 @@ public void testThatMgetShouldWorkWithMultiIndexAlias() throws IOException { assertThat(mgetResponse.getResponses()[1].isFailed(), is(true)); assertThat(mgetResponse.getResponses()[1].getFailure().getMessage(), containsString("more than one index")); - mgetResponse = client().prepareMultiGet().add(new MultiGetRequest.Item("multiIndexAlias", "test", "1")).get(); + mgetResponse = client().prepareMultiGet().add(new 
MultiGetRequest.Item("multiIndexAlias", "1")).get(); assertThat(mgetResponse.getResponses().length, is(1)); assertThat(mgetResponse.getResponses()[0].getIndex(), is("multiIndexAlias")); assertThat(mgetResponse.getResponses()[0].isFailed(), is(true)); @@ -144,7 +144,7 @@ public void testThatMgetShouldWorkWithAliasRouting() throws IOException { .setRefreshPolicy(IMMEDIATE) .get(); - MultiGetResponse mgetResponse = client().prepareMultiGet().add(new MultiGetRequest.Item("alias1", "test", "1")).get(); + MultiGetResponse mgetResponse = client().prepareMultiGet().add(new MultiGetRequest.Item("alias1", "1")).get(); assertEquals(1, mgetResponse.getResponses().length); assertEquals("test", mgetResponse.getResponses()[0].getIndex()); @@ -172,13 +172,13 @@ public void testThatSourceFilteringIsSupported() throws Exception { for (int i = 0; i < 100; i++) { if (i % 2 == 0) { request.add( - new MultiGetRequest.Item(indexOrAlias(), "type", Integer.toString(i)).fetchSourceContext( + new MultiGetRequest.Item(indexOrAlias(), Integer.toString(i)).fetchSourceContext( new FetchSourceContext(true, new String[] { "included" }, new String[] { "*.hidden_field" }) ) ); } else { request.add( - new MultiGetRequest.Item(indexOrAlias(), "type", Integer.toString(i)).fetchSourceContext(new FetchSourceContext(false)) + new MultiGetRequest.Item(indexOrAlias(), Integer.toString(i)).fetchSourceContext(new FetchSourceContext(false)) ); } } @@ -219,8 +219,8 @@ public void testThatRoutingPerDocumentIsSupported() throws Exception { .get(); MultiGetResponse mgetResponse = client().prepareMultiGet() - .add(new MultiGetRequest.Item(indexOrAlias(), "test", id).routing(routingOtherShard)) - .add(new MultiGetRequest.Item(indexOrAlias(), "test", id)) + .add(new MultiGetRequest.Item(indexOrAlias(), id).routing(routingOtherShard)) + .add(new MultiGetRequest.Item(indexOrAlias(), id)) .get(); assertThat(mgetResponse.getResponses().length, is(2)); diff --git a/server/src/internalClusterTest/java/org/opensearch/persistent/PersistentTasksExecutorFullRestartIT.java b/server/src/internalClusterTest/java/org/opensearch/persistent/PersistentTasksExecutorFullRestartIT.java index 2434afe5b8f06..708388b3328f0 100644 --- a/server/src/internalClusterTest/java/org/opensearch/persistent/PersistentTasksExecutorFullRestartIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/persistent/PersistentTasksExecutorFullRestartIT.java @@ -84,21 +84,13 @@ public void testFullClusterRestart() throws Exception { assertThat(tasksInProgress.tasks().size(), equalTo(numberOfTasks)); // Make sure that at least one of the tasks is running - assertBusy( - () -> { - // Wait for the task to start - assertThat( - client().admin() - .cluster() - .prepareListTasks() - .setActions(TestPersistentTasksExecutor.NAME + "[c]") - .get() - .getTasks() - .size(), - greaterThan(0) - ); - } - ); + assertBusy(() -> { + // Wait for the task to start + assertThat( + client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get().getTasks().size(), + greaterThan(0) + ); + }); // Restart cluster internalCluster().fullRestart(); @@ -113,21 +105,13 @@ public void testFullClusterRestart() throws Exception { } logger.info("Waiting for {} tasks to start", numberOfTasks); - assertBusy( - () -> { - // Wait for all tasks to start - assertThat( - client().admin() - .cluster() - .prepareListTasks() - .setActions(TestPersistentTasksExecutor.NAME + "[c]") - .get() - .getTasks() - .size(), - equalTo(numberOfTasks) - ); - } - ); + assertBusy(() -> { + // 
Wait for all tasks to start + assertThat( + client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get().getTasks().size(), + equalTo(numberOfTasks) + ); + }); logger.info("Complete all tasks"); // Complete the running task and make sure it finishes properly @@ -136,18 +120,16 @@ public void testFullClusterRestart() throws Exception { equalTo(numberOfTasks) ); - assertBusy( - () -> { - // Make sure the task is removed from the cluster state - assertThat( - ((PersistentTasksCustomMetadata) internalCluster().clusterService() - .state() - .getMetadata() - .custom(PersistentTasksCustomMetadata.TYPE)).tasks(), - empty() - ); - } - ); + assertBusy(() -> { + // Make sure the task is removed from the cluster state + assertThat( + ((PersistentTasksCustomMetadata) internalCluster().clusterService() + .state() + .getMetadata() + .custom(PersistentTasksCustomMetadata.TYPE)).tasks(), + empty() + ); + }); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/persistent/PersistentTasksExecutorIT.java b/server/src/internalClusterTest/java/org/opensearch/persistent/PersistentTasksExecutorIT.java index 4e3cfa4fbb5c0..9ea80ae7dbd89 100644 --- a/server/src/internalClusterTest/java/org/opensearch/persistent/PersistentTasksExecutorIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/persistent/PersistentTasksExecutorIT.java @@ -95,21 +95,13 @@ public void testPersistentActionFailure() throws Exception { PlainActionFuture<PersistentTask<TestParams>> future = new PlainActionFuture<>(); persistentTasksService.sendStartRequest(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME, new TestParams("Blah"), future); long allocationId = future.get().getAllocationId(); - assertBusy( - () -> { - // Wait for the task to start - assertThat( - client().admin() - .cluster() - .prepareListTasks() - .setActions(TestPersistentTasksExecutor.NAME + "[c]") - .get() - .getTasks() - .size(), - equalTo(1) - ); - } - ); + assertBusy(() -> { + // Wait for the task to start + assertThat( + client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get().getTasks().size(), + equalTo(1) + ); + }); TaskInfo firstRunningTask = client().admin() .cluster() .prepareListTasks() @@ -130,15 +122,13 @@ public void testPersistentActionFailure() throws Exception { ); logger.info("Waiting for persistent task with id {} to disappear", firstRunningTask.getId()); - assertBusy( - () -> { - // Wait for the task to disappear completely - assertThat( - client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get().getTasks(), - empty() - ); - } - ); + assertBusy(() -> { + // Wait for the task to disappear completely + assertThat( + client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get().getTasks(), + empty() + ); + }); } public void testPersistentActionCompletion() throws Exception { @@ -147,21 +137,13 @@ public void testPersistentActionCompletion() throws Exception { String taskId = UUIDs.base64UUID(); persistentTasksService.sendStartRequest(taskId, TestPersistentTasksExecutor.NAME, new TestParams("Blah"), future); long allocationId = future.get().getAllocationId(); - assertBusy( - () -> { - // Wait for the task to start - assertThat( - client().admin() - .cluster() - .prepareListTasks() - .setActions(TestPersistentTasksExecutor.NAME + "[c]") - .get() - .getTasks() - .size(), - equalTo(1) - ); - } - ); + assertBusy(() -> { + // Wait for the task to start + assertThat( +
client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get().getTasks().size(), + equalTo(1) + ); + }); TaskInfo firstRunningTask = client().admin() .cluster() .prepareListTasks() @@ -225,15 +207,13 @@ public void testPersistentActionWithNoAvailableNode() throws Exception { internalCluster().stopRandomNode(settings -> "test".equals(settings.get("node.attr.test_attr"))); - assertBusy( - () -> { - // Wait for the task to disappear completely - assertThat( - client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get().getTasks(), - empty() - ); - } - ); + assertBusy(() -> { + // Wait for the task to disappear completely + assertThat( + client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get().getTasks(), + empty() + ); + }); // Remove the persistent task PlainActionFuture<PersistentTask<?>> removeFuture = new PlainActionFuture<>(); @@ -368,21 +348,13 @@ public void testCreatePersistentTaskWithDuplicateId() throws Exception { persistentTasksService.sendStartRequest(taskId, TestPersistentTasksExecutor.NAME, new TestParams("Blah"), future2); assertFutureThrows(future2, ResourceAlreadyExistsException.class); - assertBusy( - () -> { - // Wait for the task to start - assertThat( - client().admin() - .cluster() - .prepareListTasks() - .setActions(TestPersistentTasksExecutor.NAME + "[c]") - .get() - .getTasks() - .size(), - equalTo(1) - ); - } - ); + assertBusy(() -> { + // Wait for the task to start + assertThat( + client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get().getTasks().size(), + equalTo(1) + ); + }); TaskInfo firstRunningTask = client().admin() .cluster() @@ -400,15 +372,13 @@ public void testCreatePersistentTaskWithDuplicateId() throws Exception { ); logger.info("Waiting for persistent task with id {} to disappear", firstRunningTask.getId()); - assertBusy( - () -> { - // Wait for the task to disappear completely - assertThat( - client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get().getTasks(), - empty() - ); - } - ); + assertBusy(() -> { + // Wait for the task to disappear completely + assertThat( + client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get().getTasks(), + empty() + ); + }); } public void testUnassignRunningPersistentTask() throws Exception { @@ -489,21 +459,13 @@ private void stopOrCancelTask(TaskId taskId) { } private static void waitForTaskToStart() throws Exception { - assertBusy( - () -> { - // Wait for the task to start - assertThat( - client().admin() - .cluster() - .prepareListTasks() - .setActions(TestPersistentTasksExecutor.NAME + "[c]") - .get() - .getTasks() - .size(), - equalTo(1) - ); - } - ); + assertBusy(() -> { + // Wait for the task to start + assertThat( + client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get().getTasks().size(), + equalTo(1) + ); + }); } private static void assertClusterStateHasTask(String taskId) { diff --git a/server/src/internalClusterTest/java/org/opensearch/recovery/RecoveryWhileUnderLoadIT.java b/server/src/internalClusterTest/java/org/opensearch/recovery/RecoveryWhileUnderLoadIT.java index 3ada65909b72f..26b3e9ae336dc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/recovery/RecoveryWhileUnderLoadIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/recovery/RecoveryWhileUnderLoadIT.java @@ -419,7
+419,7 @@ private void iterateAssertCount(final int numberOfShards, final int iterations, ShardId docShard = clusterService.operationRouting().shardId(state, "test", id, null); if (docShard.id() == shard) { for (ShardRouting shardRouting : state.routingTable().shardRoutingTable("test", shard)) { - GetResponse response = client().prepareGet("test", "type", id) + GetResponse response = client().prepareGet("test", id) .setPreference("_only_nodes:" + shardRouting.currentNodeId()) .get(); if (response.isExists()) { diff --git a/server/src/internalClusterTest/java/org/opensearch/recovery/RelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/recovery/RelocationIT.java index c37066962cc11..9b26ee101909b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/recovery/RelocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/recovery/RelocationIT.java @@ -749,7 +749,6 @@ public void testRelocationEstablishedPeerRecoveryRetentionLeases() throws Except .prepareCreate(indexName) .setSettings( Settings.builder() - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, randomIntBetween(0, halfNodes - 1)) .put("index.routing.allocation.include.color", "blue") ) @@ -811,8 +810,8 @@ public void sendRequest( if (chunkRequest.name().startsWith(IndexFileNames.SEGMENTS)) { // corrupting the segments_N files in order to make sure future recovery re-send files logger.debug("corrupting [{}] to {}. file name: [{}]", action, connection.getNode(), chunkRequest.name()); - assert chunkRequest.content().toBytesRef().bytes == chunkRequest.content() - .toBytesRef().bytes : "no internal reference!!"; + assert chunkRequest.content().toBytesRef().bytes == chunkRequest.content().toBytesRef().bytes + : "no internal reference!!"; byte[] array = chunkRequest.content().toBytesRef().bytes; array[0] = (byte) ~array[0]; // flip one byte in the content corruptionCount.countDown(); diff --git a/server/src/internalClusterTest/java/org/opensearch/recovery/SimpleRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/recovery/SimpleRecoveryIT.java index cb80dddb81cb6..4ebb840c600d2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/recovery/SimpleRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/recovery/SimpleRecoveryIT.java @@ -67,12 +67,12 @@ public void testSimpleRecovery() throws Exception { NumShards numShards = getNumShards("test"); - client().index(indexRequest("test").type("type1").id("1").source(source("1", "test"), XContentType.JSON)).actionGet(); + client().index(indexRequest("test").id("1").source(source("1", "test"), XContentType.JSON)).actionGet(); FlushResponse flushResponse = client().admin().indices().flush(flushRequest("test")).actionGet(); assertThat(flushResponse.getTotalShards(), equalTo(numShards.totalNumShards)); assertThat(flushResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries)); assertThat(flushResponse.getFailedShards(), equalTo(0)); - client().index(indexRequest("test").type("type1").id("2").source(source("2", "test"), XContentType.JSON)).actionGet(); + client().index(indexRequest("test").id("2").source(source("2", "test"), XContentType.JSON)).actionGet(); RefreshResponse refreshResponse = client().admin().indices().refresh(refreshRequest("test")).actionGet(); assertThat(refreshResponse.getTotalShards(), equalTo(numShards.totalNumShards)); assertThat(refreshResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries)); @@ -86,13 +86,13 @@ 
public void testSimpleRecovery() throws Exception { GetResponse getResult; for (int i = 0; i < 5; i++) { - getResult = client().get(getRequest("test").type("type1").id("1")).actionGet(); + getResult = client().get(getRequest("test").id("1")).actionGet(); assertThat(getResult.getSourceAsString(), equalTo(source("1", "test"))); - getResult = client().get(getRequest("test").type("type1").id("1")).actionGet(); + getResult = client().get(getRequest("test").id("1")).actionGet(); assertThat(getResult.getSourceAsString(), equalTo(source("1", "test"))); - getResult = client().get(getRequest("test").type("type1").id("2")).actionGet(); + getResult = client().get(getRequest("test").id("2")).actionGet(); assertThat(getResult.getSourceAsString(), equalTo(source("2", "test"))); - getResult = client().get(getRequest("test").type("type1").id("2")).actionGet(); + getResult = client().get(getRequest("test").id("2")).actionGet(); assertThat(getResult.getSourceAsString(), equalTo(source("2", "test"))); } @@ -103,17 +103,17 @@ public void testSimpleRecovery() throws Exception { ensureGreen(); for (int i = 0; i < 5; i++) { - getResult = client().get(getRequest("test").type("type1").id("1")).actionGet(); + getResult = client().get(getRequest("test").id("1")).actionGet(); assertThat(getResult.getSourceAsString(), equalTo(source("1", "test"))); - getResult = client().get(getRequest("test").type("type1").id("1")).actionGet(); + getResult = client().get(getRequest("test").id("1")).actionGet(); assertThat(getResult.getSourceAsString(), equalTo(source("1", "test"))); - getResult = client().get(getRequest("test").type("type1").id("1")).actionGet(); + getResult = client().get(getRequest("test").id("1")).actionGet(); assertThat(getResult.getSourceAsString(), equalTo(source("1", "test"))); - getResult = client().get(getRequest("test").type("type1").id("2")).actionGet(); + getResult = client().get(getRequest("test").id("2")).actionGet(); assertThat(getResult.getSourceAsString(), equalTo(source("2", "test"))); - getResult = client().get(getRequest("test").type("type1").id("2")).actionGet(); + getResult = client().get(getRequest("test").id("2")).actionGet(); assertThat(getResult.getSourceAsString(), equalTo(source("2", "test"))); - getResult = client().get(getRequest("test").type("type1").id("2")).actionGet(); + getResult = client().get(getRequest("test").id("2")).actionGet(); assertThat(getResult.getSourceAsString(), equalTo(source("2", "test"))); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/routing/AliasRoutingIT.java b/server/src/internalClusterTest/java/org/opensearch/routing/AliasRoutingIT.java index 3b05b6d3bb21b..a1dd32aa300c9 100644 --- a/server/src/internalClusterTest/java/org/opensearch/routing/AliasRoutingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/routing/AliasRoutingIT.java @@ -66,16 +66,16 @@ public void testAliasCrudRouting() throws Exception { client().prepareIndex("alias0", "type1", "1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> verifying get with no routing, should not find anything"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false)); + assertThat(client().prepareGet("test", "1").execute().actionGet().isExists(), equalTo(false)); } logger.info("--> verifying get with routing, should find"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), 
equalTo(true)); + assertThat(client().prepareGet("test", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true)); } logger.info("--> verifying get with routing alias, should find"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("alias0", "type1", "1").execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet("alias0", "1").execute().actionGet().isExists(), equalTo(true)); } logger.info("--> updating with id [1] and routing through alias"); @@ -85,9 +85,9 @@ public void testAliasCrudRouting() throws Exception { .execute() .actionGet(); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("alias0", "type1", "1").execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet("alias0", "1").execute().actionGet().isExists(), equalTo(true)); assertThat( - client().prepareGet("alias0", "type1", "1").execute().actionGet().getSourceAsMap().get("field").toString(), + client().prepareGet("alias0", "1").execute().actionGet().getSourceAsMap().get("field").toString(), equalTo("value2") ); } @@ -95,29 +95,29 @@ public void testAliasCrudRouting() throws Exception { logger.info("--> deleting with no routing, should not delete anything"); client().prepareDelete("test", "type1", "1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false)); - assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true)); - assertThat(client().prepareGet("alias0", "type1", "1").execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet("test", "1").execute().actionGet().isExists(), equalTo(false)); + assertThat(client().prepareGet("test", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet("alias0", "1").execute().actionGet().isExists(), equalTo(true)); } logger.info("--> deleting with routing alias, should delete"); client().prepareDelete("alias0", "type1", "1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false)); - assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(false)); - assertThat(client().prepareGet("alias0", "type1", "1").execute().actionGet().isExists(), equalTo(false)); + assertThat(client().prepareGet("test", "1").execute().actionGet().isExists(), equalTo(false)); + assertThat(client().prepareGet("test", "1").setRouting("0").execute().actionGet().isExists(), equalTo(false)); + assertThat(client().prepareGet("alias0", "1").execute().actionGet().isExists(), equalTo(false)); } logger.info("--> indexing with id [1], and routing [0] using alias"); client().prepareIndex("alias0", "type1", "1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> verifying get with no routing, should not find anything"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false)); + assertThat(client().prepareGet("test", "1").execute().actionGet().isExists(), equalTo(false)); } logger.info("--> verifying get with routing, should find"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true)); - 
assertThat(client().prepareGet("alias0", "type1", "1").execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet("test", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet("alias0", "1").execute().actionGet().isExists(), equalTo(true)); } } @@ -137,11 +137,11 @@ public void testAliasSearchRouting() throws Exception { client().prepareIndex("alias0", "type1", "1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> verifying get with no routing, should not find anything"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false)); + assertThat(client().prepareGet("test", "1").execute().actionGet().isExists(), equalTo(false)); } logger.info("--> verifying get with routing, should find"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("alias0", "type1", "1").execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet("alias0", "1").execute().actionGet().isExists(), equalTo(true)); } logger.info("--> search with no routing, should fine one"); @@ -494,22 +494,22 @@ public void testAliasSearchRoutingWithTwoIndices() throws Exception { client().prepareIndex("alias-a0", "type1", "1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> verifying get with no routing, should not find anything"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test-a", "type1", "1").execute().actionGet().isExists(), equalTo(false)); + assertThat(client().prepareGet("test-a", "1").execute().actionGet().isExists(), equalTo(false)); } logger.info("--> verifying get with routing, should find"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("alias-a0", "type1", "1").execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet("alias-a0", "1").execute().actionGet().isExists(), equalTo(true)); } logger.info("--> indexing with id [0], and routing [1] using alias to test-b"); client().prepareIndex("alias-b1", "type1", "1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> verifying get with no routing, should not find anything"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test-a", "type1", "1").execute().actionGet().isExists(), equalTo(false)); + assertThat(client().prepareGet("test-a", "1").execute().actionGet().isExists(), equalTo(false)); } logger.info("--> verifying get with routing, should find"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("alias-b1", "type1", "1").execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet("alias-b1", "1").execute().actionGet().isExists(), equalTo(true)); } logger.info("--> search with alias-a1,alias-b0, should not find"); @@ -655,7 +655,7 @@ public void testIndexingAliasesOverTime() throws Exception { logger.info("--> verifying get and search with routing, should find"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "type1", "0").setRouting("3").execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet("test", "0").setRouting("3").execute().actionGet().isExists(), equalTo(true)); assertThat( client().prepareSearch("alias") .setQuery(QueryBuilders.matchAllQuery()) @@ -717,8 +717,8 @@ public void testIndexingAliasesOverTime() throws Exception { logger.info("--> verifying get and search with routing, 
should find"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "type1", "0").setRouting("3").execute().actionGet().isExists(), equalTo(true)); - assertThat(client().prepareGet("test", "type1", "1").setRouting("4").execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet("test", "0").setRouting("3").execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet("test", "1").setRouting("4").execute().actionGet().isExists(), equalTo(true)); assertThat( client().prepareSearch("alias") .setQuery(QueryBuilders.matchAllQuery()) diff --git a/server/src/internalClusterTest/java/org/opensearch/routing/PartitionedRoutingIT.java b/server/src/internalClusterTest/java/org/opensearch/routing/PartitionedRoutingIT.java index 8dd2bd7c1235e..905445b2d7aeb 100644 --- a/server/src/internalClusterTest/java/org/opensearch/routing/PartitionedRoutingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/routing/PartitionedRoutingIT.java @@ -231,7 +231,7 @@ private void verifyGets(String index, Map> routingToDocument String routing = routingEntry.getKey(); for (String id : routingEntry.getValue()) { - assertTrue(client().prepareGet(index, "type", id).setRouting(routing).execute().actionGet().isExists()); + assertTrue(client().prepareGet(index, id).setRouting(routing).execute().actionGet().isExists()); } } } diff --git a/server/src/internalClusterTest/java/org/opensearch/routing/SimpleRoutingIT.java b/server/src/internalClusterTest/java/org/opensearch/routing/SimpleRoutingIT.java index ae9f08f5c5489..6e9498d177aaf 100644 --- a/server/src/internalClusterTest/java/org/opensearch/routing/SimpleRoutingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/routing/SimpleRoutingIT.java @@ -100,25 +100,25 @@ public void testSimpleCrudRouting() throws Exception { .get(); logger.info("--> verifying get with no routing, should not find anything"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false)); + assertThat(client().prepareGet("test", "1").execute().actionGet().isExists(), equalTo(false)); } logger.info("--> verifying get with routing, should find"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "type1", "1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet("test", "1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(true)); } logger.info("--> deleting with no routing, should not delete anything"); client().prepareDelete("test", "type1", "1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false)); - assertThat(client().prepareGet("test", "type1", "1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet("test", "1").execute().actionGet().isExists(), equalTo(false)); + assertThat(client().prepareGet("test", "1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(true)); } logger.info("--> deleting with routing, should delete"); client().prepareDelete("test", "type1", "1").setRouting(routingValue).setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false)); - assertThat(client().prepareGet("test", "type1", 
"1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(false)); + assertThat(client().prepareGet("test", "1").execute().actionGet().isExists(), equalTo(false)); + assertThat(client().prepareGet("test", "1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(false)); } logger.info("--> indexing with id [1], and routing [0]"); @@ -129,11 +129,11 @@ public void testSimpleCrudRouting() throws Exception { .get(); logger.info("--> verifying get with no routing, should not find anything"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false)); + assertThat(client().prepareGet("test", "1").execute().actionGet().isExists(), equalTo(false)); } logger.info("--> verifying get with routing, should find"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "type1", "1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet("test", "1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(true)); } } @@ -150,11 +150,11 @@ public void testSimpleSearchRouting() { .get(); logger.info("--> verifying get with no routing, should not find anything"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false)); + assertThat(client().prepareGet("test", "1").execute().actionGet().isExists(), equalTo(false)); } logger.info("--> verifying get with routing, should find"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "type1", "1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet("test", "1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(true)); } logger.info("--> search with no routing, should fine one"); @@ -381,10 +381,7 @@ public void testRequiredRoutingCrudApis() throws Exception { logger.info("--> verifying get with routing, should find"); for (int i = 0; i < 5; i++) { - assertThat( - client().prepareGet(indexOrAlias(), "type1", "1").setRouting(routingValue).execute().actionGet().isExists(), - equalTo(true) - ); + assertThat(client().prepareGet(indexOrAlias(), "1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(true)); } logger.info("--> deleting with no routing, should fail"); @@ -397,16 +394,13 @@ public void testRequiredRoutingCrudApis() throws Exception { for (int i = 0; i < 5; i++) { try { - client().prepareGet(indexOrAlias(), "type1", "1").execute().actionGet().isExists(); + client().prepareGet(indexOrAlias(), "1").execute().actionGet().isExists(); fail("get with missing routing when routing is required should fail"); } catch (RoutingMissingException e) { assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST)); - assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[1]")); + assertThat(e.getMessage(), equalTo("routing is required for [test]/[1]")); } - assertThat( - client().prepareGet(indexOrAlias(), "type1", "1").setRouting(routingValue).execute().actionGet().isExists(), - equalTo(true) - ); + assertThat(client().prepareGet(indexOrAlias(), "1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(true)); } try { @@ -427,13 +421,13 @@ public void testRequiredRoutingCrudApis() throws Exception { for (int i = 0; i < 5; i++) { try { - client().prepareGet(indexOrAlias(), "type1", "1").execute().actionGet().isExists(); + client().prepareGet(indexOrAlias(), 
"1").execute().actionGet().isExists(); fail(); } catch (RoutingMissingException e) { assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST)); - assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[1]")); + assertThat(e.getMessage(), equalTo("routing is required for [test]/[1]")); } - GetResponse getResponse = client().prepareGet(indexOrAlias(), "type1", "1").setRouting(routingValue).execute().actionGet(); + GetResponse getResponse = client().prepareGet(indexOrAlias(), "1").setRouting(routingValue).execute().actionGet(); assertThat(getResponse.isExists(), equalTo(true)); assertThat(getResponse.getSourceAsMap().get("field"), equalTo("value2")); } @@ -442,16 +436,13 @@ public void testRequiredRoutingCrudApis() throws Exception { for (int i = 0; i < 5; i++) { try { - client().prepareGet(indexOrAlias(), "type1", "1").execute().actionGet().isExists(); + client().prepareGet(indexOrAlias(), "1").execute().actionGet().isExists(); fail(); } catch (RoutingMissingException e) { assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST)); - assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[1]")); + assertThat(e.getMessage(), equalTo("routing is required for [test]/[1]")); } - assertThat( - client().prepareGet(indexOrAlias(), "type1", "1").setRouting(routingValue).execute().actionGet().isExists(), - equalTo(false) - ); + assertThat(client().prepareGet(indexOrAlias(), "1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(false)); } } @@ -476,7 +467,7 @@ public void testRequiredRoutingBulk() throws Exception { ensureGreen(); { BulkResponse bulkResponse = client().prepareBulk() - .add(Requests.indexRequest(indexOrAlias()).type("type1").id("1").source(Requests.INDEX_CONTENT_TYPE, "field", "value")) + .add(Requests.indexRequest(indexOrAlias()).id("1").source(Requests.INDEX_CONTENT_TYPE, "field", "value")) .execute() .actionGet(); assertThat(bulkResponse.getItems().length, equalTo(1)); @@ -487,19 +478,13 @@ public void testRequiredRoutingBulk() throws Exception { assertThat(bulkItemResponse.getOpType(), equalTo(DocWriteRequest.OpType.INDEX)); assertThat(bulkItemResponse.getFailure().getStatus(), equalTo(RestStatus.BAD_REQUEST)); assertThat(bulkItemResponse.getFailure().getCause(), instanceOf(RoutingMissingException.class)); - assertThat(bulkItemResponse.getFailureMessage(), containsString("routing is required for [test]/[type1]/[1]")); + assertThat(bulkItemResponse.getFailureMessage(), containsString("routing is required for [test]/[1]")); } } { BulkResponse bulkResponse = client().prepareBulk() - .add( - Requests.indexRequest(indexOrAlias()) - .type("type1") - .id("1") - .routing("0") - .source(Requests.INDEX_CONTENT_TYPE, "field", "value") - ) + .add(Requests.indexRequest(indexOrAlias()).id("1").routing("0").source(Requests.INDEX_CONTENT_TYPE, "field", "value")) .execute() .actionGet(); assertThat(bulkResponse.hasFailures(), equalTo(false)); @@ -507,7 +492,7 @@ public void testRequiredRoutingBulk() throws Exception { { BulkResponse bulkResponse = client().prepareBulk() - .add(new UpdateRequest(indexOrAlias(), "type1", "1").doc(Requests.INDEX_CONTENT_TYPE, "field", "value2")) + .add(new UpdateRequest(indexOrAlias(), "1").doc(Requests.INDEX_CONTENT_TYPE, "field", "value2")) .execute() .actionGet(); assertThat(bulkResponse.getItems().length, equalTo(1)); @@ -518,23 +503,20 @@ public void testRequiredRoutingBulk() throws Exception { assertThat(bulkItemResponse.getOpType(), equalTo(DocWriteRequest.OpType.UPDATE)); 
assertThat(bulkItemResponse.getFailure().getStatus(), equalTo(RestStatus.BAD_REQUEST)); assertThat(bulkItemResponse.getFailure().getCause(), instanceOf(RoutingMissingException.class)); - assertThat(bulkItemResponse.getFailureMessage(), containsString("routing is required for [test]/[type1]/[1]")); + assertThat(bulkItemResponse.getFailureMessage(), containsString("routing is required for [test]/[1]")); } } { BulkResponse bulkResponse = client().prepareBulk() - .add(new UpdateRequest(indexOrAlias(), "type1", "1").doc(Requests.INDEX_CONTENT_TYPE, "field", "value2").routing("0")) + .add(new UpdateRequest(indexOrAlias(), "1").doc(Requests.INDEX_CONTENT_TYPE, "field", "value2").routing("0")) .execute() .actionGet(); assertThat(bulkResponse.hasFailures(), equalTo(false)); } { - BulkResponse bulkResponse = client().prepareBulk() - .add(Requests.deleteRequest(indexOrAlias()).type("type1").id("1")) - .execute() - .actionGet(); + BulkResponse bulkResponse = client().prepareBulk().add(Requests.deleteRequest(indexOrAlias()).id("1")).execute().actionGet(); assertThat(bulkResponse.getItems().length, equalTo(1)); assertThat(bulkResponse.hasFailures(), equalTo(true)); @@ -543,13 +525,13 @@ public void testRequiredRoutingBulk() throws Exception { assertThat(bulkItemResponse.getOpType(), equalTo(DocWriteRequest.OpType.DELETE)); assertThat(bulkItemResponse.getFailure().getStatus(), equalTo(RestStatus.BAD_REQUEST)); assertThat(bulkItemResponse.getFailure().getCause(), instanceOf(RoutingMissingException.class)); - assertThat(bulkItemResponse.getFailureMessage(), containsString("routing is required for [test]/[type1]/[1]")); + assertThat(bulkItemResponse.getFailureMessage(), containsString("routing is required for [test]/[1]")); } } { BulkResponse bulkResponse = client().prepareBulk() - .add(Requests.deleteRequest(indexOrAlias()).type("type1").id("1").routing("0")) + .add(Requests.deleteRequest(indexOrAlias()).id("1").routing("0")) .execute() .actionGet(); assertThat(bulkResponse.getItems().length, equalTo(1)); @@ -588,21 +570,18 @@ public void testRequiredRoutingMappingVariousAPIs() throws Exception { .get(); logger.info("--> verifying get with id [1] with routing [0], should succeed"); - assertThat( - client().prepareGet(indexOrAlias(), "type1", "1").setRouting(routingValue).execute().actionGet().isExists(), - equalTo(true) - ); + assertThat(client().prepareGet(indexOrAlias(), "1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(true)); logger.info("--> verifying get with id [1], with no routing, should fail"); try { - client().prepareGet(indexOrAlias(), "type1", "1").get(); + client().prepareGet(indexOrAlias(), "1").get(); fail(); } catch (RoutingMissingException e) { - assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[1]")); + assertThat(e.getMessage(), equalTo("routing is required for [test]/[1]")); } logger.info("--> verifying explain with id [2], with routing [0], should succeed"); - ExplainResponse explainResponse = client().prepareExplain(indexOrAlias(), "type1", "2") + ExplainResponse explainResponse = client().prepareExplain(indexOrAlias(), "2") .setQuery(QueryBuilders.matchAllQuery()) .setRouting(routingValue) .get(); @@ -611,22 +590,22 @@ public void testRequiredRoutingMappingVariousAPIs() throws Exception { logger.info("--> verifying explain with id [2], with no routing, should fail"); try { - client().prepareExplain(indexOrAlias(), "type1", "2").setQuery(QueryBuilders.matchAllQuery()).get(); + client().prepareExplain(indexOrAlias(), 
"2").setQuery(QueryBuilders.matchAllQuery()).get(); fail(); } catch (RoutingMissingException e) { - assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[2]")); + assertThat(e.getMessage(), equalTo("routing is required for [test]/[2]")); } logger.info("--> verifying term vector with id [1], with routing [0], should succeed"); - TermVectorsResponse termVectorsResponse = client().prepareTermVectors(indexOrAlias(), "type1", "1").setRouting(routingValue).get(); + TermVectorsResponse termVectorsResponse = client().prepareTermVectors(indexOrAlias(), "1").setRouting(routingValue).get(); assertThat(termVectorsResponse.isExists(), equalTo(true)); assertThat(termVectorsResponse.getId(), equalTo("1")); try { - client().prepareTermVectors(indexOrAlias(), "type1", "1").get(); + client().prepareTermVectors(indexOrAlias(), "1").get(); fail(); } catch (RoutingMissingException e) { - assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[1]")); + assertThat(e.getMessage(), equalTo("routing is required for [test]/[1]")); } UpdateResponse updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1") @@ -640,13 +619,13 @@ public void testRequiredRoutingMappingVariousAPIs() throws Exception { client().prepareUpdate(indexOrAlias(), "type1", "1").setDoc(Requests.INDEX_CONTENT_TYPE, "field1", "value1").get(); fail(); } catch (RoutingMissingException e) { - assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[1]")); + assertThat(e.getMessage(), equalTo("routing is required for [test]/[1]")); } logger.info("--> verifying mget with ids [1,2], with routing [0], should succeed"); MultiGetResponse multiGetResponse = client().prepareMultiGet() - .add(new MultiGetRequest.Item(indexOrAlias(), "type1", "1").routing("0")) - .add(new MultiGetRequest.Item(indexOrAlias(), "type1", "2").routing("0")) + .add(new MultiGetRequest.Item(indexOrAlias(), "1").routing("0")) + .add(new MultiGetRequest.Item(indexOrAlias(), "2").routing("0")) .get(); assertThat(multiGetResponse.getResponses().length, equalTo(2)); assertThat(multiGetResponse.getResponses()[0].isFailed(), equalTo(false)); @@ -656,20 +635,20 @@ public void testRequiredRoutingMappingVariousAPIs() throws Exception { logger.info("--> verifying mget with ids [1,2], with no routing, should fail"); multiGetResponse = client().prepareMultiGet() - .add(new MultiGetRequest.Item(indexOrAlias(), "type1", "1")) - .add(new MultiGetRequest.Item(indexOrAlias(), "type1", "2")) + .add(new MultiGetRequest.Item(indexOrAlias(), "1")) + .add(new MultiGetRequest.Item(indexOrAlias(), "2")) .get(); assertThat(multiGetResponse.getResponses().length, equalTo(2)); assertThat(multiGetResponse.getResponses()[0].isFailed(), equalTo(true)); assertThat(multiGetResponse.getResponses()[0].getFailure().getId(), equalTo("1")); - assertThat(multiGetResponse.getResponses()[0].getFailure().getMessage(), equalTo("routing is required for [test]/[type1]/[1]")); + assertThat(multiGetResponse.getResponses()[0].getFailure().getMessage(), equalTo("routing is required for [test]/[1]")); assertThat(multiGetResponse.getResponses()[1].isFailed(), equalTo(true)); assertThat(multiGetResponse.getResponses()[1].getFailure().getId(), equalTo("2")); - assertThat(multiGetResponse.getResponses()[1].getFailure().getMessage(), equalTo("routing is required for [test]/[type1]/[2]")); + assertThat(multiGetResponse.getResponses()[1].getFailure().getMessage(), equalTo("routing is required for [test]/[2]")); MultiTermVectorsResponse multiTermVectorsResponse = 
client().prepareMultiTermVectors() - .add(new TermVectorsRequest(indexOrAlias(), "type1", "1").routing(routingValue)) - .add(new TermVectorsRequest(indexOrAlias(), "type1", "2").routing(routingValue)) + .add(new TermVectorsRequest(indexOrAlias(), "1").routing(routingValue)) + .add(new TermVectorsRequest(indexOrAlias(), "2").routing(routingValue)) .get(); assertThat(multiTermVectorsResponse.getResponses().length, equalTo(2)); assertThat(multiTermVectorsResponse.getResponses()[0].getId(), equalTo("1")); @@ -682,15 +661,15 @@ public void testRequiredRoutingMappingVariousAPIs() throws Exception { assertThat(multiTermVectorsResponse.getResponses()[1].getResponse().isExists(), equalTo(true)); multiTermVectorsResponse = client().prepareMultiTermVectors() - .add(new TermVectorsRequest(indexOrAlias(), "type1", "1")) - .add(new TermVectorsRequest(indexOrAlias(), "type1", "2")) + .add(new TermVectorsRequest(indexOrAlias(), "1")) + .add(new TermVectorsRequest(indexOrAlias(), "2")) .get(); assertThat(multiTermVectorsResponse.getResponses().length, equalTo(2)); assertThat(multiTermVectorsResponse.getResponses()[0].getId(), equalTo("1")); assertThat(multiTermVectorsResponse.getResponses()[0].isFailed(), equalTo(true)); assertThat( multiTermVectorsResponse.getResponses()[0].getFailure().getCause().getMessage(), - equalTo("routing is required for [test]/[type1]/[1]") + equalTo("routing is required for [test]/[1]") ); assertThat(multiTermVectorsResponse.getResponses()[0].getResponse(), nullValue()); assertThat(multiTermVectorsResponse.getResponses()[1].getId(), equalTo("2")); @@ -698,7 +677,7 @@ public void testRequiredRoutingMappingVariousAPIs() throws Exception { assertThat(multiTermVectorsResponse.getResponses()[1].getResponse(), nullValue()); assertThat( multiTermVectorsResponse.getResponses()[1].getFailure().getCause().getMessage(), - equalTo("routing is required for [test]/[type1]/[2]") + equalTo("routing is required for [test]/[2]") ); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/BooleanTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/BooleanTermsIT.java index 60d2c3c05a569..ee406fd94d149 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/BooleanTermsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/BooleanTermsIT.java @@ -99,7 +99,6 @@ public void setupSuiteScopeCluster() throws Exception { public void testSingleValueField() throws Exception { SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .addAggregation(terms("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values()))) .get(); @@ -132,7 +131,6 @@ public void testSingleValueField() throws Exception { public void testMultiValueField() throws Exception { SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .addAggregation(terms("terms").field(MULTI_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values()))) .get(); @@ -165,7 +163,6 @@ public void testMultiValueField() throws Exception { public void testUnmapped() throws Exception { SearchResponse response = client().prepareSearch("idx_unmapped") - .setTypes("type") .addAggregation( terms("terms").field(SINGLE_VALUED_FIELD_NAME).size(between(1, 5)).collectMode(randomFrom(SubAggCollectionMode.values())) ) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramIT.java 
b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramIT.java index 8aa4d517d8f36..9c7e2be8b9121 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramIT.java @@ -1788,7 +1788,6 @@ public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAsCompound private void assertMultiSortResponse(int[] expectedDays, BucketOrder... order) { ZonedDateTime[] expectedKeys = Arrays.stream(expectedDays).mapToObj(d -> date(1, d)).toArray(ZonedDateTime[]::new); SearchResponse response = client().prepareSearch("sort_idx") - .setTypes("type") .addAggregation( dateHistogram("histo").field("date") .dateHistogramInterval(DateHistogramInterval.DAY) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java index b2e24e73c2790..39ddac46c8220 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java @@ -121,7 +121,6 @@ public void testIssue10719() throws Exception { // statement boolean asc = randomBoolean(); SearchResponse response = client().prepareSearch("test") - .setTypes("book") .setSearchType(SearchType.QUERY_THEN_FETCH) .addAggregation( terms("genres").field("genre") diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DoubleTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DoubleTermsIT.java index cdfab74c58774..58fce68b12a5a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DoubleTermsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DoubleTermsIT.java @@ -938,7 +938,6 @@ public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAsCompound private void assertMultiSortResponse(double[] expectedKeys, BucketOrder... order) { SearchResponse response = client().prepareSearch("sort_idx") - .setTypes("multi_sort_type") .addAggregation( terms("terms").field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/HistogramIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/HistogramIT.java index 139a4f8efdc14..a7429f19a0641 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/HistogramIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/HistogramIT.java @@ -1391,7 +1391,6 @@ public void testHardBounds() throws Exception { private void assertMultiSortResponse(long[] expectedKeys, BucketOrder... 
order) { SearchResponse response = client().prepareSearch("sort_idx") - .setTypes("type") .addAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME) .interval(1) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/LongTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/LongTermsIT.java index d9c910eb0979b..804b0cae93de7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/LongTermsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/LongTermsIT.java @@ -886,7 +886,6 @@ public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAsCompound private void assertMultiSortResponse(long[] expectedKeys, BucketOrder... order) { SearchResponse response = client().prepareSearch("sort_idx") - .setTypes("multi_sort_type") .addAggregation( terms("terms").field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MinDocCountIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MinDocCountIT.java index f5507b17049e6..c02a5107192a1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MinDocCountIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MinDocCountIT.java @@ -332,7 +332,6 @@ private void testMinDocCountOnTerms(String field, Script script, BucketOrder ord private void testMinDocCountOnTerms(String field, Script script, BucketOrder order, String include, boolean retry) throws Exception { // all terms final SearchResponse allTermsResponse = client().prepareSearch("idx") - .setTypes("type") .setSize(0) .setQuery(QUERY) .addAggregation( @@ -352,7 +351,6 @@ private void testMinDocCountOnTerms(String field, Script script, BucketOrder ord for (long minDocCount = 0; minDocCount < 20; ++minDocCount) { final int size = randomIntBetween(1, cardinality + 2); final SearchRequest request = client().prepareSearch("idx") - .setTypes("type") .setSize(0) .setQuery(QUERY) .addAggregation( @@ -407,7 +405,6 @@ public void testDateHistogramKeyDesc() throws Exception { private void testMinDocCountOnHistogram(BucketOrder order) throws Exception { final int interval = randomIntBetween(1, 3); final SearchResponse allResponse = client().prepareSearch("idx") - .setTypes("type") .setSize(0) .setQuery(QUERY) .addAggregation(histogram("histo").field("d").interval(interval).order(order).minDocCount(0)) @@ -417,7 +414,6 @@ private void testMinDocCountOnHistogram(BucketOrder order) throws Exception { for (long minDocCount = 0; minDocCount < 50; ++minDocCount) { final SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .setSize(0) .setQuery(QUERY) .addAggregation(histogram("histo").field("d").interval(interval).order(order).minDocCount(minDocCount)) @@ -428,7 +424,6 @@ private void testMinDocCountOnHistogram(BucketOrder order) throws Exception { private void testMinDocCountOnDateHistogram(BucketOrder order) throws Exception { final SearchResponse allResponse = client().prepareSearch("idx") - .setTypes("type") .setSize(0) .setQuery(QUERY) .addAggregation( @@ -440,7 +435,6 @@ private void testMinDocCountOnDateHistogram(BucketOrder order) throws Exception for (long minDocCount = 0; minDocCount < 50; ++minDocCount) { final SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .setSize(0) 
.setQuery(QUERY) .addAggregation( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NestedIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NestedIT.java index 17e894b411a2a..2f03dbbf01c1d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NestedIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NestedIT.java @@ -478,7 +478,6 @@ public void testParentFilterResolvedCorrectly() throws Exception { indexRandom(true, indexRequests); SearchResponse response = client().prepareSearch("idx2") - .setTypes("provider") .addAggregation( terms("startDate").field("dates.month.start") .subAggregation( @@ -586,7 +585,6 @@ public void testNestedSameDocIdProcessedMultipleTime() throws Exception { refresh(); SearchResponse response = client().prepareSearch("idx4") - .setTypes("product") .addAggregation( terms("category").field("categories") .subAggregation(nested("property", "property").subAggregation(terms("property_id").field("property.id"))) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SamplerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SamplerIT.java index 2f586f2b9e788..89bdaa04615dd 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SamplerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SamplerIT.java @@ -120,7 +120,6 @@ public void testIssue10719() throws Exception { // statement boolean asc = randomBoolean(); SearchResponse response = client().prepareSearch("test") - .setTypes("book") .setSearchType(SearchType.QUERY_THEN_FETCH) .addAggregation( terms("genres").field("genre") diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardSizeTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardSizeTermsIT.java index 493dae383eab5..8f3d94c2eacdb 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardSizeTermsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardSizeTermsIT.java @@ -51,7 +51,6 @@ public void testNoShardSizeString() throws Exception { indexData(); SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .setQuery(matchAllQuery()) .addAggregation( terms("keys").field("key").size(3).collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.count(false)) @@ -76,7 +75,6 @@ public void testShardSizeEqualsSizeString() throws Exception { indexData(); SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .setQuery(matchAllQuery()) .addAggregation( terms("keys").field("key") @@ -106,7 +104,6 @@ public void testWithShardSizeString() throws Exception { indexData(); SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .setQuery(matchAllQuery()) .addAggregation( terms("keys").field("key") @@ -136,7 +133,6 @@ public void testWithShardSizeStringSingleShard() throws Exception { indexData(); SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .setRouting(routing1) .setQuery(matchAllQuery()) .addAggregation( @@ -166,7 +162,6 @@ public void testNoShardSizeTermOrderString() throws Exception { indexData(); SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .setQuery(matchAllQuery()) 
.addAggregation( terms("keys").field("key").size(3).collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.key(true)) @@ -191,7 +186,6 @@ public void testNoShardSizeLong() throws Exception { indexData(); SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .setQuery(matchAllQuery()) .addAggregation( terms("keys").field("key").size(3).collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.count(false)) @@ -216,7 +210,6 @@ public void testShardSizeEqualsSizeLong() throws Exception { indexData(); SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .setQuery(matchAllQuery()) .addAggregation( terms("keys").field("key") @@ -245,7 +238,6 @@ public void testWithShardSizeLong() throws Exception { indexData(); SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .setQuery(matchAllQuery()) .addAggregation( terms("keys").field("key") @@ -275,7 +267,6 @@ public void testWithShardSizeLongSingleShard() throws Exception { indexData(); SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .setRouting(routing1) .setQuery(matchAllQuery()) .addAggregation( @@ -305,7 +296,6 @@ public void testNoShardSizeTermOrderLong() throws Exception { indexData(); SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .setQuery(matchAllQuery()) .addAggregation( terms("keys").field("key").size(3).collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.key(true)) @@ -330,7 +320,6 @@ public void testNoShardSizeDouble() throws Exception { indexData(); SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .setQuery(matchAllQuery()) .addAggregation( terms("keys").field("key").size(3).collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.count(false)) @@ -355,7 +344,6 @@ public void testShardSizeEqualsSizeDouble() throws Exception { indexData(); SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .setQuery(matchAllQuery()) .addAggregation( terms("keys").field("key") @@ -384,7 +372,6 @@ public void testWithShardSizeDouble() throws Exception { indexData(); SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .setQuery(matchAllQuery()) .addAggregation( terms("keys").field("key") @@ -413,7 +400,6 @@ public void testWithShardSizeDoubleSingleShard() throws Exception { indexData(); SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .setRouting(routing1) .setQuery(matchAllQuery()) .addAggregation( @@ -443,7 +429,6 @@ public void testNoShardSizeTermOrderDouble() throws Exception { indexData(); SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .setQuery(matchAllQuery()) .addAggregation( terms("keys").field("key").size(3).collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.key(true)) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java index c075035abfea9..0046dbbd66e44 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java @@ -148,11 +148,9 @@ public void testXContentResponse() throws Exception { // Use significant_text on 
text fields but occasionally run with alternative of // significant_terms on legacy fieldData=true too. request = client().prepareSearch(INDEX_NAME) - .setTypes(DOC_TYPE) .addAggregation(terms("class").field(CLASS_FIELD).subAggregation(significantText("sig_terms", TEXT_FIELD))); } else { request = client().prepareSearch(INDEX_NAME) - .setTypes(DOC_TYPE) .addAggregation(terms("class").field(CLASS_FIELD).subAggregation(significantTerms("sig_terms").field(TEXT_FIELD))); } @@ -245,13 +243,11 @@ public void testPopularTermManyDeletedDocs() throws Exception { SearchRequestBuilder request; if (randomBoolean()) { request = client().prepareSearch(INDEX_NAME) - .setTypes(DOC_TYPE) .addAggregation( terms("class").field(CLASS_FIELD).subAggregation(significantTerms("sig_terms").field(TEXT_FIELD).minDocCount(1)) ); } else { request = client().prepareSearch(INDEX_NAME) - .setTypes(DOC_TYPE) .addAggregation(terms("class").field(CLASS_FIELD).subAggregation(significantText("sig_terms", TEXT_FIELD).minDocCount(1))); } @@ -282,7 +278,6 @@ public void testBackgroundVsSeparateSet( SearchRequestBuilder request1; if (useSigText) { request1 = client().prepareSearch(INDEX_NAME) - .setTypes(DOC_TYPE) .addAggregation( terms("class").field(CLASS_FIELD) .subAggregation( @@ -292,7 +287,6 @@ public void testBackgroundVsSeparateSet( ); } else { request1 = client().prepareSearch(INDEX_NAME) - .setTypes(DOC_TYPE) .addAggregation( terms("class").field(CLASS_FIELD) .subAggregation( @@ -309,7 +303,6 @@ public void testBackgroundVsSeparateSet( SearchRequestBuilder request2; if (useSigText) { request2 = client().prepareSearch(INDEX_NAME) - .setTypes(DOC_TYPE) .addAggregation( filter("0", QueryBuilders.termQuery(CLASS_FIELD, "0")).subAggregation( significantText("sig_terms", TEXT_FIELD).minDocCount(1) @@ -326,7 +319,6 @@ public void testBackgroundVsSeparateSet( ); } else { request2 = client().prepareSearch(INDEX_NAME) - .setTypes(DOC_TYPE) .addAggregation( filter("0", QueryBuilders.termQuery(CLASS_FIELD, "0")).subAggregation( significantTerms("sig_terms").field(TEXT_FIELD) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsDocCountErrorIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsDocCountErrorIT.java index aa2255435c8ff..789b1ced7ffae 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsDocCountErrorIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsDocCountErrorIT.java @@ -303,7 +303,6 @@ public void testStringValueField() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(STRING_FIELD_NAME) @@ -317,7 +316,6 @@ public void testStringValueField() throws Exception { assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(STRING_FIELD_NAME) @@ -337,7 +335,6 @@ public void testStringValueFieldSingleShard() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(STRING_FIELD_NAME) @@ -351,7 
+348,6 @@ public void testStringValueFieldSingleShard() throws Exception { assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx_single_shard") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(STRING_FIELD_NAME) @@ -372,7 +368,6 @@ public void testStringValueFieldWithRouting() throws Exception { int shardSize = randomIntBetween(size, size * 2); SearchResponse testResponse = client().prepareSearch("idx_with_routing") - .setTypes("type") .setRouting(String.valueOf(between(1, numRoutingValues))) .addAggregation( terms("terms").executionHint(randomExecutionHint()) @@ -393,7 +388,6 @@ public void testStringValueFieldDocCountAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(STRING_FIELD_NAME) @@ -408,7 +402,6 @@ public void testStringValueFieldDocCountAsc() throws Exception { assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx_single_shard") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(STRING_FIELD_NAME) @@ -429,7 +422,6 @@ public void testStringValueFieldTermSortAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(STRING_FIELD_NAME) @@ -444,7 +436,6 @@ public void testStringValueFieldTermSortAsc() throws Exception { assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx_single_shard") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(STRING_FIELD_NAME) @@ -465,7 +456,6 @@ public void testStringValueFieldTermSortDesc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(STRING_FIELD_NAME) @@ -480,7 +470,6 @@ public void testStringValueFieldTermSortDesc() throws Exception { assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx_single_shard") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(STRING_FIELD_NAME) @@ -501,7 +490,6 @@ public void testStringValueFieldSubAggAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(STRING_FIELD_NAME) @@ -517,7 +505,6 @@ public void testStringValueFieldSubAggAsc() throws Exception { assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx_single_shard") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(STRING_FIELD_NAME) @@ -539,7 +526,6 @@ public void testStringValueFieldSubAggDesc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse 
accurateResponse = client().prepareSearch("idx_single_shard") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(STRING_FIELD_NAME) @@ -555,7 +541,6 @@ public void testStringValueFieldSubAggDesc() throws Exception { assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx_single_shard") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(STRING_FIELD_NAME) @@ -577,7 +562,6 @@ public void testLongValueField() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(LONG_FIELD_NAME) @@ -591,7 +575,6 @@ public void testLongValueField() throws Exception { assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(LONG_FIELD_NAME) @@ -611,7 +594,6 @@ public void testLongValueFieldSingleShard() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(LONG_FIELD_NAME) @@ -625,7 +607,6 @@ public void testLongValueFieldSingleShard() throws Exception { assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx_single_shard") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(LONG_FIELD_NAME) @@ -646,7 +627,6 @@ public void testLongValueFieldWithRouting() throws Exception { int shardSize = randomIntBetween(size, size * 2); SearchResponse testResponse = client().prepareSearch("idx_with_routing") - .setTypes("type") .setRouting(String.valueOf(between(1, numRoutingValues))) .addAggregation( terms("terms").executionHint(randomExecutionHint()) @@ -667,7 +647,6 @@ public void testLongValueFieldDocCountAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(LONG_FIELD_NAME) @@ -682,7 +661,6 @@ public void testLongValueFieldDocCountAsc() throws Exception { assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx_single_shard") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(LONG_FIELD_NAME) @@ -703,7 +681,6 @@ public void testLongValueFieldTermSortAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(LONG_FIELD_NAME) @@ -718,7 +695,6 @@ public void testLongValueFieldTermSortAsc() throws Exception { assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx_single_shard") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(LONG_FIELD_NAME) @@ -739,7 +715,6 @@ public void testLongValueFieldTermSortDesc() throws Exception { int 
size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(LONG_FIELD_NAME) @@ -754,7 +729,6 @@ public void testLongValueFieldTermSortDesc() throws Exception { assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx_single_shard") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(LONG_FIELD_NAME) @@ -775,7 +749,6 @@ public void testLongValueFieldSubAggAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(LONG_FIELD_NAME) @@ -791,7 +764,6 @@ public void testLongValueFieldSubAggAsc() throws Exception { assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx_single_shard") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(LONG_FIELD_NAME) @@ -813,7 +785,6 @@ public void testLongValueFieldSubAggDesc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(LONG_FIELD_NAME) @@ -829,7 +800,6 @@ public void testLongValueFieldSubAggDesc() throws Exception { assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx_single_shard") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(LONG_FIELD_NAME) @@ -851,7 +821,6 @@ public void testDoubleValueField() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(DOUBLE_FIELD_NAME) @@ -865,7 +834,6 @@ public void testDoubleValueField() throws Exception { assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(DOUBLE_FIELD_NAME) @@ -885,7 +853,6 @@ public void testDoubleValueFieldSingleShard() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(DOUBLE_FIELD_NAME) @@ -899,7 +866,6 @@ public void testDoubleValueFieldSingleShard() throws Exception { assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx_single_shard") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(DOUBLE_FIELD_NAME) @@ -920,7 +886,6 @@ public void testDoubleValueFieldWithRouting() throws Exception { int shardSize = randomIntBetween(size, size * 2); SearchResponse testResponse = client().prepareSearch("idx_with_routing") - .setTypes("type") .setRouting(String.valueOf(between(1, numRoutingValues))) .addAggregation( 
terms("terms").executionHint(randomExecutionHint()) @@ -941,7 +906,6 @@ public void testDoubleValueFieldDocCountAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(DOUBLE_FIELD_NAME) @@ -956,7 +920,6 @@ public void testDoubleValueFieldDocCountAsc() throws Exception { assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx_single_shard") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(DOUBLE_FIELD_NAME) @@ -977,7 +940,6 @@ public void testDoubleValueFieldTermSortAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(DOUBLE_FIELD_NAME) @@ -992,7 +954,6 @@ public void testDoubleValueFieldTermSortAsc() throws Exception { assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx_single_shard") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(DOUBLE_FIELD_NAME) @@ -1013,7 +974,6 @@ public void testDoubleValueFieldTermSortDesc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(DOUBLE_FIELD_NAME) @@ -1028,7 +988,6 @@ public void testDoubleValueFieldTermSortDesc() throws Exception { assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx_single_shard") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(DOUBLE_FIELD_NAME) @@ -1049,7 +1008,6 @@ public void testDoubleValueFieldSubAggAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(DOUBLE_FIELD_NAME) @@ -1065,7 +1023,6 @@ public void testDoubleValueFieldSubAggAsc() throws Exception { assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx_single_shard") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(DOUBLE_FIELD_NAME) @@ -1087,7 +1044,6 @@ public void testDoubleValueFieldSubAggDesc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(DOUBLE_FIELD_NAME) @@ -1103,7 +1059,6 @@ public void testDoubleValueFieldSubAggDesc() throws Exception { assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx_single_shard") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(DOUBLE_FIELD_NAME) @@ -1128,7 +1083,6 @@ public void testDoubleValueFieldSubAggDesc() throws Exception { */ public void testFixedDocs() 
throws Exception { SearchResponse response = client().prepareSearch("idx_fixed_docs_0", "idx_fixed_docs_1", "idx_fixed_docs_2") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(STRING_FIELD_NAME) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/StringTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/StringTermsIT.java index ead7a654baadf..e4604c0a91523 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/StringTermsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/StringTermsIT.java @@ -368,7 +368,6 @@ public void testMultiValueFieldWithPartitionedFiltering() throws Exception { private void runTestFieldWithPartitionedFiltering(String field) throws Exception { // Find total number of unique terms SearchResponse allResponse = client().prepareSearch("idx") - .setTypes("type") .addAggregation(terms("terms").field(field).size(10000).collectMode(randomFrom(SubAggCollectionMode.values()))) .get(); assertSearchResponse(allResponse); @@ -382,7 +381,6 @@ private void runTestFieldWithPartitionedFiltering(String field) throws Exception Set<String> foundTerms = new HashSet<>(); for (int partition = 0; partition < numPartitions; partition++) { SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .addAggregation( terms("terms").field(field) .includeExclude(new IncludeExclude(partition, numPartitions)) @@ -402,7 +400,6 @@ private void runTestFieldWithPartitionedFiltering(String field) throws Exception public void testSingleValuedFieldWithValueScript() throws Exception { SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(SINGLE_VALUED_FIELD_NAME) @@ -428,7 +425,6 @@ public void testSingleValuedFieldWithValueScript() throws Exception { public void testMultiValuedFieldWithValueScriptNotUnique() throws Exception { SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(MULTI_VALUED_FIELD_NAME) @@ -452,7 +448,6 @@ public void testMultiValuedFieldWithValueScriptNotUnique() throws Exception { public void testMultiValuedScript() throws Exception { SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .script( @@ -488,7 +483,6 @@ public void testMultiValuedScript() throws Exception { public void testMultiValuedFieldWithValueScript() throws Exception { SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(MULTI_VALUED_FIELD_NAME) @@ -537,7 +531,6 @@ public void testScriptSingleValue() throws Exception { ); SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .addAggregation( terms("terms").collectMode(randomFrom(SubAggCollectionMode.values())).executionHint(randomExecutionHint()).script(script) ) @@ -567,7 +560,6 @@ public void testScriptSingleValueExplicitSingleValue() throws Exception { ); SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .addAggregation( terms("terms").collectMode(randomFrom(SubAggCollectionMode.values())).executionHint(randomExecutionHint()).script(script) ) @@ -590,7 +582,6 @@ public void 
testScriptSingleValueExplicitSingleValue() throws Exception { public void testScriptMultiValued() throws Exception { SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .addAggregation( terms("terms").collectMode(randomFrom(SubAggCollectionMode.values())) .executionHint(randomExecutionHint()) @@ -626,7 +617,6 @@ public void testScriptMultiValued() throws Exception { public void testPartiallyUnmapped() throws Exception { SearchResponse response = client().prepareSearch("idx", "idx_unmapped") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(SINGLE_VALUED_FIELD_NAME) @@ -652,7 +642,6 @@ public void testPartiallyUnmapped() throws Exception { public void testStringTermsNestedIntoPerBucketAggregator() throws Exception { // no execution hint so that the logic that decides whether or not to use ordinals is executed SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .addAggregation( filter("filter", termQuery(MULTI_VALUED_FIELD_NAME, "val3")).subAggregation( terms("terms").field(MULTI_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())) @@ -681,7 +670,6 @@ public void testSingleValuedFieldOrderedByIllegalAgg() throws Exception { boolean asc = true; try { client().prepareSearch("idx") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(SINGLE_VALUED_FIELD_NAME) @@ -710,7 +698,6 @@ public void testSingleValuedFieldOrderedByIllegalAgg() throws Exception { public void testSingleValuedFieldOrderedBySingleBucketSubAggregationAsc() throws Exception { boolean asc = randomBoolean(); SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .addAggregation( terms("tags").executionHint(randomExecutionHint()) .field("tag") @@ -749,7 +736,6 @@ public void testSingleValuedFieldOrderedBySingleBucketSubAggregationAsc() throws public void testSingleValuedFieldOrderedBySubAggregationAscMultiHierarchyLevels() throws Exception { boolean asc = randomBoolean(); SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .addAggregation( terms("tags").executionHint(randomExecutionHint()) .field("tag") @@ -813,7 +799,6 @@ public void testSingleValuedFieldOrderedBySubAggregationAscMultiHierarchyLevelsS String statsName = statsNameBuilder.toString(); boolean asc = randomBoolean(); SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .addAggregation( terms("tags").executionHint(randomExecutionHint()) .field("tag") @@ -877,7 +862,6 @@ public void testSingleValuedFieldOrderedBySubAggregationAscMultiHierarchyLevelsS String statsName = statsNameBuilder.toString(); boolean asc = randomBoolean(); SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .addAggregation( terms("tags").executionHint(randomExecutionHint()) .field("tag") @@ -936,7 +920,6 @@ public void testSingleValuedFieldOrderedByMissingSubAggregation() throws Excepti for (String index : Arrays.asList("idx", "idx_unmapped")) { try { client().prepareSearch(index) - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(SINGLE_VALUED_FIELD_NAME) @@ -957,7 +940,6 @@ public void testSingleValuedFieldOrderedByNonMetricsOrMultiBucketSubAggregation( for (String index : Arrays.asList("idx", "idx_unmapped")) { try { client().prepareSearch(index) - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(SINGLE_VALUED_FIELD_NAME) @@ -982,7 +964,6 @@ public void 
testSingleValuedFieldOrderedByMultiValuedSubAggregationWithUnknownMe for (String index : Arrays.asList("idx", "idx_unmapped")) { try { SearchResponse response = client().prepareSearch(index) - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(SINGLE_VALUED_FIELD_NAME) @@ -1008,7 +989,6 @@ public void testSingleValuedFieldOrderedByMultiValuedSubAggregationWithoutMetric for (String index : Arrays.asList("idx", "idx_unmapped")) { try { client().prepareSearch(index) - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(SINGLE_VALUED_FIELD_NAME) @@ -1033,7 +1013,6 @@ public void testSingleValuedFieldOrderedByMultiValuedSubAggregationWithoutMetric public void testSingleValuedFieldOrderedByMultiValueSubAggregationAsc() throws Exception { boolean asc = true; SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(SINGLE_VALUED_FIELD_NAME) @@ -1066,7 +1045,6 @@ public void testSingleValuedFieldOrderedByMultiValueSubAggregationAsc() throws E public void testSingleValuedFieldOrderedByMultiValueSubAggregationDesc() throws Exception { boolean asc = false; SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(SINGLE_VALUED_FIELD_NAME) @@ -1100,7 +1078,6 @@ public void testSingleValuedFieldOrderedByMultiValueSubAggregationDesc() throws public void testSingleValuedFieldOrderedByMultiValueExtendedStatsAsc() throws Exception { boolean asc = true; SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(SINGLE_VALUED_FIELD_NAME) @@ -1134,7 +1111,6 @@ public void testSingleValuedFieldOrderedByMultiValueExtendedStatsAsc() throws Ex public void testSingleValuedFieldOrderedByStatsAggAscWithTermsSubAgg() throws Exception { boolean asc = true; SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(SINGLE_VALUED_FIELD_NAME) @@ -1253,7 +1229,6 @@ private void assertMultiSortResponse(String[] expectedKeys, BucketOrder... 
order public void testIndexMetaField() throws Exception { SearchResponse response = client().prepareSearch("idx", "empty_bucket_idx") - .setTypes("type") .addAggregation( terms("terms").collectMode(randomFrom(SubAggCollectionMode.values())) .executionHint(randomExecutionHint()) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityIT.java index 9be3ed91676c0..6a1c9fac45ab2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityIT.java @@ -204,7 +204,6 @@ private static String multiNumericField(boolean hash) { public void testUnmapped() throws Exception { SearchResponse response = client().prepareSearch("idx_unmapped") - .setTypes("type") .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_value")) .get(); @@ -218,7 +217,6 @@ public void testUnmapped() throws Exception { public void testPartiallyUnmapped() throws Exception { SearchResponse response = client().prepareSearch("idx", "idx_unmapped") - .setTypes("type") .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_value")) .get(); @@ -232,7 +230,6 @@ public void testPartiallyUnmapped() throws Exception { public void testSingleValuedString() throws Exception { SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_value")) .get(); @@ -246,7 +243,6 @@ public void testSingleValuedString() throws Exception { public void testSingleValuedNumeric() throws Exception { SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field(singleNumericField())) .get(); @@ -289,7 +285,6 @@ public void testSingleValuedNumericGetProperty() throws Exception { public void testSingleValuedNumericHashed() throws Exception { SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field(singleNumericField())) .get(); @@ -303,7 +298,6 @@ public void testSingleValuedNumericHashed() throws Exception { public void testMultiValuedString() throws Exception { SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_values")) .get(); @@ -317,7 +311,6 @@ public void testMultiValuedString() throws Exception { public void testMultiValuedNumeric() throws Exception { SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field(multiNumericField(false))) .get(); @@ -331,7 +324,6 @@ public void testMultiValuedNumeric() throws Exception { public void testMultiValuedNumericHashed() throws Exception { SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field(multiNumericField(true))) .get(); @@ -345,7 +337,6 @@ public void testMultiValuedNumericHashed() throws Exception { public void testSingleValuedStringScript() throws Exception { SearchResponse response = 
client().prepareSearch("idx") - .setTypes("type") .addAggregation( cardinality("cardinality").precisionThreshold(precisionThreshold) .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['str_value'].value", emptyMap())) @@ -362,7 +353,6 @@ public void testSingleValuedStringScript() throws Exception { public void testMultiValuedStringScript() throws Exception { SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .addAggregation( cardinality("cardinality").precisionThreshold(precisionThreshold) .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['str_values']", emptyMap())) @@ -380,7 +370,6 @@ public void testMultiValuedStringScript() throws Exception { public void testSingleValuedNumericScript() throws Exception { Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['" + singleNumericField() + "'].value", emptyMap()); SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).script(script)) .get(); @@ -400,7 +389,6 @@ public void testMultiValuedNumericScript() throws Exception { Collections.emptyMap() ); SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).script(script)) .get(); @@ -414,7 +402,6 @@ public void testMultiValuedNumericScript() throws Exception { public void testSingleValuedStringValueScript() throws Exception { SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .addAggregation( cardinality("cardinality").precisionThreshold(precisionThreshold) .field("str_value") @@ -432,7 +419,6 @@ public void testSingleValuedStringValueScript() throws Exception { public void testMultiValuedStringValueScript() throws Exception { SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .addAggregation( cardinality("cardinality").precisionThreshold(precisionThreshold) .field("str_values") @@ -450,7 +436,6 @@ public void testMultiValuedStringValueScript() throws Exception { public void testSingleValuedNumericValueScript() throws Exception { SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .addAggregation( cardinality("cardinality").precisionThreshold(precisionThreshold) .field(singleNumericField()) @@ -468,7 +453,6 @@ public void testSingleValuedNumericValueScript() throws Exception { public void testMultiValuedNumericValueScript() throws Exception { SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .addAggregation( cardinality("cardinality").precisionThreshold(precisionThreshold) .field(multiNumericField(false)) @@ -486,7 +470,6 @@ public void testMultiValuedNumericValueScript() throws Exception { public void testAsSubAgg() throws Exception { SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .addAggregation( terms("terms").field("str_value") .collectMode(randomFrom(SubAggCollectionMode.values())) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TopHitsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TopHitsIT.java index 17a5639070aa9..bb3f62d399444 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TopHitsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TopHitsIT.java @@ -1123,7 +1123,6 @@ public void testNoStoredFields() throws 
Exception { for (SearchHit hit : hits) { assertThat(hit.getSourceAsMap(), nullValue()); assertThat(hit.getId(), nullValue()); - assertThat(hit.getType(), equalTo("type")); } } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SerialDiffIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SerialDiffIT.java index b7a11bbe4aba7..d4480aefbc767 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SerialDiffIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SerialDiffIT.java @@ -241,7 +241,6 @@ private void setupExpected(MetricTarget target) { public void testBasicDiff() { SearchResponse response = client().prepareSearch("idx") - .setTypes("type") .addAggregation( histogram("histo").field(INTERVAL_FIELD) .interval(interval) @@ -286,7 +285,6 @@ public void testBasicDiff() { public void testInvalidLagSize() { try { client().prepareSearch("idx") - .setTypes("type") .addAggregation( histogram("histo").field(INTERVAL_FIELD) .interval(interval) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWithRandomIOExceptionsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWithRandomIOExceptionsIT.java index cec152ec8829e..eeef08ee58baa 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWithRandomIOExceptionsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWithRandomIOExceptionsIT.java @@ -192,7 +192,6 @@ public void testRandomDirectoryIOExceptions() throws IOException, InterruptedExc int expectedResults = added[docToQuery] ? 1 : 0; logger.info("Searching for [test:{}]", English.intToEnglish(docToQuery)); SearchResponse searchResponse = client().prepareSearch() - .setTypes("type") .setQuery(QueryBuilders.matchQuery("test", English.intToEnglish(docToQuery))) .setSize(expectedResults) .get(); @@ -202,7 +201,6 @@ public void testRandomDirectoryIOExceptions() throws IOException, InterruptedExc } // check match all searchResponse = client().prepareSearch() - .setTypes("type") .setQuery(QueryBuilders.matchAllQuery()) .setSize(numCreated + numInitialDocs) .addSort("_uid", SortOrder.ASC) @@ -239,10 +237,7 @@ public void testRandomDirectoryIOExceptions() throws IOException, InterruptedExc ); client().admin().indices().prepareOpen("test").execute().get(); ensureGreen(); - SearchResponse searchResponse = client().prepareSearch() - .setTypes("type") - .setQuery(QueryBuilders.matchQuery("test", "init")) - .get(); + SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.matchQuery("test", "init")).get(); assertNoFailures(searchResponse); assertHitCount(searchResponse, numInitialDocs); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportSearchFailuresIT.java b/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportSearchFailuresIT.java index b44e4be011475..7982d9f5781fc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportSearchFailuresIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportSearchFailuresIT.java @@ -136,7 +136,7 @@ public void testFailedSearchWithWrongQuery() throws Exception { } private void index(Client client, String id, String nameValue, int age) throws IOException { - client.index(Requests.indexRequest("test").type("type").id(id).source(source(id, nameValue, 
age))).actionGet(); + client.index(Requests.indexRequest("test").id(id).source(source(id, nameValue, age))).actionGet(); } private XContentBuilder source(String id, String nameValue, int age) throws IOException { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportTwoNodesSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportTwoNodesSearchIT.java index 23ca51b830fe1..420121006a943 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportTwoNodesSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportTwoNodesSearchIT.java @@ -109,7 +109,7 @@ private Set<String> prepareData(int numShards) throws Exception { } private void index(String id, String nameValue, int age) throws IOException { - client().index(Requests.indexRequest("test").type("type").id(id).source(source(id, nameValue, age))).actionGet(); + client().index(Requests.indexRequest("test").id(id).source(source(id, nameValue, age))).actionGet(); } private XContentBuilder source(String id, String nameValue, int age) throws IOException { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fetch/FetchSubPhasePluginIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fetch/FetchSubPhasePluginIT.java index 082a8df529a0b..68bac89213c57 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fetch/FetchSubPhasePluginIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fetch/FetchSubPhasePluginIT.java @@ -94,9 +94,8 @@ public void testPlugin() throws Exception { ) .get(); - client().index( - indexRequest("test").type("type1").id("1").source(jsonBuilder().startObject().field("test", "I am sam i am").endObject()) - ).actionGet(); + client().index(indexRequest("test").id("1").source(jsonBuilder().startObject().field("test", "I am sam i am").endObject())) + .actionGet(); client().admin().indices().prepareRefresh().get(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/CustomHighlighterSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/CustomHighlighterSearchIT.java index de7000de6f208..59dc710f9c1ba 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/CustomHighlighterSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/CustomHighlighterSearchIT.java @@ -71,7 +71,6 @@ protected void setup() throws Exception { public void testThatCustomHighlightersAreSupported() throws IOException { SearchResponse searchResponse = client().prepareSearch("test") - .setTypes("test") .setQuery(QueryBuilders.matchAllQuery()) .highlighter(new HighlightBuilder().field("name").highlighterType("test-custom")) .get(); @@ -86,7 +85,6 @@ public void testThatCustomHighlighterCanBeConfiguredPerField() throws Exception highlightConfig.options(options); SearchResponse searchResponse = client().prepareSearch("test") - .setTypes("test") .setQuery(QueryBuilders.matchAllQuery()) .highlighter(new HighlightBuilder().field(highlightConfig)) .get(); @@ -100,7 +98,6 @@ public void testThatCustomHighlighterCanBeConfiguredGlobally() throws Exception options.put("myGlobalOption", "someValue"); SearchResponse searchResponse = client().prepareSearch("test") - .setTypes("test") .setQuery(QueryBuilders.matchAllQuery()) .highlighter(new HighlightBuilder().field("name").highlighterType("test-custom").options(options)) 
.get(); @@ -111,7 +108,6 @@ public void testThatCustomHighlighterCanBeConfiguredGlobally() throws Exception public void testThatCustomHighlighterReceivesFieldsInOrder() throws Exception { SearchResponse searchResponse = client().prepareSearch("test") - .setTypes("test") .setQuery(QueryBuilders.boolQuery().must(QueryBuilders.matchAllQuery()).should(QueryBuilders.termQuery("name", "arbitrary"))) .highlighter( new HighlightBuilder().highlighterType("test-custom") diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java index dc60f503ab47f..85d8f26036177 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java @@ -2171,7 +2171,7 @@ public void testHighlightNoMatchSizeWithMultivaluedFields() throws IOException { index("test", "type1", "2", "text", new String[] { "", text2 }); refresh(); - IdsQueryBuilder idsQueryBuilder = QueryBuilders.idsQuery("type1").addIds("2"); + IdsQueryBuilder idsQueryBuilder = QueryBuilders.idsQuery().addIds("2"); field.highlighterType("plain"); response = client().prepareSearch("test").setQuery(idsQueryBuilder).highlighter(new HighlightBuilder().field(field)).get(); assertNotHighlighted(response, 0, "text"); @@ -2188,7 +2188,7 @@ public void testHighlightNoMatchSizeWithMultivaluedFields() throws IOException { // But if the field was actually empty then you should get no highlighting field index("test", "type1", "3", "text", new String[] {}); refresh(); - idsQueryBuilder = QueryBuilders.idsQuery("type1").addIds("3"); + idsQueryBuilder = QueryBuilders.idsQuery().addIds("3"); field.highlighterType("plain"); response = client().prepareSearch("test").setQuery(idsQueryBuilder).highlighter(new HighlightBuilder().field(field)).get(); assertNotHighlighted(response, 0, "text"); @@ -2205,7 +2205,7 @@ public void testHighlightNoMatchSizeWithMultivaluedFields() throws IOException { index("test", "type1", "4"); refresh(); - idsQueryBuilder = QueryBuilders.idsQuery("type1").addIds("4"); + idsQueryBuilder = QueryBuilders.idsQuery().addIds("4"); field.highlighterType("plain"); response = client().prepareSearch("test").setQuery(idsQueryBuilder).highlighter(new HighlightBuilder().field(field)).get(); assertNotHighlighted(response, 0, "text"); @@ -3042,7 +3042,6 @@ public void testDoesNotHighlightTypeName() throws Exception { for (String highlighter : ALL_TYPES) { SearchResponse response = client().prepareSearch("test") - .setTypes("typename") .setQuery(matchQuery("foo", "test")) .highlighter(new HighlightBuilder().field("foo").highlighterType(highlighter).requireFieldMatch(false)) .get(); @@ -3071,7 +3070,6 @@ public void testDoesNotHighlightAliasFilters() throws Exception { for (String highlighter : ALL_TYPES) { SearchResponse response = client().prepareSearch("filtered_alias") - .setTypes("typename") .setQuery(matchQuery("foo", "test")) .highlighter(new HighlightBuilder().field("foo").highlighterType(highlighter).requireFieldMatch(false)) .get(); @@ -3523,7 +3521,7 @@ public void testWithNormalizer() throws Exception { assertHitCount(searchResponse, 1); HighlightField field = searchResponse.getHits().getAt(0).getHighlightFields().get("keyword"); assertThat(field.getFragments().length, equalTo(1)); - 
assertThat(field.getFragments()[0].string(), equalTo("Hello World")); + assertThat(field.getFragments()[0].string(), equalTo("hello world")); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java index dc57ddf80d28d..e4c6e3b4d88ac 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java @@ -737,11 +737,7 @@ public void testSearchFieldsMetadata() throws Exception { .setRefreshPolicy(IMMEDIATE) .get(); - SearchResponse searchResponse = client().prepareSearch("my-index") - .setTypes("my-type1") - .addStoredField("field1") - .addStoredField("_routing") - .get(); + SearchResponse searchResponse = client().prepareSearch("my-index").addStoredField("field1").addStoredField("_routing").get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); assertThat(searchResponse.getHits().getAt(0).field("field1"), nullValue()); @@ -755,7 +751,7 @@ public void testSearchFieldsNonLeafField() throws Exception { .get(); assertFailures( - client().prepareSearch("my-index").setTypes("my-type1").addStoredField("field1"), + client().prepareSearch("my-index").addStoredField("field1"), RestStatus.BAD_REQUEST, containsString("field [field1] isn't a leaf field") ); @@ -838,7 +834,6 @@ public void testSingleValueFieldDatatField() throws ExecutionException, Interrup indexRandom(true, client().prepareIndex("test", "type", "1").setSource("test_field", "foobar")); refresh(); SearchResponse searchResponse = client().prepareSearch("test") - .setTypes("type") .setSource(new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()).docValueField("test_field")) .get(); assertHitCount(searchResponse, 1); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/DecayFunctionScoreIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/DecayFunctionScoreIT.java index 2c77e3d1d44e3..0f47877facaff 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/DecayFunctionScoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/DecayFunctionScoreIT.java @@ -645,14 +645,10 @@ public void testExceptionThrownIfScaleLE0() throws Exception { ) ); client().index( - indexRequest("test").type("type1") - .id("1") - .source(jsonBuilder().startObject().field("test", "value").field("num1", "2013-05-27").endObject()) + indexRequest("test").id("1").source(jsonBuilder().startObject().field("test", "value").field("num1", "2013-05-27").endObject()) ).actionGet(); client().index( - indexRequest("test").type("type1") - .id("2") - .source(jsonBuilder().startObject().field("test", "value").field("num1", "2013-05-28").endObject()) + indexRequest("test").id("2").source(jsonBuilder().startObject().field("test", "value").field("num1", "2013-05-28").endObject()) ).actionGet(); refresh(); @@ -690,13 +686,11 @@ public void testParseDateMath() throws Exception { ) ); client().index( - indexRequest("test").type("type1") - .id("1") + indexRequest("test").id("1") .source(jsonBuilder().startObject().field("test", "value").field("num1", System.currentTimeMillis()).endObject()) ).actionGet(); client().index( - indexRequest("test").type("type1") - .id("2") + indexRequest("test").id("2") .source( jsonBuilder().startObject() .field("test", "value") @@ -749,24 +743,18 @@ public void 
testValueMissingLin() throws Exception { ); client().index( - indexRequest("test").type("type1") - .id("1") + indexRequest("test").id("1") .source(jsonBuilder().startObject().field("test", "value").field("num1", "2013-05-27").field("num2", "1.0").endObject()) ).actionGet(); client().index( - indexRequest("test").type("type1") - .id("2") - .source(jsonBuilder().startObject().field("test", "value").field("num2", "1.0").endObject()) + indexRequest("test").id("2").source(jsonBuilder().startObject().field("test", "value").field("num2", "1.0").endObject()) ).actionGet(); client().index( - indexRequest("test").type("type1") - .id("3") + indexRequest("test").id("3") .source(jsonBuilder().startObject().field("test", "value").field("num1", "2013-05-30").field("num2", "1.0").endObject()) ).actionGet(); client().index( - indexRequest("test").type("type1") - .id("4") - .source(jsonBuilder().startObject().field("test", "value").field("num1", "2013-05-30").endObject()) + indexRequest("test").id("4").source(jsonBuilder().startObject().field("test", "value").field("num1", "2013-05-30").endObject()) ).actionGet(); refresh(); @@ -827,9 +815,7 @@ public void testDateWithoutOrigin() throws Exception { + "-" + String.format(Locale.ROOT, "%02d", docDate.getDayOfMonth()); client().index( - indexRequest("test").type("type1") - .id("1") - .source(jsonBuilder().startObject().field("test", "value").field("num1", docDateString).endObject()) + indexRequest("test").id("1").source(jsonBuilder().startObject().field("test", "value").field("num1", docDateString).endObject()) ).actionGet(); docDate = dt.minusDays(2); docDateString = docDate.getYear() @@ -838,9 +824,7 @@ public void testDateWithoutOrigin() throws Exception { + "-" + String.format(Locale.ROOT, "%02d", docDate.getDayOfMonth()); client().index( - indexRequest("test").type("type1") - .id("2") - .source(jsonBuilder().startObject().field("test", "value").field("num1", docDateString).endObject()) + indexRequest("test").id("2").source(jsonBuilder().startObject().field("test", "value").field("num1", docDateString).endObject()) ).actionGet(); docDate = dt.minusDays(3); docDateString = docDate.getYear() @@ -849,9 +833,7 @@ public void testDateWithoutOrigin() throws Exception { + "-" + String.format(Locale.ROOT, "%02d", docDate.getDayOfMonth()); client().index( - indexRequest("test").type("type1") - .id("3") - .source(jsonBuilder().startObject().field("test", "value").field("num1", docDateString).endObject()) + indexRequest("test").id("3").source(jsonBuilder().startObject().field("test", "value").field("num1", docDateString).endObject()) ).actionGet(); refresh(); @@ -987,16 +969,15 @@ public void testParsingExceptionIfFieldDoesNotExist() throws Exception { ); int numDocs = 2; client().index( - indexRequest("test").type("type") - .source( - jsonBuilder().startObject() - .field("test", "value") - .startObject("geo") - .field("lat", 1) - .field("lon", 2) - .endObject() - .endObject() - ) + indexRequest("test").source( + jsonBuilder().startObject() + .field("test", "value") + .startObject("geo") + .field("lat", 1) + .field("lon", 2) + .endObject() + .endObject() + ) ).actionGet(); refresh(); List<Float> lonlat = new ArrayList<>(); @@ -1040,8 +1021,7 @@ public void testParsingExceptionIfFieldTypeDoesNotMatch() throws Exception { ) ); client().index( - indexRequest("test").type("type") - .source(jsonBuilder().startObject().field("test", "value").field("num", Integer.toString(1)).endObject()) + indexRequest("test").source(jsonBuilder().startObject().field("test", "value").field("num", 
Integer.toString(1)).endObject()) ).actionGet(); refresh(); // so, we indexed a string field, but now we try to score a num field @@ -1079,9 +1059,8 @@ public void testNoQueryGiven() throws Exception { .endObject() ) ); - client().index( - indexRequest("test").type("type").source(jsonBuilder().startObject().field("test", "value").field("num", 1.0).endObject()) - ).actionGet(); + client().index(indexRequest("test").source(jsonBuilder().startObject().field("test", "value").field("num", 1.0).endObject())) + .actionGet(); refresh(); // so, we indexed a string field, but now we try to score a num field ActionFuture<SearchResponse> response = client().search( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScorePluginIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScorePluginIT.java index ca69c38d1fcda..af7633628dab1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScorePluginIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScorePluginIT.java @@ -95,14 +95,10 @@ public void testPlugin() throws Exception { client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().get(); client().index( - indexRequest("test").type("type1") - .id("1") - .source(jsonBuilder().startObject().field("test", "value").field("num1", "2013-05-26").endObject()) + indexRequest("test").id("1").source(jsonBuilder().startObject().field("test", "value").field("num1", "2013-05-26").endObject()) ).actionGet(); client().index( - indexRequest("test").type("type1") - .id("2") - .source(jsonBuilder().startObject().field("test", "value").field("num1", "2013-05-27").endObject()) + indexRequest("test").id("2").source(jsonBuilder().startObject().field("test", "value").field("num1", "2013-05-27").endObject()) ).actionGet(); client().admin().indices().prepareRefresh().get(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoDistanceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoDistanceIT.java index a03f3a5f62343..ceac97f25c8d6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoDistanceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoDistanceIT.java @@ -220,7 +220,6 @@ public void testGeoDistanceAggregation() throws IOException { String name = "TestPosition"; search.setQuery(QueryBuilders.matchAllQuery()) - .setTypes("type1") .addAggregation( AggregationBuilders.geoDistance(name, new GeoPoint(tgt_lat, tgt_lon)) .field("location") diff --git a/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoFilterIT.java b/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoFilterIT.java index 0e412a51ee77f..478d018ed7fba 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoFilterIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoFilterIT.java @@ -413,7 +413,7 @@ public void testBulk() throws Exception { .endObject(); client().admin().indices().prepareCreate("countries").setSettings(settings).addMapping("country", xContentBuilder).get(); - BulkResponse bulk = client().prepareBulk().add(bulkAction, 0, bulkAction.length, null, null, xContentBuilder.contentType()).get(); + BulkResponse bulk = client().prepareBulk().add(bulkAction, 0, bulkAction.length, null, xContentBuilder.contentType()).get(); for (BulkItemResponse item : bulk.getItems()) { assertFalse("unable to 
index data", item.isFailed()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/morelikethis/MoreLikeThisIT.java b/server/src/internalClusterTest/java/org/opensearch/search/morelikethis/MoreLikeThisIT.java index 3dac4402a1ffc..0a2ddb607ccc5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/morelikethis/MoreLikeThisIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/morelikethis/MoreLikeThisIT.java @@ -43,6 +43,7 @@ import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentType; +import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.MoreLikeThisQueryBuilder; import org.opensearch.index.query.MoreLikeThisQueryBuilder.Item; import org.opensearch.index.query.QueryBuilder; @@ -104,11 +105,9 @@ public void testSimpleMoreLikeThis() throws Exception { assertThat(ensureGreen(), equalTo(ClusterHealthStatus.GREEN)); logger.info("Indexing..."); - client().index(indexRequest("test").type("type1").id("1").source(jsonBuilder().startObject().field("text", "lucene").endObject())) + client().index(indexRequest("test").id("1").source(jsonBuilder().startObject().field("text", "lucene").endObject())).actionGet(); + client().index(indexRequest("test").id("2").source(jsonBuilder().startObject().field("text", "lucene release").endObject())) .actionGet(); - client().index( - indexRequest("test").type("type1").id("2").source(jsonBuilder().startObject().field("text", "lucene release").endObject()) - ).actionGet(); client().admin().indices().refresh(refreshRequest()).actionGet(); logger.info("Running moreLikeThis"); @@ -139,16 +138,14 @@ public void testSimpleMoreLikeThisWithTypes() throws Exception { assertThat(ensureGreen(), equalTo(ClusterHealthStatus.GREEN)); logger.info("Indexing..."); - client().index(indexRequest("test").type("type1").id("1").source(jsonBuilder().startObject().field("text", "lucene").endObject())) + client().index(indexRequest("test").id("1").source(jsonBuilder().startObject().field("text", "lucene").endObject())).actionGet(); + client().index(indexRequest("test").id("2").source(jsonBuilder().startObject().field("text", "lucene release").endObject())) .actionGet(); - client().index( - indexRequest("test").type("type1").id("2").source(jsonBuilder().startObject().field("text", "lucene release").endObject()) - ).actionGet(); client().admin().indices().refresh(refreshRequest()).actionGet(); logger.info("Running moreLikeThis"); SearchResponse response = client().prepareSearch() - .setQuery(new MoreLikeThisQueryBuilder(null, new Item[] { new Item("test", "type1", "1") }).minTermFreq(1).minDocFreq(1)) + .setQuery(new MoreLikeThisQueryBuilder(null, new Item[] { new Item("test", "1") }).minTermFreq(1).minDocFreq(1)) .get(); assertHitCount(response, 1L); } @@ -176,14 +173,10 @@ public void testMoreLikeThisForZeroTokensInOneOfTheAnalyzedFields() throws Excep ensureGreen(); client().index( - indexRequest("test").type("type") - .id("1") - .source(jsonBuilder().startObject().field("myField", "and_foo").field("empty", "").endObject()) + indexRequest("test").id("1").source(jsonBuilder().startObject().field("myField", "and_foo").field("empty", "").endObject()) ).actionGet(); client().index( - indexRequest("test").type("type") - .id("2") - .source(jsonBuilder().startObject().field("myField", "and_foo").field("empty", "").endObject()) + indexRequest("test").id("2").source(jsonBuilder().startObject().field("myField", 
"and_foo").field("empty", "").endObject()) ).actionGet(); client().admin().indices().refresh(refreshRequest()).actionGet(); @@ -205,13 +198,10 @@ public void testSimpleMoreLikeOnLongField() throws Exception { assertThat(ensureGreen(), equalTo(ClusterHealthStatus.GREEN)); logger.info("Indexing..."); - client().index( - indexRequest("test").type("type1").id("1").source(jsonBuilder().startObject().field("some_long", 1367484649580L).endObject()) - ).actionGet(); - client().index(indexRequest("test").type("type1").id("2").source(jsonBuilder().startObject().field("some_long", 0).endObject())) - .actionGet(); - client().index(indexRequest("test").type("type1").id("3").source(jsonBuilder().startObject().field("some_long", -666).endObject())) + client().index(indexRequest("test").id("1").source(jsonBuilder().startObject().field("some_long", 1367484649580L).endObject())) .actionGet(); + client().index(indexRequest("test").id("2").source(jsonBuilder().startObject().field("some_long", 0).endObject())).actionGet(); + client().index(indexRequest("test").id("3").source(jsonBuilder().startObject().field("some_long", -666).endObject())).actionGet(); client().admin().indices().refresh(refreshRequest()).actionGet(); @@ -250,18 +240,14 @@ public void testMoreLikeThisWithAliases() throws Exception { assertThat(ensureGreen(), equalTo(ClusterHealthStatus.GREEN)); logger.info("Indexing..."); - client().index( - indexRequest("test").type("type1").id("1").source(jsonBuilder().startObject().field("text", "lucene beta").endObject()) - ).actionGet(); - client().index( - indexRequest("test").type("type1").id("2").source(jsonBuilder().startObject().field("text", "lucene release").endObject()) - ).actionGet(); - client().index( - indexRequest("test").type("type1").id("3").source(jsonBuilder().startObject().field("text", "opensearch beta").endObject()) - ).actionGet(); - client().index( - indexRequest("test").type("type1").id("4").source(jsonBuilder().startObject().field("text", "opensearch release").endObject()) - ).actionGet(); + client().index(indexRequest("test").id("1").source(jsonBuilder().startObject().field("text", "lucene beta").endObject())) + .actionGet(); + client().index(indexRequest("test").id("2").source(jsonBuilder().startObject().field("text", "lucene release").endObject())) + .actionGet(); + client().index(indexRequest("test").id("3").source(jsonBuilder().startObject().field("text", "opensearch beta").endObject())) + .actionGet(); + client().index(indexRequest("test").id("4").source(jsonBuilder().startObject().field("text", "opensearch release").endObject())) + .actionGet(); client().admin().indices().refresh(refreshRequest()).actionGet(); logger.info("Running moreLikeThis on index"); @@ -307,19 +293,16 @@ public void testMoreLikeThisWithAliasesInLikeDocuments() throws Exception { assertThat(ensureGreen(), equalTo(ClusterHealthStatus.GREEN)); - client().index( - indexRequest(indexName).type(typeName).id("1").source(jsonBuilder().startObject().field("text", "opensearch index").endObject()) - ).actionGet(); - client().index( - indexRequest(indexName).type(typeName).id("2").source(jsonBuilder().startObject().field("text", "lucene index").endObject()) - ).actionGet(); - client().index( - indexRequest(indexName).type(typeName).id("3").source(jsonBuilder().startObject().field("text", "opensearch index").endObject()) - ).actionGet(); + client().index(indexRequest(indexName).id("1").source(jsonBuilder().startObject().field("text", "opensearch index").endObject())) + .actionGet(); + 
client().index(indexRequest(indexName).id("2").source(jsonBuilder().startObject().field("text", "lucene index").endObject())) + .actionGet(); + client().index(indexRequest(indexName).id("3").source(jsonBuilder().startObject().field("text", "opensearch index").endObject())) + .actionGet(); refresh(indexName); SearchResponse response = client().prepareSearch() - .setQuery(new MoreLikeThisQueryBuilder(null, new Item[] { new Item(aliasName, typeName, "1") }).minTermFreq(1).minDocFreq(1)) + .setQuery(new MoreLikeThisQueryBuilder(null, new Item[] { new Item(aliasName, "1") }).minTermFreq(1).minDocFreq(1)) .get(); assertHitCount(response, 2L); assertThat(response.getHits().getAt(0).getId(), equalTo("3")); @@ -337,11 +320,11 @@ public void testMoreLikeThisIssue2197() throws Exception { assertThat(ensureGreen(), equalTo(ClusterHealthStatus.GREEN)); SearchResponse response = client().prepareSearch() - .setQuery(new MoreLikeThisQueryBuilder(null, new Item[] { new Item("foo", "bar", "1") })) + .setQuery(new MoreLikeThisQueryBuilder(null, new Item[] { new Item("foo", "1") })) .get(); assertNoFailures(response); assertThat(response, notNullValue()); - response = client().prepareSearch().setQuery(new MoreLikeThisQueryBuilder(null, new Item[] { new Item("foo", "bar", "1") })).get(); + response = client().prepareSearch().setQuery(new MoreLikeThisQueryBuilder(null, new Item[] { new Item("foo", "1") })).get(); assertNoFailures(response); assertThat(response, notNullValue()); } @@ -361,7 +344,7 @@ public void testMoreLikeWithCustomRouting() throws Exception { client().admin().indices().prepareRefresh("foo").get(); SearchResponse response = client().prepareSearch() - .setQuery(new MoreLikeThisQueryBuilder(null, new Item[] { new Item("foo", "bar", "1").routing("2") })) + .setQuery(new MoreLikeThisQueryBuilder(null, new Item[] { new Item("foo", "1").routing("2") })) .get(); assertNoFailures(response); assertThat(response, notNullValue()); @@ -387,7 +370,7 @@ public void testMoreLikeThisIssueRoutingNotSerialized() throws Exception { .get(); client().admin().indices().prepareRefresh("foo").get(); SearchResponse response = client().prepareSearch() - .setQuery(new MoreLikeThisQueryBuilder(null, new Item[] { new Item("foo", "bar", "1").routing("4000") })) + .setQuery(new MoreLikeThisQueryBuilder(null, new Item[] { new Item("foo", "1").routing("4000") })) .get(); assertNoFailures(response); assertThat(response, notNullValue()); @@ -530,7 +513,7 @@ public void testMoreLikeThisWithFieldAlias() throws Exception { index("test", "_doc", "2", "text", "lucene release"); refresh(); - Item item = new Item("test", "_doc", "1"); + Item item = new Item("test", "1"); QueryBuilder query = QueryBuilders.moreLikeThisQuery(new String[] { "alias" }, null, new Item[] { item }) .minTermFreq(1) .minDocFreq(1); @@ -560,8 +543,7 @@ public void testSimpleMoreLikeInclude() throws Exception { logger.info("Indexing..."); client().index( - indexRequest("test").type("type1") - .id("1") + indexRequest("test").id("1") .source( jsonBuilder().startObject() .field("text", "Apache Lucene is a free/open source information retrieval software library") @@ -569,8 +551,7 @@ public void testSimpleMoreLikeInclude() throws Exception { ) ).actionGet(); client().index( - indexRequest("test").type("type1") - .id("2") + indexRequest("test").id("2") .source(jsonBuilder().startObject().field("text", "Lucene has been ported to other programming languages").endObject()) ).actionGet(); client().admin().indices().refresh(refreshRequest()).actionGet(); @@ -588,7 +569,7 
@@ public void testSimpleMoreLikeInclude() throws Exception { response = client().prepareSearch() .setQuery( - new MoreLikeThisQueryBuilder(null, new Item[] { new Item("test", "type1", "2") }).minTermFreq(1) + new MoreLikeThisQueryBuilder(null, new Item[] { new Item("test", "2") }).minTermFreq(1) .minDocFreq(1) .include(true) .minimumShouldMatch("0%") @@ -635,12 +616,12 @@ public void testSimpleMoreLikeThisIds() throws Exception { indexRandom(true, builders); logger.info("Running MoreLikeThis"); - Item[] items = new Item[] { new Item(null, null, "1") }; + Item[] items = new Item[] { new Item(null, "1") }; MoreLikeThisQueryBuilder queryBuilder = QueryBuilders.moreLikeThisQuery(new String[] { "text" }, null, items) .include(true) .minTermFreq(1) .minDocFreq(1); - SearchResponse mltResponse = client().prepareSearch().setTypes("type1").setQuery(queryBuilder).get(); + SearchResponse mltResponse = client().prepareSearch().setQuery(queryBuilder).get(); assertHitCount(mltResponse, 3L); } @@ -667,12 +648,12 @@ public void testMoreLikeThisMultiValueFields() throws Exception { for (int i = 0; i < maxIters; i++) { int max_query_terms = randomIntBetween(1, values.length); logger.info("Running More Like This with max_query_terms = {}", max_query_terms); - MoreLikeThisQueryBuilder mltQuery = moreLikeThisQuery(new String[] { "text" }, null, new Item[] { new Item(null, null, "0") }) + MoreLikeThisQueryBuilder mltQuery = moreLikeThisQuery(new String[] { "text" }, null, new Item[] { new Item(null, "0") }) .minTermFreq(1) .minDocFreq(1) .maxQueryTerms(max_query_terms) .minimumShouldMatch("0%"); - SearchResponse response = client().prepareSearch("test").setTypes("type1").setQuery(mltQuery).get(); + SearchResponse response = client().prepareSearch("test").setQuery(mltQuery).get(); assertSearchResponse(response); assertHitCount(response, max_query_terms); } @@ -705,7 +686,7 @@ public void testMinimumShouldMatch() throws ExecutionException, InterruptedExcep .minDocFreq(1) .minimumShouldMatch(minimumShouldMatch); logger.info("Testing with minimum_should_match = {}", minimumShouldMatch); - SearchResponse response = client().prepareSearch("test").setTypes("type1").setQuery(mltQuery).get(); + SearchResponse response = client().prepareSearch("test").setQuery(mltQuery).get(); assertSearchResponse(response); if (minimumShouldMatch.equals("0%")) { assertHitCount(response, 10); @@ -731,24 +712,32 @@ public void testMoreLikeThisArtificialDocs() throws Exception { logger.info("Checking the document matches ..."); // routing to ensure we hit the shard with the doc - MoreLikeThisQueryBuilder mltQuery = moreLikeThisQuery(new Item[] { new Item("test", "type1", doc).routing("0") }).minTermFreq(0) + MoreLikeThisQueryBuilder mltQuery = moreLikeThisQuery(new Item[] { new Item("test", doc).routing("0") }).minTermFreq(0) .minDocFreq(0) .maxQueryTerms(100) .minimumShouldMatch("100%"); // strict all terms must match! 
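Reviewer note on the hunks above: every typed Item(index, type, id) becomes the two-argument Item(index, id), and setTypes(...) disappears from the search request builders. A minimal sketch of the resulting typeless more-like-this call, assuming an existing "test" index with a "text" field (the class and method names below are illustrative, not part of this change):

    import org.opensearch.action.search.SearchResponse;
    import org.opensearch.client.Client;
    import org.opensearch.index.query.MoreLikeThisQueryBuilder;
    import org.opensearch.index.query.MoreLikeThisQueryBuilder.Item;

    public final class TypelessMoreLikeThisSketch {
        // Find documents similar to docId; items are now addressed by (index, id) only.
        public static SearchResponse findSimilar(Client client, String docId) {
            Item[] likeItems = new Item[] { new Item("test", docId) };
            MoreLikeThisQueryBuilder query = new MoreLikeThisQueryBuilder(null, likeItems)
                .minTermFreq(1)
                .minDocFreq(1);
            // No setTypes(...): the request addresses the index alone.
            return client.prepareSearch("test").setQuery(query).get();
        }
    }
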
- SearchResponse response = client().prepareSearch("test").setTypes("type1").setQuery(mltQuery).get(); + SearchResponse response = client().prepareSearch("test").setQuery(mltQuery).get(); assertSearchResponse(response); assertHitCount(response, 1); } public void testMoreLikeThisMalformedArtificialDocs() throws Exception { logger.info("Creating the index ..."); - assertAcked(prepareCreate("test").addMapping("type1", "text", "type=text,analyzer=whitespace", "date", "type=date")); + assertAcked( + prepareCreate("test").addMapping( + MapperService.SINGLE_MAPPING_NAME, + "text", + "type=text,analyzer=whitespace", + "date", + "type=date" + ) + ); ensureGreen("test"); logger.info("Creating an index with a single document ..."); indexRandom( true, - client().prepareIndex("test", "type1", "1") + client().prepareIndex("test", MapperService.SINGLE_MAPPING_NAME, "1") .setSource(jsonBuilder().startObject().field("text", "Hello World!").field("date", "2009-01-01").endObject()) ); @@ -757,19 +746,17 @@ public void testMoreLikeThisMalformedArtificialDocs() throws Exception { .field("text", "Hello World!") .field("date", "this is not a date!") .endObject(); - MoreLikeThisQueryBuilder mltQuery = moreLikeThisQuery(new Item[] { new Item("test", "type1", malformedFieldDoc) }).minTermFreq(0) + MoreLikeThisQueryBuilder mltQuery = moreLikeThisQuery(new Item[] { new Item("test", malformedFieldDoc) }).minTermFreq(0) .minDocFreq(0) .minimumShouldMatch("0%"); - SearchResponse response = client().prepareSearch("test").setTypes("type1").setQuery(mltQuery).get(); + SearchResponse response = client().prepareSearch("test").setQuery(mltQuery).get(); assertSearchResponse(response); assertHitCount(response, 0); logger.info("Checking with an empty document ..."); XContentBuilder emptyDoc = jsonBuilder().startObject().endObject(); - mltQuery = moreLikeThisQuery(null, new Item[] { new Item("test", "type1", emptyDoc) }).minTermFreq(0) - .minDocFreq(0) - .minimumShouldMatch("0%"); - response = client().prepareSearch("test").setTypes("type1").setQuery(mltQuery).get(); + mltQuery = moreLikeThisQuery(null, new Item[] { new Item("test", emptyDoc) }).minTermFreq(0).minDocFreq(0).minimumShouldMatch("0%"); + response = client().prepareSearch("test").setQuery(mltQuery).get(); assertSearchResponse(response); assertHitCount(response, 0); @@ -778,10 +765,10 @@ public void testMoreLikeThisMalformedArtificialDocs() throws Exception { .field("text", "Hello World!") .field("date", "1000-01-01") // should be properly parsed but ignored ... 
.endObject(); - mltQuery = moreLikeThisQuery(null, new Item[] { new Item("test", "type1", normalDoc) }).minTermFreq(0) + mltQuery = moreLikeThisQuery(null, new Item[] { new Item("test", normalDoc) }).minTermFreq(0) .minDocFreq(0) .minimumShouldMatch("100%"); // strict all terms must match but date is ignored - response = client().prepareSearch("test").setTypes("type1").setQuery(mltQuery).get(); + response = client().prepareSearch("test").setQuery(mltQuery).get(); assertSearchResponse(response); assertHitCount(response, 1); } @@ -806,26 +793,26 @@ public void testMoreLikeThisUnlike() throws ExecutionException, InterruptedExcep indexRandom(true, builders); logger.info("First check the document matches all indexed docs."); - MoreLikeThisQueryBuilder mltQuery = moreLikeThisQuery(new Item[] { new Item("test", "type1", doc) }).minTermFreq(0) + MoreLikeThisQueryBuilder mltQuery = moreLikeThisQuery(new Item[] { new Item("test", doc) }).minTermFreq(0) .minDocFreq(0) .maxQueryTerms(100) .minimumShouldMatch("0%"); - SearchResponse response = client().prepareSearch("test").setTypes("type1").setQuery(mltQuery).get(); + SearchResponse response = client().prepareSearch("test").setQuery(mltQuery).get(); assertSearchResponse(response); assertHitCount(response, numFields); logger.info("Now check like this doc, but ignore one doc in the index, then two and so on..."); List docs = new ArrayList<>(numFields); for (int i = 0; i < numFields; i++) { - docs.add(new Item("test", "type1", i + "")); - mltQuery = moreLikeThisQuery(null, new Item[] { new Item("test", "type1", doc) }).unlike(docs.toArray(new Item[docs.size()])) + docs.add(new Item("test", i + "")); + mltQuery = moreLikeThisQuery(null, new Item[] { new Item("test", doc) }).unlike(docs.toArray(new Item[docs.size()])) .minTermFreq(0) .minDocFreq(0) .maxQueryTerms(100) .include(true) .minimumShouldMatch("0%"); - response = client().prepareSearch("test").setTypes("type1").setQuery(mltQuery).get(); + response = client().prepareSearch("test").setQuery(mltQuery).get(); assertSearchResponse(response); assertHitCount(response, numFields - (i + 1)); } @@ -849,7 +836,7 @@ public void testSelectFields() throws IOException, ExecutionException, Interrupt .minDocFreq(0) .include(true) .minimumShouldMatch("1%"); - SearchResponse response = client().prepareSearch("test").setTypes("type1").setQuery(mltQuery).get(); + SearchResponse response = client().prepareSearch("test").setQuery(mltQuery).get(); assertSearchResponse(response); assertHitCount(response, 2); @@ -857,7 +844,7 @@ public void testSelectFields() throws IOException, ExecutionException, Interrupt .minDocFreq(0) .include(true) .minimumShouldMatch("1%"); - response = client().prepareSearch("test").setTypes("type1").setQuery(mltQuery).get(); + response = client().prepareSearch("test").setQuery(mltQuery).get(); assertSearchResponse(response); assertHitCount(response, 1); } @@ -868,7 +855,7 @@ public void testWithRouting() throws IOException { client().prepareIndex("index", "type", "3").setRouting("4").setSource("text", "this is yet another document").get(); refresh("index"); - Item item = new Item("index", "type", "2").routing("1"); + Item item = new Item("index", "2").routing("1"); MoreLikeThisQueryBuilder moreLikeThisQueryBuilder = new MoreLikeThisQueryBuilder( new String[] { "text" }, null, @@ -915,7 +902,7 @@ public void testWithMissingRouting() throws IOException { Throwable cause = exception.getCause(); assertThat(cause, instanceOf(RoutingMissingException.class)); - assertThat(cause.getMessage(), 
equalTo("routing is required for [test]/[_doc]/[1]")); + assertThat(cause.getMessage(), equalTo("routing is required for [test]/[1]")); } { @@ -926,10 +913,7 @@ public void testWithMissingRouting() throws IOException { .setQuery( new MoreLikeThisQueryBuilder( null, - new Item[] { - new Item("test", "1").routing("1"), - new Item("test", "type1", "2"), - new Item("test", "type1", "3") } + new Item[] { new Item("test", "1").routing("1"), new Item("test", "2"), new Item("test", "3") } ).minTermFreq(1).minDocFreq(1) ) .get() @@ -937,7 +921,7 @@ public void testWithMissingRouting() throws IOException { Throwable cause = exception.getCause(); assertThat(cause, instanceOf(RoutingMissingException.class)); - assertThat(cause.getMessage(), equalTo("routing is required for [test]/[type1]/[2]")); + assertThat(cause.getMessage(), equalTo("routing is required for [test]/[2]")); } } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java b/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java index bac944cc51a78..dac0a5d01b516 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java @@ -101,7 +101,7 @@ public void testSimpleNested() throws Exception { .get(); waitForRelocation(ClusterHealthStatus.GREEN); - GetResponse getResponse = client().prepareGet("test", "type1", "1").get(); + GetResponse getResponse = client().prepareGet("test", "1").get(); assertThat(getResponse.isExists(), equalTo(true)); assertThat(getResponse.getSourceAsBytes(), notNullValue()); refresh(); @@ -263,7 +263,7 @@ public void testMultiNested() throws Exception { ) .get(); - GetResponse getResponse = client().prepareGet("test", "type1", "1").get(); + GetResponse getResponse = client().prepareGet("test", "1").get(); assertThat(getResponse.isExists(), equalTo(true)); waitForRelocation(ClusterHealthStatus.GREEN); refresh(); @@ -548,7 +548,6 @@ public void testSimpleNestedSorting() throws Exception { refresh(); SearchResponse searchResponse = client().prepareSearch("test") - .setTypes("type1") .setQuery(QueryBuilders.matchAllQuery()) .addSort(SortBuilders.fieldSort("nested1.field1").order(SortOrder.ASC).setNestedPath("nested1")) .get(); @@ -562,7 +561,6 @@ public void testSimpleNestedSorting() throws Exception { assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("4")); searchResponse = client().prepareSearch("test") - .setTypes("type1") .setQuery(QueryBuilders.matchAllQuery()) .addSort(SortBuilders.fieldSort("nested1.field1").order(SortOrder.DESC).setNestedPath("nested1")) .get(); @@ -658,7 +656,6 @@ public void testSimpleNestedSortingWithNestedFilterMissing() throws Exception { refresh(); SearchRequestBuilder searchRequestBuilder = client().prepareSearch("test") - .setTypes("type1") .setQuery(QueryBuilders.matchAllQuery()) .addSort( SortBuilders.fieldSort("nested1.field1") @@ -683,7 +680,6 @@ public void testSimpleNestedSortingWithNestedFilterMissing() throws Exception { assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("10")); searchRequestBuilder = client().prepareSearch("test") - .setTypes("type1") .setQuery(QueryBuilders.matchAllQuery()) .addSort( SortBuilders.fieldSort("nested1.field1") diff --git a/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java 
b/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java index 96e7fd0b843fa..589c30894e3d1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java @@ -516,7 +516,6 @@ public void testPhrase() throws Exception { SearchResponse resp = client().prepareSearch() .setQuery(q) .setIndices("test") - .setTypes("type1") .setProfile(true) .setSearchType(SearchType.QUERY_THEN_FETCH) .get(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/ExistsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/ExistsIT.java index 113bdd5027c5d..2b2a36ea76d8a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/ExistsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/ExistsIT.java @@ -156,9 +156,8 @@ public void testExists() throws Exception { } catch (AssertionError e) { for (SearchHit searchHit : allDocs.getHits()) { final String index = searchHit.getIndex(); - final String type = searchHit.getType(); final String id = searchHit.getId(); - final ExplainResponse explanation = client().prepareExplain(index, type, id) + final ExplainResponse explanation = client().prepareExplain(index, id) .setQuery(QueryBuilders.existsQuery(fieldName)) .get(); logger.info( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java index 38ab776aa94d0..dc0bcafa43f37 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java @@ -45,7 +45,6 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.SearchType; import org.opensearch.bootstrap.JavaVersion; -import org.opensearch.common.Strings; import org.opensearch.common.document.DocumentField; import org.opensearch.common.lucene.search.SpanBooleanQueryRewriteWithMaxClause; import org.opensearch.common.regex.Regex; @@ -124,7 +123,6 @@ import static org.opensearch.index.query.QueryBuilders.termQuery; import static org.opensearch.index.query.QueryBuilders.termsLookupQuery; import static org.opensearch.index.query.QueryBuilders.termsQuery; -import static org.opensearch.index.query.QueryBuilders.typeQuery; import static org.opensearch.index.query.QueryBuilders.wildcardQuery; import static org.opensearch.index.query.QueryBuilders.wrapperQuery; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @@ -557,23 +555,6 @@ public void testDateRangeInQueryStringWithTimeZone_10477() { assertHitCount(searchResponse, 0L); } - public void testTypeFilter() throws Exception { - assertAcked(prepareCreate("test")); - indexRandom( - true, - client().prepareIndex("test", "type1", "1").setSource("field1", "value1"), - client().prepareIndex("test", "type1", "2").setSource("field1", "value1") - ); - - assertHitCount(client().prepareSearch().setQuery(typeQuery("type1")).get(), 2L); - assertHitCount(client().prepareSearch().setQuery(typeQuery("type2")).get(), 0L); - - assertHitCount(client().prepareSearch().setTypes("type1").setQuery(matchAllQuery()).get(), 2L); - assertHitCount(client().prepareSearch().setTypes("type2").setQuery(matchAllQuery()).get(), 0L); - - assertHitCount(client().prepareSearch().setTypes("type1", 
"type2").setQuery(matchAllQuery()).get(), 2L); - } - public void testIdsQueryTestsIdIndexed() throws Exception { assertAcked(client().admin().indices().prepareCreate("test")); @@ -584,29 +565,19 @@ public void testIdsQueryTestsIdIndexed() throws Exception { client().prepareIndex("test", "type1", "3").setSource("field1", "value3") ); - SearchResponse searchResponse = client().prepareSearch().setQuery(constantScoreQuery(idsQuery("type1").addIds("1", "3"))).get(); - assertHitCount(searchResponse, 2L); - assertSearchHits(searchResponse, "1", "3"); - - // no type - searchResponse = client().prepareSearch().setQuery(constantScoreQuery(idsQuery().addIds("1", "3"))).get(); + SearchResponse searchResponse = client().prepareSearch().setQuery(constantScoreQuery(idsQuery().addIds("1", "3"))).get(); assertHitCount(searchResponse, 2L); assertSearchHits(searchResponse, "1", "3"); - searchResponse = client().prepareSearch().setQuery(idsQuery("type1").addIds("1", "3")).get(); - assertHitCount(searchResponse, 2L); - assertSearchHits(searchResponse, "1", "3"); - - // no type searchResponse = client().prepareSearch().setQuery(idsQuery().addIds("1", "3")).get(); assertHitCount(searchResponse, 2L); assertSearchHits(searchResponse, "1", "3"); - searchResponse = client().prepareSearch().setQuery(idsQuery("type1").addIds("7", "10")).get(); + searchResponse = client().prepareSearch().setQuery(idsQuery().addIds("7", "10")).get(); assertHitCount(searchResponse, 0L); // repeat..., with terms - searchResponse = client().prepareSearch().setTypes("type1").setQuery(constantScoreQuery(termsQuery("_id", "1", "3"))).get(); + searchResponse = client().prepareSearch().setQuery(constantScoreQuery(termsQuery("_id", "1", "3"))).get(); assertHitCount(searchResponse, 2L); assertSearchHits(searchResponse, "1", "3"); } @@ -1298,7 +1269,7 @@ public void testBasicQueryById() throws Exception { client().prepareIndex("test", "_doc", "3").setSource("field1", "value3").get(); refresh(); - SearchResponse searchResponse = client().prepareSearch().setQuery(idsQuery("_doc").addIds("1", "2")).get(); + SearchResponse searchResponse = client().prepareSearch().setQuery(idsQuery().addIds("1", "2")).get(); assertHitCount(searchResponse, 2L); assertThat(searchResponse.getHits().getHits().length, equalTo(2)); @@ -1310,11 +1281,11 @@ public void testBasicQueryById() throws Exception { assertHitCount(searchResponse, 2L); assertThat(searchResponse.getHits().getHits().length, equalTo(2)); - searchResponse = client().prepareSearch().setQuery(idsQuery(Strings.EMPTY_ARRAY).addIds("1")).get(); + searchResponse = client().prepareSearch().setQuery(idsQuery().addIds("1")).get(); assertHitCount(searchResponse, 1L); assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - searchResponse = client().prepareSearch().setQuery(idsQuery("type1", "type2", "_doc").addIds("1", "2", "3", "4")).get(); + searchResponse = client().prepareSearch().setQuery(idsQuery().addIds("1", "2", "3", "4")).get(); assertHitCount(searchResponse, 3L); assertThat(searchResponse.getHits().getHits().length, equalTo(3)); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java index 1e74f9a84e863..d23ddabedd348 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java @@ -264,7 +264,7 @@ public void 
testNestedFieldSimpleQueryString() throws IOException { assertHitCount(searchResponse, 1L); assertSearchHits(searchResponse, "1"); - searchResponse = client().prepareSearch().setTypes("type1").setQuery(simpleQueryStringQuery("foo bar baz").field("body")).get(); + searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("foo bar baz").field("body")).get(); assertHitCount(searchResponse, 1L); assertSearchHits(searchResponse, "1"); @@ -272,7 +272,7 @@ public void testNestedFieldSimpleQueryString() throws IOException { assertHitCount(searchResponse, 1L); assertSearchHits(searchResponse, "1"); - searchResponse = client().prepareSearch().setTypes("type1").setQuery(simpleQueryStringQuery("foo bar baz").field("body.sub")).get(); + searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("foo bar baz").field("body.sub")).get(); assertHitCount(searchResponse, 1L); assertSearchHits(searchResponse, "1"); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollIT.java b/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollIT.java index eeb4b9d156517..a56c014a08ba5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollIT.java @@ -543,7 +543,6 @@ public void testStringSortMissingAscTerminates() throws Exception { refresh(); SearchResponse response = client().prepareSearch("test") - .setTypes("test") .addSort(new FieldSortBuilder("no_field").order(SortOrder.ASC).missing("_last")) .setScroll("1m") .get(); @@ -556,7 +555,6 @@ public void testStringSortMissingAscTerminates() throws Exception { assertNoSearchHits(response); response = client().prepareSearch("test") - .setTypes("test") .addSort(new FieldSortBuilder("no_field").order(SortOrder.ASC).missing("_first")) .setScroll("1m") .get(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceIT.java index 3774f4e7c7f4c..f964baead2534 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceIT.java @@ -590,7 +590,7 @@ public void testGeoDistanceFilter() throws IOException { assertAcked(prepareCreate("locations").setSettings(settings).addMapping("location", mapping)); client().prepareIndex("locations", "location", "1").setCreate(true).setSource(source).get(); refresh(); - client().prepareGet("locations", "location", "1").get(); + client().prepareGet("locations", "1").get(); SearchResponse result = client().prepareSearch("locations") .setQuery(QueryBuilders.matchAllQuery()) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/SortFromPluginIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/SortFromPluginIT.java new file mode 100644 index 0000000000000..b36168f2a110f --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/SortFromPluginIT.java @@ -0,0 +1,94 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.sort; + +import static org.hamcrest.Matchers.equalTo; + +import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.plugins.Plugin; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.search.sort.plugin.CustomSortBuilder; +import org.opensearch.search.sort.plugin.CustomSortPlugin; +import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.Arrays; +import java.util.Collection; + +public class SortFromPluginIT extends OpenSearchIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(CustomSortPlugin.class, InternalSettingsPlugin.class); + } + + public void testPluginSort() throws Exception { + createIndex("test"); + ensureGreen(); + + client().prepareIndex("test", "type", "1").setSource("field", 2).get(); + client().prepareIndex("test", "type", "2").setSource("field", 1).get(); + client().prepareIndex("test", "type", "3").setSource("field", 0).get(); + + refresh(); + + SearchResponse searchResponse = client().prepareSearch("test").addSort(new CustomSortBuilder("field", SortOrder.ASC)).get(); + assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("3")); + assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2")); + assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("1")); + + searchResponse = client().prepareSearch("test").addSort(new CustomSortBuilder("field", SortOrder.DESC)).get(); + assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); + assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2")); + assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); + } + + public void testPluginSortXContent() throws Exception { + createIndex("test"); + ensureGreen(); + + client().prepareIndex("test", "type", "1").setSource("field", 2).get(); + client().prepareIndex("test", "type", "2").setSource("field", 1).get(); + client().prepareIndex("test", "type", "3").setSource("field", 0).get(); + + refresh(); + + // builder -> json -> builder + SearchResponse searchResponse = client().prepareSearch("test") + .setSource( + SearchSourceBuilder.fromXContent( + createParser( + JsonXContent.jsonXContent, + new SearchSourceBuilder().sort(new CustomSortBuilder("field", SortOrder.ASC)).toString() + ) + ) + ) + .get(); + + assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("3")); + assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2")); + assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("1")); + + searchResponse = client().prepareSearch("test") + .setSource( + SearchSourceBuilder.fromXContent( + createParser( + JsonXContent.jsonXContent, + new SearchSourceBuilder().sort(new CustomSortBuilder("field", SortOrder.DESC)).toString() + ) + ) + ) + .get(); + + assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); + assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2")); + assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/search/source/MetadataFetchingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/source/MetadataFetchingIT.java index d2ca32173a978..438089ba65bd0 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/source/MetadataFetchingIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/search/source/MetadataFetchingIT.java @@ -60,13 +60,11 @@ public void testSimple() { SearchResponse response = client().prepareSearch("test").storedFields("_none_").setFetchSource(false).setVersion(true).get(); assertThat(response.getHits().getAt(0).getId(), nullValue()); - assertThat(response.getHits().getAt(0).getType(), equalTo("_doc")); assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue()); assertThat(response.getHits().getAt(0).getVersion(), notNullValue()); response = client().prepareSearch("test").storedFields("_none_").get(); assertThat(response.getHits().getAt(0).getId(), nullValue()); - assertThat(response.getHits().getAt(0).getType(), equalTo("_doc")); assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue()); } @@ -88,13 +86,11 @@ public void testInnerHits() { .get(); assertThat(response.getHits().getTotalHits().value, equalTo(1L)); assertThat(response.getHits().getAt(0).getId(), nullValue()); - assertThat(response.getHits().getAt(0).getType(), equalTo("_doc")); assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue()); assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1)); SearchHits hits = response.getHits().getAt(0).getInnerHits().get("nested"); assertThat(hits.getTotalHits().value, equalTo(1L)); assertThat(hits.getAt(0).getId(), nullValue()); - assertThat(hits.getAt(0).getType(), equalTo("_doc")); assertThat(hits.getAt(0).getSourceAsString(), nullValue()); } @@ -107,13 +103,11 @@ public void testWithRouting() { SearchResponse response = client().prepareSearch("test").storedFields("_none_").setFetchSource(false).get(); assertThat(response.getHits().getAt(0).getId(), nullValue()); - assertThat(response.getHits().getAt(0).getType(), equalTo("_doc")); assertThat(response.getHits().getAt(0).field("_routing"), nullValue()); assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue()); response = client().prepareSearch("test").storedFields("_none_").get(); assertThat(response.getHits().getAt(0).getId(), nullValue()); - assertThat(response.getHits().getAt(0).getType(), equalTo("_doc")); assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue()); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java index d7a79e473b64f..c42602fc6c569 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java @@ -1031,7 +1031,7 @@ public void testThatSortingOnCompletionFieldReturnsUsefulException() throws Exce SearchPhaseExecutionException e = expectThrows( SearchPhaseExecutionException.class, - () -> client().prepareSearch(INDEX).setTypes(TYPE).addSort(new FieldSortBuilder(FIELD)).get() + () -> client().prepareSearch(INDEX).addSort(new FieldSortBuilder(FIELD)).get() ); assertThat(e.status().getStatus(), is(400)); assertThat(e.toString(), containsString("Fielddata is not supported on field [" + FIELD + "] of type [completion]")); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java index b2e8f2df1e2f3..a131ab9ff70e7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java @@ -147,8 +147,8 @@ public void testParallelRestoreOperations() { assertThat(restoreSnapshotResponse1.status(), equalTo(RestStatus.ACCEPTED)); assertThat(restoreSnapshotResponse2.status(), equalTo(RestStatus.ACCEPTED)); ensureGreen(restoredIndexName1, restoredIndexName2); - assertThat(client.prepareGet(restoredIndexName1, "_doc", docId).get().isExists(), equalTo(true)); - assertThat(client.prepareGet(restoredIndexName2, "_doc", docId2).get().isExists(), equalTo(true)); + assertThat(client.prepareGet(restoredIndexName1, docId).get().isExists(), equalTo(true)); + assertThat(client.prepareGet(restoredIndexName2, docId2).get().isExists(), equalTo(true)); } public void testParallelRestoreOperationsFromSingleSnapshot() throws Exception { @@ -206,8 +206,8 @@ public void testParallelRestoreOperationsFromSingleSnapshot() throws Exception { assertThat(restoreSnapshotResponse1.get().status(), equalTo(RestStatus.ACCEPTED)); assertThat(restoreSnapshotResponse2.get().status(), equalTo(RestStatus.ACCEPTED)); ensureGreen(restoredIndexName1, restoredIndexName2); - assertThat(client.prepareGet(restoredIndexName1, "_doc", docId).get().isExists(), equalTo(true)); - assertThat(client.prepareGet(restoredIndexName2, "_doc", sameSourceIndex ? docId : docId2).get().isExists(), equalTo(true)); + assertThat(client.prepareGet(restoredIndexName1, docId).get().isExists(), equalTo(true)); + assertThat(client.prepareGet(restoredIndexName2, sameSourceIndex ? docId : docId2).get().isExists(), equalTo(true)); } public void testRestoreIncreasesPrimaryTerms() { diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SharedClusterSnapshotRestoreIT.java index 441acc5ce0566..f3190585cff85 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -73,6 +73,7 @@ import org.opensearch.index.IndexService; import org.opensearch.index.engine.Engine; import org.opensearch.index.engine.EngineTestCase; +import org.opensearch.index.mapper.MapperService; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.ShardId; import org.opensearch.indices.IndicesService; @@ -102,7 +103,6 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY; -import static org.opensearch.index.IndexSettings.INDEX_SOFT_DELETES_SETTING; import static org.opensearch.index.shard.IndexShardTests.getEngineFromShard; import static org.opensearch.indices.recovery.RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @@ -289,12 +289,11 @@ public void testSingleGetAfterRestore() throws Exception { Path absolutePath = randomRepoPath().toAbsolutePath(); logger.info("Path [{}]", absolutePath); String restoredIndexName = indexName + "-restored"; - String typeName = "actions"; String expectedValue = "expected"; // Write a document String docId = Integer.toString(randomInt()); - index(indexName, typeName, docId, "value", expectedValue); + index(indexName, MapperService.SINGLE_MAPPING_NAME, docId, "value", expectedValue); createRepository(repoName, "fs", absolutePath); 
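Where a mapping name is still unavoidable during the transition, the hunks above substitute MapperService.SINGLE_MAPPING_NAME ("_doc") for ad-hoc type strings such as "actions", while reads move to the two-argument prepareGet(index, id). A short sketch of that write-then-verify pattern, with placeholder index and id values (the helper class is illustrative only):

    import org.opensearch.action.get.GetResponse;
    import org.opensearch.client.Client;
    import org.opensearch.index.mapper.MapperService;

    public final class SingleMappingNameSketch {
        // Index under the single permitted mapping name, then read back typelessly.
        public static boolean writeAndVerify(Client client, String index, String id) {
            client.prepareIndex(index, MapperService.SINGLE_MAPPING_NAME, id)
                .setSource("value", "expected")
                .get();
            GetResponse response = client.prepareGet(index, id).get();
            return response.isExists();
        }
    }
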
createSnapshot(repoName, snapshotName, Collections.singletonList(indexName)); @@ -306,7 +305,7 @@ public void testSingleGetAfterRestore() throws Exception { .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); - assertThat(client().prepareGet(restoredIndexName, typeName, docId).get().isExists(), equalTo(true)); + assertThat(client().prepareGet(restoredIndexName, docId).get().isExists(), equalTo(true)); } public void testFreshIndexUUID() { @@ -1416,12 +1415,8 @@ public void testSnapshotMoreThanOnce() throws InterruptedException { List shards = snapshotStatus.getShards(); for (SnapshotIndexShardStatus status : shards) { // we flush before the snapshot such that we have to process the segments_N files plus the .del file - if (INDEX_SOFT_DELETES_SETTING.get(settings)) { - // soft-delete generates DV files. - assertThat(status.getStats().getProcessedFileCount(), greaterThan(2)); - } else { - assertThat(status.getStats().getProcessedFileCount(), equalTo(2)); - } + // soft-delete generates DV files. + assertThat(status.getStats().getProcessedFileCount(), greaterThan(2)); } } } diff --git a/server/src/internalClusterTest/java/org/opensearch/update/UpdateIT.java b/server/src/internalClusterTest/java/org/opensearch/update/UpdateIT.java index 661c572d4c959..8f0188c592527 100644 --- a/server/src/internalClusterTest/java/org/opensearch/update/UpdateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/update/UpdateIT.java @@ -176,7 +176,7 @@ public void testUpsert() throws Exception { assertThat(updateResponse.getIndex(), equalTo("test")); for (int i = 0; i < 5; i++) { - GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "1").execute().actionGet(); assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("1")); } @@ -189,7 +189,7 @@ public void testUpsert() throws Exception { assertThat(updateResponse.getIndex(), equalTo("test")); for (int i = 0; i < 5; i++) { - GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "1").execute().actionGet(); assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("2")); } } @@ -219,7 +219,7 @@ public void testScriptedUpsert() throws Exception { assertThat(updateResponse.getIndex(), equalTo("test")); for (int i = 0; i < 5; i++) { - GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "1").execute().actionGet(); assertThat(getResponse.getSourceAsMap().get("balance").toString(), equalTo("9")); } @@ -234,7 +234,7 @@ public void testScriptedUpsert() throws Exception { assertThat(updateResponse.getIndex(), equalTo("test")); for (int i = 0; i < 5; i++) { - GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "1").execute().actionGet(); assertThat(getResponse.getSourceAsMap().get("balance").toString(), equalTo("7")); } } @@ -326,7 +326,7 @@ public void testUpdate() throws Exception { DocumentMissingException.class, () -> client().prepareUpdate(indexOrAlias(), "type1", "1").setScript(fieldIncScript).execute().actionGet() ); - assertEquals("[type1][1]: document missing", ex.getMessage()); + assertEquals("[1]: document missing", ex.getMessage()); client().prepareIndex("test", "type1", "1").setSource("field", 
1).execute().actionGet(); @@ -339,7 +339,7 @@ public void testUpdate() throws Exception { assertThat(updateResponse.getIndex(), equalTo("test")); for (int i = 0; i < 5; i++) { - GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "1").execute().actionGet(); assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("2")); } @@ -355,7 +355,7 @@ public void testUpdate() throws Exception { assertThat(updateResponse.getIndex(), equalTo("test")); for (int i = 0; i < 5; i++) { - GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "1").execute().actionGet(); assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("5")); } @@ -376,7 +376,7 @@ public void testUpdate() throws Exception { assertThat(updateResponse.getIndex(), equalTo("test")); for (int i = 0; i < 5; i++) { - GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "1").execute().actionGet(); assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("5")); } @@ -397,7 +397,7 @@ public void testUpdate() throws Exception { assertThat(updateResponse.getIndex(), equalTo("test")); for (int i = 0; i < 5; i++) { - GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "1").execute().actionGet(); assertThat(getResponse.isExists(), equalTo(false)); } @@ -423,7 +423,7 @@ public void testUpdate() throws Exception { .execute() .actionGet(); for (int i = 0; i < 5; i++) { - GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "1").execute().actionGet(); assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("1")); assertThat(getResponse.getSourceAsMap().get("field2").toString(), equalTo("2")); } @@ -434,7 +434,7 @@ public void testUpdate() throws Exception { .execute() .actionGet(); for (int i = 0; i < 5; i++) { - GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "1").execute().actionGet(); assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("3")); assertThat(getResponse.getSourceAsMap().get("field2").toString(), equalTo("2")); } @@ -455,7 +455,7 @@ public void testUpdate() throws Exception { .execute() .actionGet(); for (int i = 0; i < 5; i++) { - GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "1").execute().actionGet(); Map map1 = (Map) getResponse.getSourceAsMap().get("map"); assertThat(map1.size(), equalTo(3)); assertThat(map1.containsKey("map1"), equalTo(true)); @@ -575,10 +575,9 @@ public void testContextVariables() throws Exception { assertEquals(2, updateResponse.getVersion()); - GetResponse getResponse = client().prepareGet("test", "type1", "id1").setRouting("routing1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "id1").setRouting("routing1").execute().actionGet(); Map updateContext = (Map) getResponse.getSourceAsMap().get("update_context"); assertEquals("test", updateContext.get("_index")); - assertEquals("type1", updateContext.get("_type")); assertEquals("id1", 
updateContext.get("_id")); assertEquals(1, updateContext.get("_version")); assertEquals("routing1", updateContext.get("_routing")); @@ -591,10 +590,9 @@ public void testContextVariables() throws Exception { assertEquals(2, updateResponse.getVersion()); - getResponse = client().prepareGet("test", "type1", "id2").execute().actionGet(); + getResponse = client().prepareGet("test", "id2").execute().actionGet(); updateContext = (Map) getResponse.getSourceAsMap().get("update_context"); assertEquals("test", updateContext.get("_index")); - assertEquals("type1", updateContext.get("_type")); assertEquals("id2", updateContext.get("_id")); assertEquals(1, updateContext.get("_version")); assertNull(updateContext.get("_routing")); @@ -675,7 +673,7 @@ public void run() { } assertThat(failures.size(), equalTo(0)); for (int i = 0; i < numberOfUpdatesPerThread; i++) { - GetResponse response = client().prepareGet("test", "type1", Integer.toString(i)).execute().actionGet(); + GetResponse response = client().prepareGet("test", Integer.toString(i)).execute().actionGet(); assertThat(response.getId(), equalTo(Integer.toString(i))); assertThat(response.isExists(), equalTo(true)); assertThat(response.getVersion(), equalTo((long) numberOfThreads)); @@ -892,7 +890,7 @@ private void waitForOutstandingRequests(TimeValue timeOut, Semaphore requestsOut for (int i = 0; i < numberOfIdsPerThread; ++i) { int totalFailures = 0; - GetResponse response = client().prepareGet("test", "type1", Integer.toString(i)).execute().actionGet(); + GetResponse response = client().prepareGet("test", Integer.toString(i)).execute().actionGet(); if (response.isExists()) { assertThat(response.getId(), equalTo(Integer.toString(i))); int expectedVersion = (numberOfThreads * numberOfUpdatesPerId * 2) + 1; diff --git a/server/src/internalClusterTest/java/org/opensearch/validate/SimpleValidateQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/validate/SimpleValidateQueryIT.java index 67c1ff681cb60..98063c58b90a6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/validate/SimpleValidateQueryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/validate/SimpleValidateQueryIT.java @@ -357,7 +357,7 @@ public void testExplainWithRewriteValidateQuery() throws Exception { assertExplanation(QueryBuilders.fuzzyQuery("field", "jump"), containsString("(field:jumps)^0.75"), true); // more like this queries - Item[] items = new Item[] { new Item(null, null, "1") }; + Item[] items = new Item[] { new Item(null, "1") }; assertExplanation( QueryBuilders.moreLikeThisQuery(new String[] { "field" }, null, items) .include(true) diff --git a/server/src/internalClusterTest/java/org/opensearch/versioning/ConcurrentDocumentOperationIT.java b/server/src/internalClusterTest/java/org/opensearch/versioning/ConcurrentDocumentOperationIT.java index 1eb4d088d260d..81e27c64f821a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/versioning/ConcurrentDocumentOperationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/versioning/ConcurrentDocumentOperationIT.java @@ -77,9 +77,9 @@ public void onFailure(Exception e) { client().admin().indices().prepareRefresh().execute().actionGet(); logger.info("done indexing, check all have the same field value"); - Map masterSource = client().prepareGet("test", "type1", "1").execute().actionGet().getSourceAsMap(); + Map masterSource = client().prepareGet("test", "1").execute().actionGet().getSourceAsMap(); for (int i = 0; i < (cluster().size() * 5); i++) { - 
assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().getSourceAsMap(), equalTo(masterSource)); + assertThat(client().prepareGet("test", "1").execute().actionGet().getSourceAsMap(), equalTo(masterSource)); } } } diff --git a/server/src/internalClusterTest/java/org/opensearch/versioning/ConcurrentSeqNoVersioningIT.java b/server/src/internalClusterTest/java/org/opensearch/versioning/ConcurrentSeqNoVersioningIT.java index c1dd439cce4aa..a14e1279c1051 100644 --- a/server/src/internalClusterTest/java/org/opensearch/versioning/ConcurrentSeqNoVersioningIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/versioning/ConcurrentSeqNoVersioningIT.java @@ -255,7 +255,8 @@ public void run() { version = version.previousTerm(); } - IndexRequest indexRequest = new IndexRequest("test", "type", partition.id).source("value", random.nextInt()) + IndexRequest indexRequest = new IndexRequest("test").id(partition.id) + .source("value", random.nextInt()) .setIfPrimaryTerm(version.primaryTerm) .setIfSeqNo(version.seqNo); Consumer historyResponse = partition.invoke(version); diff --git a/server/src/internalClusterTest/java/org/opensearch/versioning/SimpleVersioningIT.java b/server/src/internalClusterTest/java/org/opensearch/versioning/SimpleVersioningIT.java index 5edb3cf5f314d..9cbcc19cb47eb 100644 --- a/server/src/internalClusterTest/java/org/opensearch/versioning/SimpleVersioningIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/versioning/SimpleVersioningIT.java @@ -135,7 +135,7 @@ public void testExternalGTE() throws Exception { refresh(); } for (int i = 0; i < 10; i++) { - assertThat(client().prepareGet("test", "type", "1").get().getVersion(), equalTo(14L)); + assertThat(client().prepareGet("test", "1").get().getVersion(), equalTo(14L)); } // deleting with a lower version fails. @@ -203,7 +203,7 @@ public void testExternalVersioning() throws Exception { refresh(); } for (int i = 0; i < 10; i++) { - assertThat(client().prepareGet("test", "type", "1").execute().actionGet().getVersion(), equalTo(14L)); + assertThat(client().prepareGet("test", "1").execute().actionGet().getVersion(), equalTo(14L)); } // deleting with a lower version fails. 
@@ -349,7 +349,7 @@ public void testCompareAndSet() { client().admin().indices().prepareRefresh().execute().actionGet(); for (int i = 0; i < 10; i++) { - final GetResponse response = client().prepareGet("test", "type", "1").get(); + final GetResponse response = client().prepareGet("test", "1").get(); assertThat(response.getSeqNo(), equalTo(1L)); assertThat(response.getPrimaryTerm(), equalTo(1L)); } @@ -420,7 +420,7 @@ public void testSimpleVersioningWithFlush() throws Exception { ); for (int i = 0; i < 10; i++) { - assertThat(client().prepareGet("test", "type", "1").execute().actionGet().getVersion(), equalTo(2L)); + assertThat(client().prepareGet("test", "1").execute().actionGet().getVersion(), equalTo(2L)); } client().admin().indices().prepareRefresh().execute().actionGet(); @@ -578,8 +578,6 @@ public String toString() { sb.append(deleteResponse.getIndex()); sb.append(" id="); sb.append(deleteResponse.getId()); - sb.append(" type="); - sb.append(deleteResponse.getType()); sb.append(" version="); sb.append(deleteResponse.getVersion()); sb.append(" found="); @@ -590,8 +588,6 @@ public String toString() { sb.append(indexResponse.getIndex()); sb.append(" id="); sb.append(indexResponse.getId()); - sb.append(" type="); - sb.append(indexResponse.getType()); sb.append(" version="); sb.append(indexResponse.getVersion()); sb.append(" created="); @@ -787,7 +783,7 @@ public void run() { } else { expected = -1; } - long actualVersion = client().prepareGet("test", "type", id).execute().actionGet().getVersion(); + long actualVersion = client().prepareGet("test", id).execute().actionGet().getVersion(); if (actualVersion != expected) { logger.error("--> FAILED: idVersion={} actualVersion= {}", idVersion, actualVersion); failed = true; @@ -839,11 +835,7 @@ public void testDeleteNotLost() throws Exception { client().prepareDelete("test", "type", "id").setVersion(11).setVersionType(VersionType.EXTERNAL).execute().actionGet(); // Real-time get should reflect delete: - assertThat( - "doc should have been deleted", - client().prepareGet("test", "type", "id").execute().actionGet().getVersion(), - equalTo(-1L) - ); + assertThat("doc should have been deleted", client().prepareGet("test", "id").execute().actionGet().getVersion(), equalTo(-1L)); // ThreadPool.relativeTimeInMillis has default granularity of 200 msec, so we must sleep at least that long; sleep much longer in // case system is busy: @@ -853,11 +845,7 @@ public void testDeleteNotLost() throws Exception { client().prepareDelete("test", "type", "id2").setVersion(11).setVersionType(VersionType.EXTERNAL).execute().actionGet(); // Real-time get should still reflect delete: - assertThat( - "doc should have been deleted", - client().prepareGet("test", "type", "id").execute().actionGet().getVersion(), - equalTo(-1L) - ); + assertThat("doc should have been deleted", client().prepareGet("test", "id").execute().actionGet().getVersion(), equalTo(-1L)); } public void testGCDeletesZero() throws Exception { @@ -887,11 +875,7 @@ public void testGCDeletesZero() throws Exception { client().prepareDelete("test", "type", "id").setVersion(11).setVersionType(VersionType.EXTERNAL).execute().actionGet(); // Real-time get should reflect delete even though index.gc_deletes is 0: - assertThat( - "doc should have been deleted", - client().prepareGet("test", "type", "id").execute().actionGet().getVersion(), - equalTo(-1L) - ); + assertThat("doc should have been deleted", client().prepareGet("test", "id").execute().actionGet().getVersion(), equalTo(-1L)); } public void 
testSpecialVersioning() { diff --git a/server/src/main/java/org/apache/lucene/queries/BinaryDocValuesRangeQuery.java b/server/src/main/java/org/apache/lucene/queries/BinaryDocValuesRangeQuery.java index bb5da686f522a..82778d31f6c2c 100644 --- a/server/src/main/java/org/apache/lucene/queries/BinaryDocValuesRangeQuery.java +++ b/server/src/main/java/org/apache/lucene/queries/BinaryDocValuesRangeQuery.java @@ -43,8 +43,8 @@ import org.apache.lucene.search.Scorer; import org.apache.lucene.search.TwoPhaseIterator; import org.apache.lucene.search.Weight; -import org.apache.lucene.store.ByteArrayDataInput; import org.apache.lucene.util.BytesRef; +import org.opensearch.common.io.stream.BytesStreamInput; import org.opensearch.index.mapper.RangeType; import java.io.IOException; @@ -91,7 +91,7 @@ public Scorer scorer(LeafReaderContext context) throws IOException { final TwoPhaseIterator iterator = new TwoPhaseIterator(values) { - ByteArrayDataInput in = new ByteArrayDataInput(); + BytesStreamInput in = new BytesStreamInput(); BytesRef otherFrom = new BytesRef(); BytesRef otherTo = new BytesRef(); diff --git a/server/src/main/java/org/opensearch/ExceptionsHelper.java b/server/src/main/java/org/opensearch/ExceptionsHelper.java index 1d3f2c0afce23..418bf9811a7b3 100644 --- a/server/src/main/java/org/opensearch/ExceptionsHelper.java +++ b/server/src/main/java/org/opensearch/ExceptionsHelper.java @@ -297,25 +297,22 @@ public static Optional maybeError(final Throwable cause) { * @param throwable the throwable to possibly throw on another thread */ public static void maybeDieOnAnotherThread(final Throwable throwable) { - ExceptionsHelper.maybeError(throwable) - .ifPresent( - error -> { - /* - * Here be dragons. We want to rethrow this so that it bubbles up to the uncaught exception handler. Yet, sometimes the stack - * contains statements that catch any throwable (e.g., Netty, and the JDK futures framework). This means that a rethrow here - * will not bubble up to where we want it to. So, we fork a thread and throw the exception from there where we are sure the - * stack does not contain statements that catch any throwable. We do not wrap the exception so as to not lose the original cause - * during exit. - */ - try { - // try to log the current stack trace - final String formatted = ExceptionsHelper.formatStackTrace(Thread.currentThread().getStackTrace()); - logger.error("fatal error\n{}", formatted); - } finally { - new Thread(() -> { throw error; }).start(); - } - } - ); + ExceptionsHelper.maybeError(throwable).ifPresent(error -> { + /* + * Here be dragons. We want to rethrow this so that it bubbles up to the uncaught exception handler. Yet, sometimes the stack + * contains statements that catch any throwable (e.g., Netty, and the JDK futures framework). This means that a rethrow here + * will not bubble up to where we want it to. So, we fork a thread and throw the exception from there where we are sure the + * stack does not contain statements that catch any throwable. We do not wrap the exception so as to not lose the original cause + * during exit. 
+ */ + try { + // try to log the current stack trace + final String formatted = ExceptionsHelper.formatStackTrace(Thread.currentThread().getStackTrace()); + logger.error("fatal error\n{}", formatted); + } finally { + new Thread(() -> { throw error; }).start(); + } + }); } /** diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index 1dc61f6cec0d7..536e450da4a98 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ -77,6 +77,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_1_2_2 = new Version(1020299, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_1_2_3 = new Version(1020399, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_1_2_4 = new Version(1020499, org.apache.lucene.util.Version.LUCENE_8_10_1); + public static final Version V_1_2_5 = new Version(1020599, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_1_3_0 = new Version(1030099, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_2_0_0 = new Version(2000099, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version CURRENT = V_2_0_0; diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index 7905abe4b5533..752f02d03cb16 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -140,9 +140,7 @@ import org.opensearch.action.admin.indices.exists.types.TransportTypesExistsAction; import org.opensearch.action.admin.indices.exists.types.TypesExistsAction; import org.opensearch.action.admin.indices.flush.FlushAction; -import org.opensearch.action.admin.indices.flush.SyncedFlushAction; import org.opensearch.action.admin.indices.flush.TransportFlushAction; -import org.opensearch.action.admin.indices.flush.TransportSyncedFlushAction; import org.opensearch.action.admin.indices.forcemerge.ForceMergeAction; import org.opensearch.action.admin.indices.forcemerge.TransportForceMergeAction; import org.opensearch.action.admin.indices.get.GetIndexAction; @@ -592,7 +590,6 @@ public void reg actions.register(RefreshAction.INSTANCE, TransportRefreshAction.class); actions.register(PublishCheckpointAction.INSTANCE, TransportPublishCheckpointAction.class); actions.register(FlushAction.INSTANCE, TransportFlushAction.class); - actions.register(SyncedFlushAction.INSTANCE, TransportSyncedFlushAction.class); actions.register(ForceMergeAction.INSTANCE, TransportForceMergeAction.class); actions.register(UpgradeAction.INSTANCE, TransportUpgradeAction.class); actions.register(UpgradeStatusAction.INSTANCE, TransportUpgradeStatusAction.class); diff --git a/server/src/main/java/org/opensearch/action/DocWriteRequest.java b/server/src/main/java/org/opensearch/action/DocWriteRequest.java index 93926923de57e..11d645435c71c 100644 --- a/server/src/main/java/org/opensearch/action/DocWriteRequest.java +++ b/server/src/main/java/org/opensearch/action/DocWriteRequest.java @@ -71,26 +71,6 @@ public interface DocWriteRequest extends IndicesRequest, Accountable { */ String index(); - /** - * Set the type for this request - * @return the Request - */ - T type(String type); - - /** - * Get the type that this request operates on - * @return the type - */ - String type(); - - /** - * Set the default type supplied to a 
bulk - * request if this individual request's type is null - * or empty - * @return the Request - */ - T defaultTypeIfNull(String defaultType); - /** * Get the id of the document for this request * @return the id diff --git a/server/src/main/java/org/opensearch/action/DocWriteResponse.java b/server/src/main/java/org/opensearch/action/DocWriteResponse.java index 9cec09b9d8b10..587f93ed09f52 100644 --- a/server/src/main/java/org/opensearch/action/DocWriteResponse.java +++ b/server/src/main/java/org/opensearch/action/DocWriteResponse.java @@ -31,6 +31,7 @@ package org.opensearch.action; +import org.opensearch.Version; import org.opensearch.action.support.WriteRequest; import org.opensearch.action.support.WriteRequest.RefreshPolicy; import org.opensearch.action.support.WriteResponse; @@ -45,6 +46,7 @@ import org.opensearch.common.xcontent.XContentParser; import org.opensearch.index.Index; import org.opensearch.index.IndexSettings; +import org.opensearch.index.mapper.MapperService; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.ShardId; import org.opensearch.rest.RestStatus; @@ -66,7 +68,6 @@ public abstract class DocWriteResponse extends ReplicationResponse implements Wr private static final String _SHARDS = "_shards"; private static final String _INDEX = "_index"; - private static final String _TYPE = "_type"; private static final String _ID = "_id"; private static final String _VERSION = "_version"; private static final String _SEQ_NO = "_seq_no"; @@ -127,16 +128,14 @@ public void writeTo(StreamOutput out) throws IOException { private final ShardId shardId; private final String id; - private final String type; private final long version; private final long seqNo; private final long primaryTerm; private boolean forcedRefresh; protected final Result result; - public DocWriteResponse(ShardId shardId, String type, String id, long seqNo, long primaryTerm, long version, Result result) { + public DocWriteResponse(ShardId shardId, String id, long seqNo, long primaryTerm, long version, Result result) { this.shardId = Objects.requireNonNull(shardId); - this.type = Objects.requireNonNull(type); this.id = Objects.requireNonNull(id); this.seqNo = seqNo; this.primaryTerm = primaryTerm; @@ -148,7 +147,10 @@ public DocWriteResponse(ShardId shardId, String type, String id, long seqNo, lon protected DocWriteResponse(ShardId shardId, StreamInput in) throws IOException { super(in); this.shardId = shardId; - type = in.readString(); + if (in.getVersion().before(Version.V_2_0_0)) { + String type = in.readString(); + assert MapperService.SINGLE_MAPPING_NAME.equals(type) : "Expected [_doc] but received [" + type + "]"; + } id = in.readString(); version = in.readZLong(); seqNo = in.readZLong(); @@ -164,7 +166,10 @@ protected DocWriteResponse(ShardId shardId, StreamInput in) throws IOException { protected DocWriteResponse(StreamInput in) throws IOException { super(in); shardId = new ShardId(in); - type = in.readString(); + if (in.getVersion().before(Version.V_2_0_0)) { + String type = in.readString(); + assert MapperService.SINGLE_MAPPING_NAME.equals(type) : "Expected [_doc] but received [" + type + "]"; + } id = in.readString(); version = in.readZLong(); seqNo = in.readZLong(); @@ -194,16 +199,6 @@ public ShardId getShardId() { return this.shardId; } - /** - * The type of the document changed. - * - * @deprecated Types are in the process of being removed. - */ - @Deprecated - public String getType() { - return this.type; - } - /** * The id of the document changed. 
*/ @@ -270,7 +265,7 @@ public String getLocation(@Nullable String routing) { try { // encode the path components separately otherwise the path separators will be encoded encodedIndex = URLEncoder.encode(getIndex(), "UTF-8"); - encodedType = URLEncoder.encode(getType(), "UTF-8"); + encodedType = URLEncoder.encode(MapperService.SINGLE_MAPPING_NAME, "UTF-8"); encodedId = URLEncoder.encode(getId(), "UTF-8"); encodedRouting = routing == null ? null : URLEncoder.encode(routing, "UTF-8"); } catch (final UnsupportedEncodingException e) { @@ -308,7 +303,9 @@ public void writeTo(StreamOutput out) throws IOException { } private void writeWithoutShardId(StreamOutput out) throws IOException { - out.writeString(type); + if (out.getVersion().before(Version.V_2_0_0)) { + out.writeString(MapperService.SINGLE_MAPPING_NAME); + } out.writeString(id); out.writeZLong(version); out.writeZLong(seqNo); @@ -328,7 +325,6 @@ public final XContentBuilder toXContent(XContentBuilder builder, Params params) public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException { ReplicationResponse.ShardInfo shardInfo = getShardInfo(); builder.field(_INDEX, shardId.getIndexName()); - builder.field(_TYPE, type); builder.field(_ID, id).field(_VERSION, version).field(RESULT, getResult().getLowercase()); if (forcedRefresh) { builder.field(FORCED_REFRESH, true); @@ -359,8 +355,6 @@ protected static void parseInnerToXContent(XContentParser parser, Builder contex if (_INDEX.equals(currentFieldName)) { // index uuid and shard id are unknown and can't be parsed back for now. context.setShardId(new ShardId(new Index(parser.text(), IndexMetadata.INDEX_UUID_NA_VALUE), -1)); - } else if (_TYPE.equals(currentFieldName)) { - context.setType(parser.text()); } else if (_ID.equals(currentFieldName)) { context.setId(parser.text()); } else if (_VERSION.equals(currentFieldName)) { @@ -399,7 +393,6 @@ protected static void parseInnerToXContent(XContentParser parser, Builder contex public abstract static class Builder { protected ShardId shardId = null; - protected String type = null; protected String id = null; protected Long version = null; protected Result result = null; @@ -416,14 +409,6 @@ public void setShardId(ShardId shardId) { this.shardId = shardId; } - public String getType() { - return type; - } - - public void setType(String type) { - this.type = type; - } - public String getId() { return id; } diff --git a/server/src/main/java/org/opensearch/action/RoutingMissingException.java b/server/src/main/java/org/opensearch/action/RoutingMissingException.java index ac6c4287849de..4f34a7847da4d 100644 --- a/server/src/main/java/org/opensearch/action/RoutingMissingException.java +++ b/server/src/main/java/org/opensearch/action/RoutingMissingException.java @@ -35,6 +35,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.index.mapper.MapperService; import org.opensearch.rest.RestStatus; import java.io.IOException; @@ -46,8 +47,12 @@ public class RoutingMissingException extends OpenSearchException { private final String id; + public RoutingMissingException(String index, String id) { + this(index, MapperService.SINGLE_MAPPING_NAME, id); + } + public RoutingMissingException(String index, String type, String id) { - super("routing is required for [" + index + "]/[" + type + "]/[" + id + "]"); + super("routing is required for [" + index + "]/[" + id + "]"); Objects.requireNonNull(index, "index must 
not be null"); Objects.requireNonNull(type, "type must not be null"); Objects.requireNonNull(id, "id must not be null"); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java index 1da95c680e057..6855803ba6c45 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -364,9 +364,8 @@ static int prepareResponse( } if (request.waitForActiveShards().equals(ActiveShardCount.NONE) == false) { ActiveShardCount waitForActiveShards = request.waitForActiveShards(); - assert waitForActiveShards.equals( - ActiveShardCount.DEFAULT - ) == false : "waitForActiveShards must not be DEFAULT on the request object, instead it should be NONE"; + assert waitForActiveShards.equals(ActiveShardCount.DEFAULT) == false + : "waitForActiveShards must not be DEFAULT on the request object, instead it should be NONE"; if (waitForActiveShards.equals(ActiveShardCount.ALL)) { if (response.getUnassignedShards() == 0 && response.getInitializingShards() == 0) { // if we are waiting for all shards to be active, then the num of unassigned and num of initializing shards must be 0 diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java index 372bae023eb7d..80049b5e30fdf 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java @@ -187,24 +187,17 @@ void waitedForCompletion( TaskInfo snapshotOfRunningTask, ActionListener listener ) { - getFinishedTaskFromIndex( - thisTask, - request, - ActionListener.delegateResponse( - listener, - (delegatedListener, e) -> { - /* - * We couldn't load the task from the task index. Instead of 404 we should use the snapshot we took after it finished. If - * the error isn't a 404 then we'll just throw it back to the user. - */ - if (ExceptionsHelper.unwrap(e, ResourceNotFoundException.class) != null) { - delegatedListener.onResponse(new GetTaskResponse(new TaskResult(true, snapshotOfRunningTask))); - } else { - delegatedListener.onFailure(e); - } - } - ) - ); + getFinishedTaskFromIndex(thisTask, request, ActionListener.delegateResponse(listener, (delegatedListener, e) -> { + /* + * We couldn't load the task from the task index. Instead of 404 we should use the snapshot we took after it finished. If + * the error isn't a 404 then we'll just throw it back to the user. + */ + if (ExceptionsHelper.unwrap(e, ResourceNotFoundException.class) != null) { + delegatedListener.onResponse(new GetTaskResponse(new TaskResult(true, snapshotOfRunningTask))); + } else { + delegatedListener.onFailure(e); + } + })); } /** @@ -213,7 +206,7 @@ void waitedForCompletion( * coordinating node if the node is no longer part of the cluster. 
*/ void getFinishedTaskFromIndex(Task thisTask, GetTaskRequest request, ActionListener listener) { - GetRequest get = new GetRequest(TaskResultsService.TASK_INDEX, TaskResultsService.TASK_TYPE, request.getTaskId().toString()); + GetRequest get = new GetRequest(TaskResultsService.TASK_INDEX, request.getTaskId().toString()); get.setParentTask(clusterService.localNode().getId(), thisTask.getId()); client.get(get, ActionListener.wrap(r -> onGetFinishedTaskFromIndex(r, listener), e -> { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index f5fe738e565b8..33d4ac5d50347 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -363,10 +363,8 @@ private void loadRepositoryData( } final long startTime = snapshotInfo.startTime(); final long endTime = snapshotInfo.endTime(); - assert endTime >= startTime - || (endTime == 0L && snapshotInfo.state().completed() == false) : "Inconsistent timestamps found in SnapshotInfo [" - + snapshotInfo - + "]"; + assert endTime >= startTime || (endTime == 0L && snapshotInfo.state().completed() == false) + : "Inconsistent timestamps found in SnapshotInfo [" + snapshotInfo + "]"; builder.add( new SnapshotStatus( new Snapshot(repositoryName, snapshotId), diff --git a/server/src/main/java/org/opensearch/action/admin/indices/flush/SyncedFlushAction.java b/server/src/main/java/org/opensearch/action/admin/indices/flush/SyncedFlushAction.java deleted file mode 100644 index dbc138b2ee387..0000000000000 --- a/server/src/main/java/org/opensearch/action/admin/indices/flush/SyncedFlushAction.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.action.admin.indices.flush; - -import org.opensearch.action.ActionType; - -public class SyncedFlushAction extends ActionType { - - public static final SyncedFlushAction INSTANCE = new SyncedFlushAction(); - public static final String NAME = "indices:admin/synced_flush"; - - private SyncedFlushAction() { - super(NAME, SyncedFlushResponse::new); - } -} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/flush/SyncedFlushRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/flush/SyncedFlushRequest.java deleted file mode 100644 index c3ed5a3c4599f..0000000000000 --- a/server/src/main/java/org/opensearch/action/admin/indices/flush/SyncedFlushRequest.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.action.admin.indices.flush; - -import org.opensearch.action.support.broadcast.BroadcastRequest; -import org.opensearch.common.io.stream.StreamInput; - -import java.io.IOException; -import java.util.Arrays; - -/** - * A synced flush request to sync flush one or more indices. The synced flush process of an index performs a flush - * and writes the same sync id to primary and all copies. - * - *
<p>Best created with {@link org.opensearch.client.Requests#syncedFlushRequest(String...)}.</p>
- * - * @see org.opensearch.client.Requests#flushRequest(String...) - * @see org.opensearch.client.IndicesAdminClient#syncedFlush(SyncedFlushRequest) - * @see SyncedFlushResponse - */ -public class SyncedFlushRequest extends BroadcastRequest { - - /** - * Constructs a new synced flush request against one or more indices. If nothing is provided, all indices will - * be sync flushed. - */ - public SyncedFlushRequest(String... indices) { - super(indices); - } - - public SyncedFlushRequest(StreamInput in) throws IOException { - super(in); - } - - @Override - public String toString() { - return "SyncedFlushRequest{" + "indices=" + Arrays.toString(indices) + "}"; - } -} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/flush/SyncedFlushRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/flush/SyncedFlushRequestBuilder.java deleted file mode 100644 index 48ac3d406655d..0000000000000 --- a/server/src/main/java/org/opensearch/action/admin/indices/flush/SyncedFlushRequestBuilder.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.action.admin.indices.flush; - -import org.opensearch.action.ActionRequestBuilder; -import org.opensearch.action.support.IndicesOptions; -import org.opensearch.client.OpenSearchClient; - -public class SyncedFlushRequestBuilder extends ActionRequestBuilder { - - public SyncedFlushRequestBuilder(OpenSearchClient client, SyncedFlushAction action) { - super(client, action, new SyncedFlushRequest()); - } - - public SyncedFlushRequestBuilder setIndices(String[] indices) { - super.request().indices(indices); - return this; - } - - public SyncedFlushRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) { - super.request().indicesOptions(indicesOptions); - return this; - } -} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/flush/SyncedFlushResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/flush/SyncedFlushResponse.java deleted file mode 100644 index 95d8d0592187b..0000000000000 --- a/server/src/main/java/org/opensearch/action/admin/indices/flush/SyncedFlushResponse.java +++ /dev/null @@ -1,226 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.action.admin.indices.flush; - -import org.opensearch.action.ActionResponse; -import org.opensearch.cluster.routing.ShardRouting; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.common.io.stream.Writeable; -import org.opensearch.common.util.iterable.Iterables; -import org.opensearch.common.xcontent.ToXContentFragment; -import org.opensearch.common.xcontent.XContentBuilder; -import org.opensearch.indices.flush.ShardsSyncedFlushResult; -import org.opensearch.indices.flush.SyncedFlushService; -import org.opensearch.rest.RestStatus; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static java.util.Collections.unmodifiableMap; - -/** - * The result of performing a sync flush operation on all shards of multiple indices - */ -public class SyncedFlushResponse extends ActionResponse implements ToXContentFragment { - - private final Map> shardsResultPerIndex; - private final ShardCounts shardCounts; - - public SyncedFlushResponse(Map> shardsResultPerIndex) { - // shardsResultPerIndex is never modified after it is passed to this - // constructor so this is safe even though shardsResultPerIndex is a - // ConcurrentHashMap - this.shardsResultPerIndex = unmodifiableMap(shardsResultPerIndex); - this.shardCounts = calculateShardCounts(Iterables.flatten(shardsResultPerIndex.values())); - } - - public SyncedFlushResponse(StreamInput in) throws IOException { - super(in); - shardCounts = new ShardCounts(in); - Map> tmpShardsResultPerIndex = new HashMap<>(); - int numShardsResults = in.readInt(); - for (int i = 0; i < numShardsResults; i++) { - String index = in.readString(); - List shardsSyncedFlushResults = new ArrayList<>(); - int numShards = in.readInt(); - for (int j = 0; j < numShards; j++) { - shardsSyncedFlushResults.add(new ShardsSyncedFlushResult(in)); - } - tmpShardsResultPerIndex.put(index, shardsSyncedFlushResults); - } - shardsResultPerIndex = Collections.unmodifiableMap(tmpShardsResultPerIndex); - } - - /** - * total number shards, including replicas, both assigned and unassigned - */ - public int totalShards() { - return shardCounts.total; - } - - /** - * total number of shards for which the operation failed - */ - public int failedShards() { - return shardCounts.failed; - } - - /** - * total number of shards which were successfully sync-flushed - */ - public int successfulShards() { - return shardCounts.successful; - } - - public RestStatus restStatus() { - return 
failedShards() == 0 ? RestStatus.OK : RestStatus.CONFLICT; - } - - public Map> getShardsResultPerIndex() { - return shardsResultPerIndex; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(Fields._SHARDS); - shardCounts.toXContent(builder, params); - builder.endObject(); - for (Map.Entry> indexEntry : shardsResultPerIndex.entrySet()) { - List indexResult = indexEntry.getValue(); - builder.startObject(indexEntry.getKey()); - ShardCounts indexShardCounts = calculateShardCounts(indexResult); - indexShardCounts.toXContent(builder, params); - if (indexShardCounts.failed > 0) { - builder.startArray(Fields.FAILURES); - for (ShardsSyncedFlushResult shardResults : indexResult) { - if (shardResults.failed()) { - builder.startObject(); - builder.field(Fields.SHARD, shardResults.shardId().id()); - builder.field(Fields.REASON, shardResults.failureReason()); - builder.endObject(); - continue; - } - Map failedShards = shardResults.failedShards(); - for (Map.Entry shardEntry : failedShards.entrySet()) { - builder.startObject(); - builder.field(Fields.SHARD, shardResults.shardId().id()); - builder.field(Fields.REASON, shardEntry.getValue().failureReason()); - builder.field(Fields.ROUTING, shardEntry.getKey()); - builder.endObject(); - } - } - builder.endArray(); - } - builder.endObject(); - } - return builder; - } - - static ShardCounts calculateShardCounts(Iterable results) { - int total = 0, successful = 0, failed = 0; - for (ShardsSyncedFlushResult result : results) { - total += result.totalShards(); - successful += result.successfulShards(); - if (result.failed()) { - // treat all shard copies as failed - failed += result.totalShards(); - } else { - // some shards may have failed during the sync phase - failed += result.failedShards().size(); - } - } - return new ShardCounts(total, successful, failed); - } - - static final class ShardCounts implements ToXContentFragment, Writeable { - - public final int total; - public final int successful; - public final int failed; - - ShardCounts(int total, int successful, int failed) { - this.total = total; - this.successful = successful; - this.failed = failed; - } - - ShardCounts(StreamInput in) throws IOException { - total = in.readInt(); - successful = in.readInt(); - failed = in.readInt(); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(Fields.TOTAL, total); - builder.field(Fields.SUCCESSFUL, successful); - builder.field(Fields.FAILED, failed); - return builder; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeInt(total); - out.writeInt(successful); - out.writeInt(failed); - } - } - - static final class Fields { - static final String _SHARDS = "_shards"; - static final String TOTAL = "total"; - static final String SUCCESSFUL = "successful"; - static final String FAILED = "failed"; - static final String FAILURES = "failures"; - static final String SHARD = "shard"; - static final String ROUTING = "routing"; - static final String REASON = "reason"; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - shardCounts.writeTo(out); - out.writeInt(shardsResultPerIndex.size()); - for (Map.Entry> entry : shardsResultPerIndex.entrySet()) { - out.writeString(entry.getKey()); - out.writeInt(entry.getValue().size()); - for (ShardsSyncedFlushResult shardsSyncedFlushResult : entry.getValue()) { - shardsSyncedFlushResult.writeTo(out); - } - } - 
} -} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/flush/TransportShardFlushAction.java b/server/src/main/java/org/opensearch/action/admin/indices/flush/TransportShardFlushAction.java index e267c5e224581..53e774306e746 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/flush/TransportShardFlushAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/flush/TransportShardFlushAction.java @@ -32,6 +32,7 @@ package org.opensearch.action.admin.indices.flush; +import org.opensearch.Version; import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.replication.ReplicationResponse; @@ -40,10 +41,16 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.settings.Settings; import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.ShardId; import org.opensearch.indices.IndicesService; +import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportChannel; +import org.opensearch.transport.TransportRequest; +import org.opensearch.transport.TransportRequestHandler; import org.opensearch.transport.TransportService; import java.io.IOException; @@ -75,6 +82,12 @@ public TransportShardFlushAction( ShardFlushRequest::new, ThreadPool.Names.FLUSH ); + transportService.registerRequestHandler( + PRE_SYNCED_FLUSH_ACTION_NAME, + ThreadPool.Names.FLUSH, + PreShardSyncedFlushRequest::new, + new PreSyncedFlushTransportHandler(indicesService) + ); } @Override @@ -103,4 +116,43 @@ protected void shardOperationOnReplica(ShardFlushRequest request, IndexShard rep return new ReplicaResult(); }); } + + // TODO: Remove this transition in OpenSearch 3.0 + private static final String PRE_SYNCED_FLUSH_ACTION_NAME = "internal:indices/flush/synced/pre"; + + private static class PreShardSyncedFlushRequest extends TransportRequest { + private final ShardId shardId; + + private PreShardSyncedFlushRequest(StreamInput in) throws IOException { + super(in); + assert in.getVersion().before(Version.V_2_0_0) : "received pre_sync request from a new node"; + this.shardId = new ShardId(in); + } + + @Override + public String toString() { + return "PreShardSyncedFlushRequest{" + "shardId=" + shardId + '}'; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + assert false : "must not send pre_sync request from a new node"; + throw new UnsupportedOperationException(""); + } + } + + private static final class PreSyncedFlushTransportHandler implements TransportRequestHandler { + private final IndicesService indicesService; + + PreSyncedFlushTransportHandler(IndicesService indicesService) { + this.indicesService = indicesService; + } + + @Override + public void messageReceived(PreShardSyncedFlushRequest request, TransportChannel channel, Task task) { + IndexShard indexShard = indicesService.indexServiceSafe(request.shardId.getIndex()).getShard(request.shardId.id()); + indexShard.flush(new FlushRequest().force(false).waitIfOngoing(true)); + throw new UnsupportedOperationException("Synced flush was removed and a normal flush was performed instead."); + } + } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/flush/TransportSyncedFlushAction.java 
b/server/src/main/java/org/opensearch/action/admin/indices/flush/TransportSyncedFlushAction.java deleted file mode 100644 index 619c408a54c92..0000000000000 --- a/server/src/main/java/org/opensearch/action/admin/indices/flush/TransportSyncedFlushAction.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.action.admin.indices.flush; - -import org.opensearch.action.ActionListener; -import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.HandledTransportAction; -import org.opensearch.common.inject.Inject; -import org.opensearch.indices.flush.SyncedFlushService; -import org.opensearch.tasks.Task; -import org.opensearch.transport.TransportService; - -/** - * Synced flush ActionType. 
- */ -public class TransportSyncedFlushAction extends HandledTransportAction { - - SyncedFlushService syncedFlushService; - - @Inject - public TransportSyncedFlushAction( - TransportService transportService, - ActionFilters actionFilters, - SyncedFlushService syncedFlushService - ) { - super(SyncedFlushAction.NAME, transportService, actionFilters, SyncedFlushRequest::new); - this.syncedFlushService = syncedFlushService; - } - - @Override - protected void doExecute(Task task, SyncedFlushRequest request, ActionListener listener) { - syncedFlushService.attemptSyncedFlush(request.indices(), request.indicesOptions(), listener); - } -} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexResponse.java index aa1d5feb66c9e..7efe88e9bbc83 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexResponse.java @@ -47,7 +47,6 @@ import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentParser; import org.opensearch.common.xcontent.XContentParser.Token; -import org.opensearch.index.mapper.MapperService; import java.io.IOException; import java.util.ArrayList; @@ -301,10 +300,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } else { MappingMetadata mappings = null; for (final ObjectObjectCursor typeEntry : indexMappings) { - if (typeEntry.key.equals(MapperService.DEFAULT_MAPPING) == false) { - assert mappings == null; - mappings = typeEntry.value; - } + assert mappings == null; + mappings = typeEntry.value; } if (mappings == null) { // no mappings yet diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java index 562c92da8673b..d486a102d1a21 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java @@ -47,7 +47,6 @@ import org.opensearch.common.xcontent.XContentParser; import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.mapper.Mapper; -import org.opensearch.index.mapper.MapperService; import org.opensearch.rest.BaseRestHandler; import java.io.IOException; @@ -165,10 +164,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (includeTypeName == false) { Map mappings = null; for (Map.Entry> typeEntry : indexEntry.getValue().entrySet()) { - if (typeEntry.getKey().equals(MapperService.DEFAULT_MAPPING) == false) { - assert mappings == null; - mappings = typeEntry.getValue(); - } + assert mappings == null; + mappings = typeEntry.getValue(); } if (mappings != null) { addFieldMappingsToBuilder(builder, params, mappings); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsResponse.java index f0f5265367549..0087271147f4a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsResponse.java @@ -135,10 +135,8 @@ public XContentBuilder toXContent(XContentBuilder builder, 
Params params) throws if (includeTypeName == false) { MappingMetadata mappings = null; for (final ObjectObjectCursor typeEntry : indexEntry.value) { - if (typeEntry.key.equals("_default_") == false) { - assert mappings == null; - mappings = typeEntry.value; - } + assert mappings == null; + mappings = typeEntry.value; } if (mappings == null) { // no mappings yet diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentResponse.java index d3b8e4c2eb1b8..ed9c086d0481c 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentResponse.java @@ -139,7 +139,7 @@ protected void addCustomXContentFields(XContentBuilder builder, Params params) t builder.field(Fields.NUM_DOCS, segment.getNumDocs()); builder.field(Fields.DELETED_DOCS, segment.getDeletedDocs()); builder.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, segment.getSize()); - builder.humanReadableField(Fields.MEMORY_IN_BYTES, Fields.MEMORY, new ByteSizeValue(segment.getMemoryInBytes())); + builder.humanReadableField(Fields.MEMORY_IN_BYTES, Fields.MEMORY, segment.getZeroMemory()); builder.field(Fields.COMMITTED, segment.isCommitted()); builder.field(Fields.SEARCH, segment.isSearch()); if (segment.getVersion() != null) { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStats.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStats.java index c09abd595cb73..2949af00a30d0 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStats.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStats.java @@ -190,7 +190,7 @@ public CommonStats(IndicesQueryCache indicesQueryCache, IndexShard indexShard, C store = indexShard.storeStats(); break; case Indexing: - indexing = indexShard.indexingStats(flags.types()); + indexing = indexShard.indexingStats(); break; case Get: get = indexShard.getStats(); @@ -493,7 +493,7 @@ public RecoveryStats getRecoveryStats() { /** * Utility method which computes total memory by adding - * FieldData, PercolatorCache, Segments (memory, index writer, version map) + * FieldData, PercolatorCache, Segments (index writer, version map) */ public ByteSizeValue getTotalMemory() { long size = 0; @@ -504,8 +504,7 @@ public ByteSizeValue getTotalMemory() { size += this.getQueryCache().getMemorySizeInBytes(); } if (this.getSegments() != null) { - size += this.getSegments().getMemoryInBytes() + this.getSegments().getIndexWriterMemoryInBytes() + this.getSegments() - .getVersionMapMemoryInBytes(); + size += this.getSegments().getIndexWriterMemoryInBytes() + this.getSegments().getVersionMapMemoryInBytes(); } return new ByteSizeValue(size); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java index 6c6c94d84127c..e17b497ce312a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java @@ -34,6 +34,7 @@ import org.opensearch.LegacyESVersion; import org.opensearch.Version; +import org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; import 
org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.io.stream.Writeable; @@ -48,7 +49,6 @@ public class CommonStatsFlags implements Writeable, Cloneable { public static final CommonStatsFlags NONE = new CommonStatsFlags().clear(); private EnumSet flags = EnumSet.allOf(Flag.class); - private String[] types = null; private String[] groups = null; private String[] fieldDataFields = null; private String[] completionDataFields = null; @@ -75,7 +75,9 @@ public CommonStatsFlags(StreamInput in) throws IOException { flags.add(flag); } } - types = in.readStringArray(); + if (in.getVersion().before(Version.V_2_0_0)) { + in.readStringArray(); + } groups = in.readStringArray(); fieldDataFields = in.readStringArray(); completionDataFields = in.readStringArray(); @@ -97,7 +99,9 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeLong(longFlags); - out.writeStringArrayNullable(types); + if (out.getVersion().before(Version.V_2_0_0)) { + out.writeStringArrayNullable(Strings.EMPTY_ARRAY); + } out.writeStringArrayNullable(groups); out.writeStringArrayNullable(fieldDataFields); out.writeStringArrayNullable(completionDataFields); @@ -116,7 +120,6 @@ public void writeTo(StreamOutput out) throws IOException { */ public CommonStatsFlags all() { flags = EnumSet.allOf(Flag.class); - types = null; groups = null; fieldDataFields = null; completionDataFields = null; @@ -132,7 +135,6 @@ public CommonStatsFlags all() { */ public CommonStatsFlags clear() { flags = EnumSet.noneOf(Flag.class); - types = null; groups = null; fieldDataFields = null; completionDataFields = null; @@ -151,23 +153,6 @@ public Flag[] getFlags() { return flags.toArray(new Flag[flags.size()]); } - /** - * Document types to return stats for. Mainly affects {@link Flag#Indexing} when - * enabled, returning specific indexing stats for those types. - */ - public CommonStatsFlags types(String... types) { - this.types = types; - return this; - } - - /** - * Document types to return stats for. Mainly affects {@link Flag#Indexing} when - * enabled, returning specific indexing stats for those types. - */ - public String[] types() { - return this.types; - } - /** * Sets specific search group stats to retrieve the stats for. Mainly affects search * when enabled. diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequest.java index c5e99119d3cb7..bbe69b700b876 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequest.java @@ -90,23 +90,6 @@ public IndicesStatsRequest flags(CommonStatsFlags flags) { return this; } - /** - * Document types to return stats for. Mainly affects {@link #indexing(boolean)} when - * enabled, returning specific indexing stats for those types. - */ - public IndicesStatsRequest types(String... types) { - flags.types(types); - return this; - } - - /** - * Document types to return stats for. Mainly affects {@link #indexing(boolean)} when - * enabled, returning specific indexing stats for those types. - */ - public String[] types() { - return this.flags.types(); - } - /** * Sets specific search group stats to retrieve the stats for. Mainly affects search * when enabled. 
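The CommonStatsFlags hunk above closes out the same wire-compatibility pattern this commit applies in DocWriteResponse and BulkItemResponse.Failure: when reading from a peer older than 2.0.0, consume the type field that peer still sends (asserting it is `_doc`); when writing to such a peer, emit the constant `_doc` (or, for the removed `types` stats flag, an empty string array) so the older stream layout survives without bumping every transport message version. Below is a minimal, self-contained sketch of that pattern; `WireIn`/`WireOut` and the bare integer version id are illustrative stand-ins for OpenSearch's real `StreamInput`/`StreamOutput` and `Version` classes, not part of this change.

import java.io.IOException;

final class TypeWireCompatSketch {
    // Version id layout matches Version.java above: 2000099 encodes 2.0.0.
    static final int V_2_0_0 = 2000099;
    static final String SINGLE_MAPPING_NAME = "_doc";

    // Stand-ins for StreamInput/StreamOutput: only the calls the sketch needs.
    interface WireIn  { int version(); String readString() throws IOException; }
    interface WireOut { int version(); void writeString(String s) throws IOException; }

    // Read side: a pre-2.0.0 peer still sends a type string before the id,
    // so consume it and sanity-check that it is the one remaining mapping name.
    static String readId(WireIn in) throws IOException {
        if (in.version() < V_2_0_0) {
            String type = in.readString();
            assert SINGLE_MAPPING_NAME.equals(type) : "Expected [_doc] but received [" + type + "]";
        }
        return in.readString();
    }

    // Write side: a pre-2.0.0 peer still expects a type string before the id,
    // so emit the fixed "_doc" to preserve the layout that peer was built against.
    static void writeId(WireOut out, String id) throws IOException {
        if (out.version() < V_2_0_0) {
            out.writeString(SINGLE_MAPPING_NAME);
        }
        out.writeString(id);
    }
}

Because the writer sends a constant rather than keeping the field alive, the old wire slot carries no state, and the branch can be deleted outright once pre-2.0.0 peers can no longer join the cluster.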
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java index afb0790367c7f..23c33401966b4 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java @@ -78,15 +78,6 @@ public final IndicesStatsRequestBuilder setTimeout(TimeValue timeout) { return this; } - /** - * Document types to return stats for. Mainly affects {@link #setIndexing(boolean)} when - * enabled, returning specific indexing stats for those types. - */ - public IndicesStatsRequestBuilder setTypes(String... types) { - request.types(types); - return this; - } - public IndicesStatsRequestBuilder setGroups(String... groups) { request.groups(groups); return this; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/TransportValidateQueryAction.java b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/TransportValidateQueryAction.java index b3c8acd2de3f5..1fb293b200e51 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/TransportValidateQueryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/TransportValidateQueryAction.java @@ -222,7 +222,6 @@ protected ShardValidateQueryResponse shardOperation(ShardValidateQueryRequest re String error = null; ShardSearchRequest shardSearchLocalRequest = new ShardSearchRequest( request.shardId(), - request.types(), request.nowInMillis(), request.filteringAliases() ); diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkItemRequest.java b/server/src/main/java/org/opensearch/action/bulk/BulkItemRequest.java index a45b269f8c818..2002d5864e966 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkItemRequest.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkItemRequest.java @@ -105,13 +105,7 @@ void setPrimaryResponse(BulkItemResponse primaryResponse) { */ public void abort(String index, Exception cause) { if (primaryResponse == null) { - final BulkItemResponse.Failure failure = new BulkItemResponse.Failure( - index, - request.type(), - request.id(), - Objects.requireNonNull(cause), - true - ); + final BulkItemResponse.Failure failure = new BulkItemResponse.Failure(index, request.id(), Objects.requireNonNull(cause), true); setPrimaryResponse(new BulkItemResponse(id, request.opType(), failure)); } else { assert primaryResponse.isFailed() && primaryResponse.getFailure().isAborted() : "response [" diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkItemResponse.java b/server/src/main/java/org/opensearch/action/bulk/BulkItemResponse.java index af34789401a6b..fdb27a00bac2d 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkItemResponse.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkItemResponse.java @@ -35,6 +35,7 @@ import org.opensearch.ExceptionsHelper; import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; +import org.opensearch.Version; import org.opensearch.action.DocWriteRequest.OpType; import org.opensearch.action.DocWriteResponse; import org.opensearch.action.delete.DeleteResponse; @@ -51,6 +52,7 @@ import org.opensearch.common.xcontent.ToXContentFragment; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentParser; +import 
org.opensearch.index.mapper.MapperService; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.ShardId; import org.opensearch.rest.RestStatus; @@ -69,7 +71,6 @@ public class BulkItemResponse implements Writeable, StatusToXContentObject { private static final String _INDEX = "_index"; - private static final String _TYPE = "_type"; private static final String _ID = "_id"; private static final String STATUS = "status"; private static final String ERROR = "error"; @@ -88,7 +89,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(STATUS, response.status().getStatus()); } else { builder.field(_INDEX, failure.getIndex()); - builder.field(_TYPE, failure.getType()); builder.field(_ID, failure.getId()); builder.field(STATUS, failure.getStatus().getStatus()); builder.startObject(ERROR); @@ -166,7 +166,7 @@ public static BulkItemResponse fromXContent(XContentParser parser, int id) throw BulkItemResponse bulkItemResponse; if (exception != null) { - Failure failure = new Failure(builder.getShardId().getIndexName(), builder.getType(), builder.getId(), exception, status); + Failure failure = new Failure(builder.getShardId().getIndexName(), builder.getId(), exception, status); bulkItemResponse = new BulkItemResponse(id, opType, failure); } else { bulkItemResponse = new BulkItemResponse(id, opType, builder.build()); @@ -179,13 +179,11 @@ public static BulkItemResponse fromXContent(XContentParser parser, int id) throw */ public static class Failure implements Writeable, ToXContentFragment { public static final String INDEX_FIELD = "index"; - public static final String TYPE_FIELD = "type"; public static final String ID_FIELD = "id"; public static final String CAUSE_FIELD = "cause"; public static final String STATUS_FIELD = "status"; private final String index; - private final String type; private final String id; private final Exception cause; private final RestStatus status; @@ -196,11 +194,10 @@ public static class Failure implements Writeable, ToXContentFragment { public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "bulk_failures", true, - a -> new Failure((String) a[0], (String) a[1], (String) a[2], (Exception) a[3], RestStatus.fromCode((int) a[4])) + a -> new Failure((String) a[0], (String) a[1], (Exception) a[2], RestStatus.fromCode((int) a[3])) ); static { PARSER.declareString(constructorArg(), new ParseField(INDEX_FIELD)); - PARSER.declareString(constructorArg(), new ParseField(TYPE_FIELD)); PARSER.declareString(optionalConstructorArg(), new ParseField(ID_FIELD)); PARSER.declareObject(constructorArg(), (p, c) -> OpenSearchException.fromXContent(p), new ParseField(CAUSE_FIELD)); PARSER.declareInt(constructorArg(), new ParseField(STATUS_FIELD)); @@ -209,13 +206,12 @@ public static class Failure implements Writeable, ToXContentFragment { /** * For write failures before operation was assigned a sequence number. 
* - * use @{link {@link #Failure(String, String, String, Exception, long, long)}} + * use @{link {@link #Failure(String, String, Exception, long, long)}} * to record operation sequence no with failure */ - public Failure(String index, String type, String id, Exception cause) { + public Failure(String index, String id, Exception cause) { this( index, - type, id, cause, ExceptionsHelper.status(cause), @@ -225,10 +221,9 @@ public Failure(String index, String type, String id, Exception cause) { ); } - public Failure(String index, String type, String id, Exception cause, boolean aborted) { + public Failure(String index, String id, Exception cause, boolean aborted) { this( index, - type, id, cause, ExceptionsHelper.status(cause), @@ -238,18 +233,17 @@ public Failure(String index, String type, String id, Exception cause, boolean ab ); } - public Failure(String index, String type, String id, Exception cause, RestStatus status) { - this(index, type, id, cause, status, SequenceNumbers.UNASSIGNED_SEQ_NO, SequenceNumbers.UNASSIGNED_PRIMARY_TERM, false); + public Failure(String index, String id, Exception cause, RestStatus status) { + this(index, id, cause, status, SequenceNumbers.UNASSIGNED_SEQ_NO, SequenceNumbers.UNASSIGNED_PRIMARY_TERM, false); } /** For write failures after operation was assigned a sequence number. */ - public Failure(String index, String type, String id, Exception cause, long seqNo, long term) { - this(index, type, id, cause, ExceptionsHelper.status(cause), seqNo, term, false); + public Failure(String index, String id, Exception cause, long seqNo, long term) { + this(index, id, cause, ExceptionsHelper.status(cause), seqNo, term, false); } - private Failure(String index, String type, String id, Exception cause, RestStatus status, long seqNo, long term, boolean aborted) { + private Failure(String index, String id, Exception cause, RestStatus status, long seqNo, long term, boolean aborted) { this.index = index; - this.type = type; this.id = id; this.cause = cause; this.status = status; @@ -263,7 +257,11 @@ private Failure(String index, String type, String id, Exception cause, RestStatu */ public Failure(StreamInput in) throws IOException { index = in.readString(); - type = in.readString(); + if (in.getVersion().before(Version.V_2_0_0)) { + in.readString(); + // can't make an assertion about type names here because too many tests still set their own + // types bypassing various checks + } id = in.readOptionalString(); cause = in.readException(); status = ExceptionsHelper.status(cause); @@ -279,7 +277,9 @@ public Failure(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(index); - out.writeString(type); + if (out.getVersion().before(Version.V_2_0_0)) { + out.writeString(MapperService.SINGLE_MAPPING_NAME); + } out.writeOptionalString(id); out.writeException(cause); out.writeZLong(seqNo); @@ -296,13 +296,6 @@ public String getIndex() { return this.index; } - /** - * The type of the action. - */ - public String getType() { - return type; - } - /** * The id of the action. */ @@ -361,7 +354,6 @@ public boolean isAborted() { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.field(INDEX_FIELD, index); - builder.field(TYPE_FIELD, type); if (id != null) { builder.field(ID_FIELD, id); } @@ -468,16 +460,6 @@ public String getIndex() { return response.getIndex(); } - /** - * The type of the action. 
- */ - public String getType() { - if (failure != null) { - return failure.getType(); - } - return response.getType(); - } - /** * The id of the action. */ diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkPrimaryExecutionContext.java b/server/src/main/java/org/opensearch/action/bulk/BulkPrimaryExecutionContext.java index 1ccf5f1924f8f..da8833fe49a29 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkPrimaryExecutionContext.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkPrimaryExecutionContext.java @@ -109,8 +109,8 @@ private static boolean isAborted(BulkItemResponse response) { /** move to the next item to execute */ private void advance() { - assert currentItemState == ItemProcessingState.COMPLETED - || currentIndex == -1 : "moving to next but current item wasn't completed (state: " + currentItemState + ")"; + assert currentItemState == ItemProcessingState.COMPLETED || currentIndex == -1 + : "moving to next but current item wasn't completed (state: " + currentItemState + ")"; currentItemState = ItemProcessingState.INITIAL; currentIndex = findNextNonAborted(currentIndex + 1); retryCounter = 0; @@ -251,7 +251,7 @@ public void failOnMappingUpdate(Exception cause) { docWriteRequest.opType(), // Make sure to use getCurrentItem().index() here, if you use docWriteRequest.index() it will use the // concrete index instead of an alias if used! - new BulkItemResponse.Failure(getCurrentItem().index(), docWriteRequest.type(), docWriteRequest.id(), cause) + new BulkItemResponse.Failure(getCurrentItem().index(), docWriteRequest.id(), cause) ); markAsCompleted(executionResult); } @@ -268,7 +268,6 @@ public void markOperationAsExecuted(Engine.Result result) { Engine.IndexResult indexResult = (Engine.IndexResult) result; response = new IndexResponse( primary.shardId(), - requestToExecute.type(), requestToExecute.id(), result.getSeqNo(), result.getTerm(), @@ -279,7 +278,6 @@ public void markOperationAsExecuted(Engine.Result result) { Engine.DeleteResult deleteResult = (Engine.DeleteResult) result; response = new DeleteResponse( primary.shardId(), - requestToExecute.type(), requestToExecute.id(), deleteResult.getSeqNo(), result.getTerm(), @@ -304,7 +302,6 @@ public void markOperationAsExecuted(Engine.Result result) { // concrete index instead of an alias if used! 
new BulkItemResponse.Failure( request.index(), - docWriteRequest.type(), docWriteRequest.id(), result.getFailure(), result.getSeqNo(), diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkProcessor.java b/server/src/main/java/org/opensearch/action/bulk/BulkProcessor.java index 936604d84a15d..90a177119cfd8 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkProcessor.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkProcessor.java @@ -106,7 +106,6 @@ public static class Builder { private TimeValue flushInterval = null; private BackoffPolicy backoffPolicy = BackoffPolicy.exponentialBackoff(); private String globalIndex; - private String globalType; private String globalRouting; private String globalPipeline; @@ -168,11 +167,6 @@ public Builder setGlobalIndex(String globalIndex) { return this; } - public Builder setGlobalType(String globalType) { - this.globalType = globalType; - return this; - } - public Builder setGlobalRouting(String globalRouting) { this.globalRouting = globalRouting; return this; @@ -219,7 +213,7 @@ public BulkProcessor build() { } private Supplier createBulkRequestWithGlobalDefaults() { - return () -> new BulkRequest(globalIndex, globalType).pipeline(globalPipeline).routing(globalRouting); + return () -> new BulkRequest(globalIndex).pipeline(globalPipeline).routing(globalRouting); } } @@ -452,9 +446,8 @@ private void internalAdd(DocWriteRequest request) { /** * Adds the data from the bytes to be processed by the bulk processor */ - public BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, XContentType xContentType) - throws Exception { - return add(data, defaultIndex, defaultType, null, xContentType); + public BulkProcessor add(BytesReference data, @Nullable String defaultIndex, XContentType xContentType) throws Exception { + return add(data, defaultIndex, null, xContentType); } /** @@ -463,7 +456,6 @@ public BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nu public BulkProcessor add( BytesReference data, @Nullable String defaultIndex, - @Nullable String defaultType, @Nullable String defaultPipeline, XContentType xContentType ) throws Exception { @@ -471,7 +463,7 @@ public BulkProcessor add( lock.lock(); try { ensureOpen(); - bulkRequest.add(data, defaultIndex, defaultType, null, null, defaultPipeline, null, true, xContentType); + bulkRequest.add(data, defaultIndex, null, null, defaultPipeline, null, true, xContentType); bulkRequestToExecute = newBulkRequestIfNeeded(); } finally { lock.unlock(); diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java b/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java index a05f5dac2eb1b..e3bf5bced5072 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java @@ -52,7 +52,6 @@ import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentType; -import org.opensearch.index.mapper.MapperService; import org.opensearch.search.fetch.subphase.FetchSourceContext; import java.io.IOException; @@ -92,7 +91,6 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques private String globalPipeline; private String globalRouting; private String globalIndex; - private String globalType; private Boolean globalRequireAlias; private long sizeInBytes = 0; @@ -111,15 +109,6 @@ public BulkRequest(@Nullable String 
globalIndex) { this.globalIndex = globalIndex; } - /** - * @deprecated Types are in the process of being removed. Use {@link #BulkRequest(String)} instead - */ - @Deprecated - public BulkRequest(@Nullable String globalIndex, @Nullable String globalType) { - this.globalIndex = globalIndex; - this.globalType = globalType; - } - /** * Adds a list of requests to be executed. Either index or delete requests. */ @@ -246,62 +235,21 @@ public long estimatedSizeInBytes() { * Adds a framed data in binary format */ public BulkRequest add(byte[] data, int from, int length, XContentType xContentType) throws IOException { - return add(data, from, length, null, null, xContentType); - } - - /** - * Adds a framed data in binary format - * @deprecated use {@link #add(byte[], int, int, String, XContentType)} instead - */ - @Deprecated - public BulkRequest add( - byte[] data, - int from, - int length, - @Nullable String defaultIndex, - @Nullable String defaultType, - XContentType xContentType - ) throws IOException { - return add(new BytesArray(data, from, length), defaultIndex, defaultType, xContentType); + return add(data, from, length, null, xContentType); } /** * Adds a framed data in binary format */ public BulkRequest add(byte[] data, int from, int length, @Nullable String defaultIndex, XContentType xContentType) throws IOException { - return add(new BytesArray(data, from, length), defaultIndex, MapperService.SINGLE_MAPPING_NAME, xContentType); - } - - /** - * Adds a framed data in binary format - * @deprecated use {@link #add(BytesReference, String, XContentType)} instead - */ - @Deprecated - public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, XContentType xContentType) - throws IOException { - return add(data, defaultIndex, defaultType, null, null, null, null, true, xContentType); + return add(new BytesArray(data, from, length), defaultIndex, xContentType); } /** * Adds a framed data in binary format */ public BulkRequest add(BytesReference data, @Nullable String defaultIndex, XContentType xContentType) throws IOException { - return add(data, defaultIndex, MapperService.SINGLE_MAPPING_NAME, null, null, null, null, true, xContentType); - } - - /** - * Adds a framed data in binary format - * @deprecated use {@link #add(BytesReference, String, boolean, XContentType)} instead - */ - @Deprecated - public BulkRequest add( - BytesReference data, - @Nullable String defaultIndex, - @Nullable String defaultType, - boolean allowExplicitIndex, - XContentType xContentType - ) throws IOException { - return add(data, defaultIndex, defaultType, null, null, null, null, allowExplicitIndex, xContentType); + return add(data, defaultIndex, null, null, null, null, true, xContentType); } /** @@ -309,7 +257,7 @@ public BulkRequest add( */ public BulkRequest add(BytesReference data, @Nullable String defaultIndex, boolean allowExplicitIndex, XContentType xContentType) throws IOException { - return add(data, defaultIndex, MapperService.SINGLE_MAPPING_NAME, null, null, null, null, allowExplicitIndex, xContentType); + return add(data, defaultIndex, null, null, null, null, allowExplicitIndex, xContentType); } public BulkRequest add( @@ -321,27 +269,12 @@ public BulkRequest add( boolean allowExplicitIndex, XContentType xContentType ) throws IOException { - return add( - data, - defaultIndex, - MapperService.SINGLE_MAPPING_NAME, - defaultRouting, - defaultFetchSourceContext, - defaultPipeline, - null, - allowExplicitIndex, - xContentType - ); + return add(data, defaultIndex, 
defaultRouting, defaultFetchSourceContext, defaultPipeline, null, allowExplicitIndex, xContentType); } - /** - * @deprecated use {@link #add(BytesReference, String, String, FetchSourceContext, String, boolean, XContentType)} instead - */ - @Deprecated public BulkRequest add( BytesReference data, @Nullable String defaultIndex, - @Nullable String defaultType, @Nullable String defaultRouting, @Nullable FetchSourceContext defaultFetchSourceContext, @Nullable String defaultPipeline, @@ -355,14 +288,13 @@ public BulkRequest add( new BulkRequestParser(true).parse( data, defaultIndex, - defaultType, routing, defaultFetchSourceContext, pipeline, requireAlias, allowExplicitIndex, xContentType, - this::internalAdd, + (indexRequest, type) -> internalAdd(indexRequest), this::internalAdd, this::add ); @@ -526,9 +458,6 @@ public String getDescription() { private void applyGlobalMandatoryParameters(DocWriteRequest request) { request.index(valueOrDefault(request.index(), globalIndex)); - if (Strings.isNullOrEmpty(globalType) == false && MapperService.SINGLE_MAPPING_NAME.equals(globalType) == false) { - request.defaultTypeIfNull(globalType); - } } private static String valueOrDefault(String value, String globalDefault) { diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkRequestBuilder.java b/server/src/main/java/org/opensearch/action/bulk/BulkRequestBuilder.java index c2e372129e9ae..c58877e48a7eb 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkRequestBuilder.java @@ -46,7 +46,6 @@ import org.opensearch.common.Nullable; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentType; -import org.opensearch.index.mapper.MapperService; /** * A bulk request holds an ordered {@link IndexRequest}s and {@link DeleteRequest}s and allows to executes @@ -54,14 +53,6 @@ */ public class BulkRequestBuilder extends ActionRequestBuilder implements WriteRequestBuilder { - /** - * @deprecated use {@link #BulkRequestBuilder(OpenSearchClient, BulkAction, String)} instead - */ - @Deprecated - public BulkRequestBuilder(OpenSearchClient client, BulkAction action, @Nullable String globalIndex, @Nullable String globalType) { - super(client, action, new BulkRequest(globalIndex, globalType)); - } - public BulkRequestBuilder(OpenSearchClient client, BulkAction action, @Nullable String globalIndex) { super(client, action, new BulkRequest(globalIndex)); } @@ -128,29 +119,12 @@ public BulkRequestBuilder add(byte[] data, int from, int length, XContentType xC return this; } - /** - * Adds a framed data in binary format - * @deprecated use {@link #add(byte[], int, int, String, XContentType)} instead - */ - @Deprecated - public BulkRequestBuilder add( - byte[] data, - int from, - int length, - @Nullable String defaultIndex, - @Nullable String defaultType, - XContentType xContentType - ) throws Exception { - request.add(data, from, length, defaultIndex, defaultType, xContentType); - return this; - } - /** * Adds a framed data in binary format */ public BulkRequestBuilder add(byte[] data, int from, int length, @Nullable String defaultIndex, XContentType xContentType) throws Exception { - request.add(data, from, length, defaultIndex, MapperService.SINGLE_MAPPING_NAME, xContentType); + request.add(data, from, length, defaultIndex, xContentType); return this; } diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkRequestParser.java 
b/server/src/main/java/org/opensearch/action/bulk/BulkRequestParser.java index 488c667d000d8..042e104f70c7f 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkRequestParser.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkRequestParser.java @@ -40,7 +40,6 @@ import org.opensearch.common.ParseField; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.bytes.BytesReference; -import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.lucene.uid.Versions; import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.common.xcontent.NamedXContentRegistry; @@ -49,12 +48,12 @@ import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.VersionType; import org.opensearch.index.seqno.SequenceNumbers; -import org.opensearch.rest.action.document.RestBulkAction; import org.opensearch.search.fetch.subphase.FetchSourceContext; import java.io.IOException; import java.util.HashMap; import java.util.Map; +import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.function.Function; @@ -65,8 +64,6 @@ */ public final class BulkRequestParser { - private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(BulkRequestParser.class); - private static final ParseField INDEX = new ParseField("_index"); private static final ParseField TYPE = new ParseField("_type"); private static final ParseField ID = new ParseField("_id"); @@ -81,14 +78,15 @@ public final class BulkRequestParser { private static final ParseField IF_PRIMARY_TERM = new ParseField("if_primary_term"); private static final ParseField REQUIRE_ALIAS = new ParseField(DocWriteRequest.REQUIRE_ALIAS); - private final boolean warnOnTypeUsage; + // TODO: Remove this parameter once the BulkMonitoring endpoint has been removed + private final boolean errorOnType; /** * Create a new parser. - * @param warnOnTypeUsage whether it warns upon types being explicitly specified + * @param errorOnType whether to allow _type information in the index line; used by BulkMonitoring */ - public BulkRequestParser(boolean warnOnTypeUsage) { - this.warnOnTypeUsage = warnOnTypeUsage; + public BulkRequestParser(boolean errorOnType) { + this.errorOnType = errorOnType; } private static int findNextMarker(byte marker, int from, BytesReference data) { @@ -136,45 +134,7 @@ public void parse( @Nullable Boolean defaultRequireAlias, boolean allowExplicitIndex, XContentType xContentType, - Consumer indexRequestConsumer, - Consumer updateRequestConsumer, - Consumer deleteRequestConsumer - ) throws IOException { - parse( - data, - defaultIndex, - null, - defaultRouting, - defaultFetchSourceContext, - defaultPipeline, - defaultRequireAlias, - allowExplicitIndex, - xContentType, - indexRequestConsumer, - updateRequestConsumer, - deleteRequestConsumer - ); - } - - /** - * Parse the provided {@code data} assuming the provided default values. Index requests - * will be passed to the {@code indexRequestConsumer}, update requests to the - * {@code updateRequestConsumer} and delete requests to the {@code deleteRequestConsumer}. - * @deprecated Use {@link #parse(BytesReference, String, String, FetchSourceContext, String, Boolean, boolean, XContentType, - * Consumer, Consumer, Consumer)} instead. 
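The replacement parse below threads any explicit `_type` from the action line through a BiConsumer instead of stamping it onto the IndexRequest, so the one remaining legacy caller can decide what to do with it. A minimal wiring sketch under stated assumptions: `data` holds an NDJSON bulk body and `requests` is a caller-side list, neither of which is part of this patch; parse throws IOException, elided here. With `errorOnType` set to true, as BulkRequest does, an explicit `_type` throws before the consumer runs, so the lambda's second argument stays null.

    List<DocWriteRequest<?>> requests = new ArrayList<>();
    new BulkRequestParser(true).parse(
        data,                    // assumed NDJSON bulk body
        "my-index",              // default index
        null, null, null, null,  // default routing, fetch source, pipeline, require_alias
        true,                    // allowExplicitIndex
        XContentType.JSON,
        (indexRequest, type) -> requests.add(indexRequest), // type is null unless a legacy line carried one
        requests::add,           // update requests
        requests::add            // delete requests
    );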
- */ - @Deprecated - public void parse( - BytesReference data, - @Nullable String defaultIndex, - @Nullable String defaultType, - @Nullable String defaultRouting, - @Nullable FetchSourceContext defaultFetchSourceContext, - @Nullable String defaultPipeline, - @Nullable Boolean defaultRequireAlias, - boolean allowExplicitIndex, - XContentType xContentType, - Consumer indexRequestConsumer, + BiConsumer indexRequestConsumer, Consumer updateRequestConsumer, Consumer deleteRequestConsumer ) throws IOException { @@ -182,7 +142,6 @@ public void parse( int line = 0; int from = 0; byte marker = xContent.streamSeparator(); - boolean typesDeprecationLogged = false; // Bulk requests can contain a lot of repeated strings for the index, pipeline and routing parameters. This map is used to // deduplicate duplicate strings parsed for these parameters. While it does not prevent instantiating the duplicate strings, it // reduces their lifetime to the lifetime of this parse call instead of the lifetime of the full bulk request. @@ -231,7 +190,7 @@ public void parse( String action = parser.currentName(); String index = defaultIndex; - String type = defaultType; + String type = null; String id = null; String routing = defaultRouting; FetchSourceContext fetchSourceContext = defaultFetchSourceContext; @@ -255,14 +214,15 @@ public void parse( currentFieldName = parser.currentName(); } else if (token.isValue()) { if (INDEX.match(currentFieldName, parser.getDeprecationHandler())) { - if (!allowExplicitIndex) { + if (allowExplicitIndex == false) { throw new IllegalArgumentException("explicit index in bulk is not allowed"); } index = stringDeduplicator.computeIfAbsent(parser.text(), Function.identity()); } else if (TYPE.match(currentFieldName, parser.getDeprecationHandler())) { - if (warnOnTypeUsage && typesDeprecationLogged == false) { - deprecationLogger.deprecate("bulk_with_types", RestBulkAction.TYPES_DEPRECATION_MESSAGE); - typesDeprecationLogged = true; + if (errorOnType) { + throw new IllegalArgumentException( + "Action/metadata line [" + line + "] contains an unknown parameter [" + currentFieldName + "]" + ); } type = stringDeduplicator.computeIfAbsent(parser.text(), Function.identity()); } else if (ID.match(currentFieldName, parser.getDeprecationHandler())) { @@ -333,7 +293,8 @@ public void parse( if ("delete".equals(action)) { deleteRequestConsumer.accept( - new DeleteRequest(index, type, id).routing(routing) + new DeleteRequest(index).id(id) + .routing(routing) .version(version) .versionType(versionType) .setIfSeqNo(ifSeqNo) @@ -351,18 +312,21 @@ public void parse( if ("index".equals(action)) { if (opType == null) { indexRequestConsumer.accept( - new IndexRequest(index, type, id).routing(routing) + new IndexRequest(index).id(id) + .routing(routing) .version(version) .versionType(versionType) .setPipeline(pipeline) .setIfSeqNo(ifSeqNo) .setIfPrimaryTerm(ifPrimaryTerm) .source(sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType), xContentType) - .setRequireAlias(requireAlias) + .setRequireAlias(requireAlias), + type ); } else { indexRequestConsumer.accept( - new IndexRequest(index, type, id).routing(routing) + new IndexRequest(index).id(id) + .routing(routing) .version(version) .versionType(versionType) .create("create".equals(opType)) @@ -370,12 +334,14 @@ public void parse( .setIfSeqNo(ifSeqNo) .setIfPrimaryTerm(ifPrimaryTerm) .source(sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType), xContentType) - .setRequireAlias(requireAlias) + .setRequireAlias(requireAlias), + type ); } } 
else if ("create".equals(action)) { indexRequestConsumer.accept( - new IndexRequest(index, type, id).routing(routing) + new IndexRequest(index).id(id) + .routing(routing) .version(version) .versionType(versionType) .create(true) @@ -383,7 +349,8 @@ public void parse( .setIfSeqNo(ifSeqNo) .setIfPrimaryTerm(ifPrimaryTerm) .source(sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType), xContentType) - .setRequireAlias(requireAlias) + .setRequireAlias(requireAlias), + type ); } else if ("update".equals(action)) { if (version != Versions.MATCH_ANY || versionType != VersionType.INTERNAL) { @@ -391,7 +358,9 @@ public void parse( "Update requests do not support versioning. " + "Please use `if_seq_no` and `if_primary_term` instead" ); } - UpdateRequest updateRequest = new UpdateRequest(index, type, id).routing(routing) + UpdateRequest updateRequest = new UpdateRequest().index(index) + .id(id) + .routing(routing) .retryOnConflict(retryOnConflict) .setIfSeqNo(ifSeqNo) .setIfPrimaryTerm(ifPrimaryTerm) diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkResponse.java b/server/src/main/java/org/opensearch/action/bulk/BulkResponse.java index ef76913d9e1f3..751ad567c8639 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkResponse.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkResponse.java @@ -129,8 +129,6 @@ public String buildFailureMessage() { .append(i) .append("]: index [") .append(response.getIndex()) - .append("], type [") - .append(response.getType()) .append("], id [") .append(response.getId()) .append("], message [") diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java index f3d3581d5172a..560fd1d8a45b3 100644 --- a/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java @@ -210,9 +210,8 @@ public static IndexRequest getIndexWriteRequest(DocWriteRequest docWriteReque @Override protected void doExecute(Task task, BulkRequest bulkRequest, ActionListener listener) { - final long indexingBytes = bulkRequest.ramBytesUsed(); final boolean isOnlySystem = isOnlySystem(bulkRequest, clusterService.state().metadata().getIndicesLookup(), systemIndices); - final Releasable releasable = indexingPressureService.markCoordinatingOperationStarted(indexingBytes, isOnlySystem); + final Releasable releasable = indexingPressureService.markCoordinatingOperationStarted(bulkRequest::ramBytesUsed, isOnlySystem); final ActionListener releasingListener = ActionListener.runBefore(listener, releasable::close); final String executorName = isOnlySystem ? 
Names.SYSTEM_WRITE : Names.WRITE; try { @@ -469,10 +468,7 @@ private boolean setResponseFailureIfIndexMatches( Exception e ) { if (index.equals(request.index())) { - responses.set( - idx, - new BulkItemResponse(idx, request.opType(), new BulkItemResponse.Failure(request.index(), request.type(), request.id(), e)) - ); + responses.set(idx, new BulkItemResponse(idx, request.opType(), new BulkItemResponse.Failure(request.index(), request.id(), e))); return true; } return false; @@ -553,7 +549,7 @@ protected void doRun() { prohibitCustomRoutingOnDataStream(docWriteRequest, metadata); IndexRequest indexRequest = (IndexRequest) docWriteRequest; final IndexMetadata indexMetadata = metadata.index(concreteIndex); - MappingMetadata mappingMd = indexMetadata.mappingOrDefault(); + MappingMetadata mappingMd = indexMetadata.mapping(); Version indexCreated = indexMetadata.getCreationVersion(); indexRequest.resolveRouting(metadata); indexRequest.process(indexCreated, mappingMd, concreteIndex.getName()); @@ -569,19 +565,14 @@ protected void doRun() { docWriteRequest.routing(metadata.resolveWriteIndexRouting(docWriteRequest.routing(), docWriteRequest.index())); // check if routing is required, if so, throw error if routing wasn't specified if (docWriteRequest.routing() == null && metadata.routingRequired(concreteIndex.getName())) { - throw new RoutingMissingException(concreteIndex.getName(), docWriteRequest.type(), docWriteRequest.id()); + throw new RoutingMissingException(concreteIndex.getName(), docWriteRequest.id()); } break; default: throw new AssertionError("request type not supported: [" + docWriteRequest.opType() + "]"); } } catch (OpenSearchParseException | IllegalArgumentException | RoutingMissingException e) { - BulkItemResponse.Failure failure = new BulkItemResponse.Failure( - concreteIndex.getName(), - docWriteRequest.type(), - docWriteRequest.id(), - e - ); + BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex.getName(), docWriteRequest.id(), e); BulkItemResponse bulkItemResponse = new BulkItemResponse(i, docWriteRequest.opType(), failure); responses.set(i, bulkItemResponse); // make sure the request gets never processed again @@ -631,7 +622,7 @@ protected void doRun() { final boolean isOnlySystem = isOnlySystem(bulkRequest, clusterService.state().metadata().getIndicesLookup(), systemIndices); final Releasable releasable = indexingPressureService.markCoordinatingOperationStarted( shardId, - bulkShardRequest.ramBytesUsed(), + bulkShardRequest::ramBytesUsed, isOnlySystem ); shardBulkAction.execute(bulkShardRequest, ActionListener.runBefore(new ActionListener() { @@ -660,7 +651,7 @@ public void onFailure(Exception e) { new BulkItemResponse( request.id(), docWriteRequest.opType(), - new BulkItemResponse.Failure(indexName, docWriteRequest.type(), docWriteRequest.id(), e) + new BulkItemResponse.Failure(indexName, docWriteRequest.id(), e) ) ); } @@ -760,12 +751,7 @@ private boolean addFailureIfIndexIsUnavailable( } private void addFailure(DocWriteRequest request, int idx, Exception unavailableException) { - BulkItemResponse.Failure failure = new BulkItemResponse.Failure( - request.index(), - request.type(), - request.id(), - unavailableException - ); + BulkItemResponse.Failure failure = new BulkItemResponse.Failure(request.index(), request.id(), unavailableException); BulkItemResponse bulkItemResponse = new BulkItemResponse(idx, request.opType(), failure); responses.set(idx, bulkItemResponse); // make sure the request gets never processed again @@ -963,7 +949,6 @@ synchronized 
void markItemAsDropped(int slot) { indexRequest.opType(), new UpdateResponse( new ShardId(indexRequest.index(), IndexMetadata.INDEX_UUID_NA_VALUE, 0), - indexRequest.type(), id, SequenceNumbers.UNASSIGNED_SEQ_NO, SequenceNumbers.UNASSIGNED_PRIMARY_TERM, @@ -979,10 +964,9 @@ synchronized void markItemAsFailed(int slot, Exception e) { logger.debug( String.format( Locale.ROOT, - "failed to execute pipeline [%s] for document [%s/%s/%s]", + "failed to execute pipeline [%s] for document [%s/%s]", indexRequest.getPipeline(), indexRequest.index(), - indexRequest.type(), indexRequest.id() ), e @@ -993,12 +977,7 @@ synchronized void markItemAsFailed(int slot, Exception e) { // 2) Add a bulk item failure for this request // 3) Continue with the next request in the bulk. failedSlots.set(slot); - BulkItemResponse.Failure failure = new BulkItemResponse.Failure( - indexRequest.index(), - indexRequest.type(), - indexRequest.id(), - e - ); + BulkItemResponse.Failure failure = new BulkItemResponse.Failure(indexRequest.index(), indexRequest.id(), e); itemResponses.add(new BulkItemResponse(slot, indexRequest.opType(), failure)); } diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java index 1ce4a346e5dc3..ed407bd37d684 100644 --- a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java @@ -311,7 +311,7 @@ static boolean executeBulkItemRequest( case UPDATED: IndexRequest indexRequest = updateResult.action(); IndexMetadata metadata = context.getPrimary().indexSettings().getIndexMetadata(); - MappingMetadata mappingMd = metadata.mappingOrDefault(); + MappingMetadata mappingMd = metadata.mapping(); indexRequest.process(metadata.getCreationVersion(), mappingMd, updateRequest.concreteIndex()); context.setRequestToExecute(indexRequest); break; @@ -340,7 +340,7 @@ static boolean executeBulkItemRequest( final DeleteRequest request = context.getRequestToExecute(); result = primary.applyDeleteOperationOnPrimary( version, - request.type(), + MapperService.SINGLE_MAPPING_NAME, request.id(), request.versionType(), request.ifSeqNo(), @@ -353,7 +353,7 @@ static boolean executeBulkItemRequest( request.versionType(), new SourceToParse( request.index(), - request.type(), + MapperService.SINGLE_MAPPING_NAME, request.id(), request.source(), request.getContentType(), @@ -370,7 +370,7 @@ static boolean executeBulkItemRequest( try { primary.mapperService() .merge( - context.getRequestToExecute().type(), + MapperService.SINGLE_MAPPING_NAME, new CompressedXContent(result.getRequiredMappingUpdate(), XContentType.JSON, ToXContent.EMPTY_PARAMS), MapperService.MergeReason.MAPPING_UPDATE_PREFLIGHT ); @@ -383,7 +383,7 @@ static boolean executeBulkItemRequest( mappingUpdater.updateMappings( result.getRequiredMappingUpdate(), primary.shardId(), - context.getRequestToExecute().type(), + MapperService.SINGLE_MAPPING_NAME, new ActionListener() { @Override public void onResponse(Void v) { @@ -485,7 +485,6 @@ static BulkItemResponse processUpdateResponse( updateResponse = new UpdateResponse( indexResponse.getShardInfo(), indexResponse.getShardId(), - indexResponse.getType(), indexResponse.getId(), indexResponse.getSeqNo(), indexResponse.getPrimaryTerm(), @@ -518,7 +517,6 @@ static BulkItemResponse processUpdateResponse( updateResponse = new UpdateResponse( deleteResponse.getShardInfo(), deleteResponse.getShardId(), - 
deleteResponse.getType(), deleteResponse.getId(), deleteResponse.getSeqNo(), deleteResponse.getPrimaryTerm(), @@ -608,7 +606,7 @@ private static Engine.Result performOpOnReplica( final ShardId shardId = replica.shardId(); final SourceToParse sourceToParse = new SourceToParse( shardId.getIndexName(), - indexRequest.type(), + MapperService.SINGLE_MAPPING_NAME, indexRequest.id(), indexRequest.source(), indexRequest.getContentType(), @@ -629,7 +627,7 @@ private static Engine.Result performOpOnReplica( primaryResponse.getSeqNo(), primaryResponse.getPrimaryTerm(), primaryResponse.getVersion(), - deleteRequest.type(), + MapperService.SINGLE_MAPPING_NAME, deleteRequest.id() ); break; diff --git a/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java b/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java index b6fda084e2958..c40933ba9c92e 100644 --- a/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java +++ b/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java @@ -34,6 +34,7 @@ import org.apache.lucene.util.RamUsageEstimator; import org.opensearch.LegacyESVersion; +import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.CompositeIndicesRequest; import org.opensearch.action.DocWriteRequest; @@ -57,7 +58,7 @@ * A request to delete a document from an index based on its type and id. Best created using * {@link org.opensearch.client.Requests#deleteRequest(String)}. *
<p>
- * The operation requires the {@link #index()}, {@link #type(String)} and {@link #id(String)} to + * The operation requires the {@link #index()} and {@link #id(String)} to * be set. * * @see DeleteResponse @@ -73,8 +74,6 @@ public class DeleteRequest extends ReplicatedWriteRequest<DeleteRequest> private static final ShardId NO_SHARD_ID = null; - // Set to null initially so we can know to override in bulk requests that have a default type. - private String type; private String id; @Nullable private String routing; @@ -89,7 +88,10 @@ public DeleteRequest(StreamInput in) throws IOException { public DeleteRequest(@Nullable ShardId shardId, StreamInput in) throws IOException { super(shardId, in); - type = in.readString(); + if (in.getVersion().before(Version.V_2_0_0)) { + String type = in.readString(); + assert MapperService.SINGLE_MAPPING_NAME.equals(type) : "Expected [_doc] but received [" + type + "]"; + } id = in.readString(); routing = in.readOptionalString(); if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { @@ -106,7 +108,7 @@ public DeleteRequest() { } /** - * Constructs a new delete request against the specified index. The {@link #type(String)} and {@link #id(String)} + * Constructs a new delete request against the specified index. The {@link #id(String)} * must be set. */ public DeleteRequest(String index) { @@ -114,23 +116,6 @@ public DeleteRequest(String index) { this.index = index; } - /** - * Constructs a new delete request against the specified index with the type and id. - * - * @param index The index to get the document from - * @param type The type of the document - * @param id The id of the document - * - * @deprecated Types are in the process of being removed. Use {@link #DeleteRequest(String, String)} instead. - */ - @Deprecated - public DeleteRequest(String index, String type, String id) { - super(NO_SHARD_ID); - this.index = index; - this.type = type; - this.id = id; - } - /** * Constructs a new delete request against the specified index and id. * @@ -146,9 +131,6 @@ public DeleteRequest(String index, String id) { @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = super.validate(); - if (Strings.isEmpty(type())) { - validationException = addValidationError("type is missing", validationException); - } if (Strings.isEmpty(id)) { validationException = addValidationError("id is missing", validationException); } @@ -158,48 +140,6 @@ public ActionRequestValidationException validate() { return validationException; } - /** - * The type of the document to delete. - * - * @deprecated Types are in the process of being removed. - */ - @Deprecated - @Override - public String type() { - if (type == null) { - return MapperService.SINGLE_MAPPING_NAME; - } - return type; - } - - /** - * Sets the type of the document to delete. - * - * @deprecated Types are in the process of being removed. - */ - @Deprecated - @Override - public DeleteRequest type(String type) { - this.type = type; - return this; - } - - /** - * Set the default type supplied to a bulk - * request if this individual request's type is null - * or empty - * - * @deprecated Types are in the process of being removed. - */ - @Deprecated - @Override - public DeleteRequest defaultTypeIfNull(String defaultType) { - if (Strings.isNullOrEmpty(type)) { - type = defaultType; - } - return this; - } - /** * The id of the document to delete.
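With the type accessors gone, a delete is addressed by index and id alone. A caller-side sketch; the `client` below is an assumed org.opensearch.client.Client handle, not something this patch introduces:

    DeleteRequest request = new DeleteRequest("my-index", "1") // index + id, no type
        .routing("user-1")
        .setIfSeqNo(42)        // optional optimistic-concurrency guards
        .setIfPrimaryTerm(1);
    DeleteResponse response = client.delete(request).actionGet();
    assert response.getResult() == DocWriteResponse.Result.DELETED;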
*/ @@ -333,9 +273,9 @@ public void writeThin(StreamOutput out) throws IOException { } private void writeBody(StreamOutput out) throws IOException { - // A 7.x request allows null types but if deserialized in a 6.x node will cause nullpointer exceptions. - // So we use the type accessor method here to make the type non-null (will default it to "_doc"). - out.writeString(type()); + if (out.getVersion().before(Version.V_2_0_0)) { + out.writeString(MapperService.SINGLE_MAPPING_NAME); + } out.writeString(id); out.writeOptionalString(routing()); if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { @@ -349,7 +289,7 @@ private void writeBody(StreamOutput out) throws IOException { @Override public String toString() { - return "delete {[" + index + "][" + type() + "][" + id + "]}"; + return "delete {[" + index + "][" + id + "]}"; } @Override diff --git a/server/src/main/java/org/opensearch/action/delete/DeleteRequestBuilder.java b/server/src/main/java/org/opensearch/action/delete/DeleteRequestBuilder.java index f3d15cb9b0555..28abf092ad72d 100644 --- a/server/src/main/java/org/opensearch/action/delete/DeleteRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/delete/DeleteRequestBuilder.java @@ -55,9 +55,10 @@ public DeleteRequestBuilder(OpenSearchClient client, DeleteAction action, @Nulla /** * Sets the type of the document to delete. + * @deprecated types will be removed */ + @Deprecated public DeleteRequestBuilder setType(String type) { - request.type(type); return this; } diff --git a/server/src/main/java/org/opensearch/action/delete/DeleteResponse.java b/server/src/main/java/org/opensearch/action/delete/DeleteResponse.java index 21438313a7faa..6b000561ad282 100644 --- a/server/src/main/java/org/opensearch/action/delete/DeleteResponse.java +++ b/server/src/main/java/org/opensearch/action/delete/DeleteResponse.java @@ -58,12 +58,12 @@ public DeleteResponse(StreamInput in) throws IOException { super(in); } - public DeleteResponse(ShardId shardId, String type, String id, long seqNo, long primaryTerm, long version, boolean found) { - this(shardId, type, id, seqNo, primaryTerm, version, found ? Result.DELETED : Result.NOT_FOUND); + public DeleteResponse(ShardId shardId, String id, long seqNo, long primaryTerm, long version, boolean found) { + this(shardId, id, seqNo, primaryTerm, version, found ? 
Result.DELETED : Result.NOT_FOUND); } - private DeleteResponse(ShardId shardId, String type, String id, long seqNo, long primaryTerm, long version, Result result) { - super(shardId, type, id, seqNo, primaryTerm, version, assertDeletedOrNotFound(result)); + private DeleteResponse(ShardId shardId, String id, long seqNo, long primaryTerm, long version, Result result) { + super(shardId, id, seqNo, primaryTerm, version, assertDeletedOrNotFound(result)); } private static Result assertDeletedOrNotFound(Result result) { @@ -81,7 +81,6 @@ public String toString() { StringBuilder builder = new StringBuilder(); builder.append("DeleteResponse["); builder.append("index=").append(getIndex()); - builder.append(",type=").append(getType()); builder.append(",id=").append(getId()); builder.append(",version=").append(getVersion()); builder.append(",result=").append(getResult().getLowercase()); @@ -115,7 +114,7 @@ public static class Builder extends DocWriteResponse.Builder { @Override public DeleteResponse build() { - DeleteResponse deleteResponse = new DeleteResponse(shardId, type, id, seqNo, primaryTerm, version, result); + DeleteResponse deleteResponse = new DeleteResponse(shardId, id, seqNo, primaryTerm, version, result); deleteResponse.setForcedRefresh(forcedRefresh); if (shardInfo != null) { deleteResponse.setShardInfo(shardInfo); diff --git a/server/src/main/java/org/opensearch/action/explain/ExplainRequest.java b/server/src/main/java/org/opensearch/action/explain/ExplainRequest.java index d0098ea9b111a..1543c2c95b269 100644 --- a/server/src/main/java/org/opensearch/action/explain/ExplainRequest.java +++ b/server/src/main/java/org/opensearch/action/explain/ExplainRequest.java @@ -32,6 +32,7 @@ package org.opensearch.action.explain; +import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.ValidateActions; import org.opensearch.action.support.single.shard.SingleShardRequest; @@ -57,7 +58,6 @@ public class ExplainRequest extends SingleShardRequest implement private static final ParseField QUERY_FIELD = new ParseField("query"); - private String type = MapperService.SINGLE_MAPPING_NAME; private String id; private String routing; private String preference; @@ -71,16 +71,6 @@ public class ExplainRequest extends SingleShardRequest implement public ExplainRequest() {} - /** - * @deprecated Types are in the process of being removed. Use {@link ExplainRequest(String, String) instead.} - */ - @Deprecated - public ExplainRequest(String index, String type, String id) { - this.index = index; - this.type = type; - this.id = id; - } - public ExplainRequest(String index, String id) { this.index = index; this.id = id; @@ -88,7 +78,9 @@ public ExplainRequest(String index, String id) { ExplainRequest(StreamInput in) throws IOException { super(in); - type = in.readString(); + if (in.getVersion().before(Version.V_2_0_0)) { + in.readString(); + } id = in.readString(); routing = in.readOptionalString(); preference = in.readOptionalString(); @@ -99,23 +91,6 @@ public ExplainRequest(String index, String id) { nowInMillis = in.readVLong(); } - /** - * @deprecated Types are in the process of being removed. - */ - @Deprecated - public String type() { - return type; - } - - /** - * @deprecated Types are in the process of being removed. 
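ExplainRequest gets the same reduction: the two-argument constructor survives and the type accessors disappear. A sketch of a typeless explain call, again assuming a `client` handle and a hypothetical index and id:

    ExplainRequest explain = new ExplainRequest("my-index", "1");
    explain.query(QueryBuilders.termQuery("user", "kimchy"));
    ExplainResponse response = client.explain(explain).actionGet();
    if (response.isMatch()) {
        System.out.println(response.getExplanation());
    }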
- */ - @Deprecated - public ExplainRequest type(String type) { - this.type = type; - return this; - } - public String id() { return id; } @@ -196,9 +171,6 @@ public ExplainRequest filteringAlias(AliasFilter filteringAlias) { @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = super.validateNonNullIndex(); - if (Strings.isEmpty(type)) { - validationException = addValidationError("type is missing", validationException); - } if (Strings.isEmpty(id)) { validationException = addValidationError("id is missing", validationException); } @@ -211,7 +183,9 @@ public ActionRequestValidationException validate() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeString(type); + if (out.getVersion().before(Version.V_2_0_0)) { + out.writeString(MapperService.SINGLE_MAPPING_NAME); + } out.writeString(id); out.writeOptionalString(routing); out.writeOptionalString(preference); diff --git a/server/src/main/java/org/opensearch/action/explain/ExplainRequestBuilder.java b/server/src/main/java/org/opensearch/action/explain/ExplainRequestBuilder.java index c161a6e639870..6839479079845 100644 --- a/server/src/main/java/org/opensearch/action/explain/ExplainRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/explain/ExplainRequestBuilder.java @@ -48,16 +48,8 @@ public class ExplainRequestBuilder extends SingleShardOperationRequestBuilder PARSER = new ConstructingObjectParser<>( "explain", true, - (arg, exists) -> new ExplainResponse( - (String) arg[0], - (String) arg[1], - (String) arg[2], - exists, - (Explanation) arg[3], - (GetResult) arg[4] - ) + (arg, exists) -> new ExplainResponse((String) arg[0], (String) arg[1], exists, (Explanation) arg[2], (GetResult) arg[3]) ); static { PARSER.declareString(ConstructingObjectParser.constructorArg(), _INDEX); - PARSER.declareString(ConstructingObjectParser.constructorArg(), _TYPE); PARSER.declareString(ConstructingObjectParser.constructorArg(), _ID); final ConstructingObjectParser explanationParser = new ConstructingObjectParser<>( "explanation", @@ -211,7 +195,6 @@ public static ExplainResponse fromXContent(XContentParser parser, boolean exists public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field(_INDEX.getPreferredName(), index); - builder.field(_TYPE.getPreferredName(), type); builder.field(_ID.getPreferredName(), id); builder.field(MATCHED.getPreferredName(), isMatch()); if (hasExplanation()) { @@ -253,7 +236,6 @@ public boolean equals(Object obj) { } ExplainResponse other = (ExplainResponse) obj; return index.equals(other.index) - && type.equals(other.type) && id.equals(other.id) && Objects.equals(explanation, other.explanation) && getResult.isExists() == other.getResult.isExists() @@ -263,6 +245,6 @@ public boolean equals(Object obj) { @Override public int hashCode() { - return Objects.hash(index, type, id, explanation, getResult.isExists(), getResult.sourceAsMap(), getResult.getFields()); + return Objects.hash(index, id, explanation, getResult.isExists(), getResult.sourceAsMap(), getResult.getFields()); } } diff --git a/server/src/main/java/org/opensearch/action/explain/TransportExplainAction.java b/server/src/main/java/org/opensearch/action/explain/TransportExplainAction.java index 3d40cb30cdee5..9fb16eec7d36b 100644 --- a/server/src/main/java/org/opensearch/action/explain/TransportExplainAction.java +++ 
b/server/src/main/java/org/opensearch/action/explain/TransportExplainAction.java @@ -43,7 +43,6 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.routing.ShardIterator; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.Strings; import org.opensearch.common.inject.Inject; import org.opensearch.common.io.stream.Writeable; import org.opensearch.common.lease.Releasables; @@ -51,7 +50,6 @@ import org.opensearch.index.engine.Engine; import org.opensearch.index.get.GetResult; import org.opensearch.index.mapper.IdFieldMapper; -import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.Uid; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.ShardId; @@ -116,7 +114,7 @@ protected void resolveRequest(ClusterState state, InternalRequest request) { request.request().filteringAlias(aliasFilter); // Fail fast on the node that received the request. if (request.request().routing() == null && state.getMetadata().routingRequired(request.concreteIndex())) { - throw new RoutingMissingException(request.concreteIndex(), request.request().type(), request.request().id()); + throw new RoutingMissingException(request.concreteIndex(), request.request().id()); } } @@ -136,21 +134,15 @@ protected void asyncShardOperation(ExplainRequest request, ShardId shardId, Acti @Override protected ExplainResponse shardOperation(ExplainRequest request, ShardId shardId) throws IOException { - String[] types; - if (MapperService.SINGLE_MAPPING_NAME.equals(request.type())) { // typeless explain call - types = Strings.EMPTY_ARRAY; - } else { - types = new String[] { request.type() }; - } - ShardSearchRequest shardSearchLocalRequest = new ShardSearchRequest(shardId, types, request.nowInMillis, request.filteringAlias()); + ShardSearchRequest shardSearchLocalRequest = new ShardSearchRequest(shardId, request.nowInMillis, request.filteringAlias()); SearchContext context = searchService.createSearchContext(shardSearchLocalRequest, SearchService.NO_TIMEOUT); Engine.GetResult result = null; try { // No need to check the type, IndexShard#get does it for us Term uidTerm = new Term(IdFieldMapper.NAME, Uid.encodeId(request.id())); - result = context.indexShard().get(new Engine.Get(false, false, request.type(), request.id(), uidTerm)); + result = context.indexShard().get(new Engine.Get(false, false, request.id(), uidTerm)); if (!result.exists()) { - return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), false); + return new ExplainResponse(shardId.getIndexName(), request.id(), false); } context.parsedQuery(context.getQueryShardContext().toQuery(request.query())); context.preProcess(true); @@ -166,10 +158,10 @@ protected ExplainResponse shardOperation(ExplainRequest request, ShardId shardId // doc isn't deleted between the initial get and this call. 
GetResult getResult = context.indexShard() .getService() - .get(result, request.id(), request.type(), request.storedFields(), request.fetchSourceContext()); - return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), true, explanation, getResult); + .get(result, request.id(), request.storedFields(), request.fetchSourceContext()); + return new ExplainResponse(shardId.getIndexName(), request.id(), true, explanation, getResult); } else { - return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), true, explanation); + return new ExplainResponse(shardId.getIndexName(), request.id(), true, explanation); } } catch (IOException e) { throw new OpenSearchException("Could not explain", e); diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java b/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java index df958d11fa725..a4807eff1acb4 100644 --- a/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java @@ -195,7 +195,7 @@ private boolean canMatchShard(FieldCapabilitiesIndexRequest req) throws IOExcept return true; } assert req.nowInMillis() != 0L; - ShardSearchRequest searchRequest = new ShardSearchRequest(req.shardId(), null, req.nowInMillis(), AliasFilter.EMPTY); + ShardSearchRequest searchRequest = new ShardSearchRequest(req.shardId(), req.nowInMillis(), AliasFilter.EMPTY); searchRequest.source(new SearchSourceBuilder().query(req.indexFilter())); return searchService.canMatch(searchRequest).canMatch(); } diff --git a/server/src/main/java/org/opensearch/action/get/GetRequest.java b/server/src/main/java/org/opensearch/action/get/GetRequest.java index 2796a8e9e47d7..9badf2db92f67 100644 --- a/server/src/main/java/org/opensearch/action/get/GetRequest.java +++ b/server/src/main/java/org/opensearch/action/get/GetRequest.java @@ -33,11 +33,11 @@ package org.opensearch.action.get; import org.opensearch.LegacyESVersion; +import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.RealtimeRequest; import org.opensearch.action.ValidateActions; import org.opensearch.action.support.single.shard.SingleShardRequest; -import org.opensearch.common.Nullable; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -54,7 +54,7 @@ * A request to get a document (its source) from an index based on its id. Best created using * {@link org.opensearch.client.Requests#getRequest(String)}. *
<p>
- * The operation requires the {@link #index()}, {@link #type(String)} and {@link #id(String)} + * The operation requires the {@link #index()} and {@link #id(String)} * to be set. * * @see GetResponse @@ -63,7 +63,6 @@ */ public class GetRequest extends SingleShardRequest<GetRequest> implements RealtimeRequest { - private String type; private String id; private String routing; private String preference; @@ -79,13 +78,13 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti private VersionType versionType = VersionType.INTERNAL; private long version = Versions.MATCH_ANY; - public GetRequest() { - type = MapperService.SINGLE_MAPPING_NAME; - } + public GetRequest() {} GetRequest(StreamInput in) throws IOException { super(in); - type = in.readString(); + if (in.getVersion().before(Version.V_2_0_0)) { + in.readString(); + } id = in.readString(); routing = in.readOptionalString(); if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { @@ -106,22 +105,6 @@ public GetRequest() { */ public GetRequest(String index) { super(index); - this.type = MapperService.SINGLE_MAPPING_NAME; - } - - /** - * Constructs a new get request against the specified index with the type and id. - * - * @param index The index to get the document from - * @param type The type of the document - * @param id The id of the document - * @deprecated Types are in the process of being removed, use {@link GetRequest(String, String)} instead. - */ - @Deprecated - public GetRequest(String index, String type, String id) { - super(index); - this.type = type; - this.id = id; } /** @@ -133,15 +116,11 @@ public GetRequest(String index, String type, String id) { public GetRequest(String index, String id) { super(index); this.id = id; - this.type = MapperService.SINGLE_MAPPING_NAME; } @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = super.validateNonNullIndex(); - if (Strings.isEmpty(type)) { - validationException = addValidationError("type is missing", validationException); - } if (Strings.isEmpty(id)) { validationException = addValidationError("id is missing", validationException); } @@ -154,19 +133,6 @@ public ActionRequestValidationException validate() { return validationException; } - /** - * Sets the type of the document to fetch. - * @deprecated Types are in the process of being removed. - */ - @Deprecated - public GetRequest type(@Nullable String type) { - if (type == null) { - type = MapperService.SINGLE_MAPPING_NAME; - } - this.type = type; - return this; - } - /** * Sets the id of the document to fetch. */ @@ -194,14 +160,6 @@ public GetRequest preference(String preference) { return this; } - /** - * @deprecated Types are in the process of being removed.
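GetRequest's stream constructor above reads and discards the legacy slot, and its writeTo below emits it for old peers. This read/write pair is the wire-compatibility idiom the change repeats across DeleteRequest, ExplainRequest, GetRequest and MultiGetRequest.Item; isolated here as a sketch of the bare pattern, not new API:

    // Write side: a pre-2.0 peer still expects a type string, so emit the constant.
    if (out.getVersion().before(Version.V_2_0_0)) {
        out.writeString(MapperService.SINGLE_MAPPING_NAME); // always "_doc"
    }
    out.writeString(id);

    // Read side: drain the obsolete slot when the sender is older than 2.0.
    if (in.getVersion().before(Version.V_2_0_0)) {
        in.readString(); // legacy type, discarded
    }
    id = in.readString();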
- */ - @Deprecated - public String type() { - return type; - } - public String id() { return id; } @@ -295,7 +253,9 @@ public VersionType versionType() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeString(type); + if (out.getVersion().before(Version.V_2_0_0)) { + out.writeString(MapperService.SINGLE_MAPPING_NAME); + } out.writeString(id); out.writeOptionalString(routing); if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { @@ -313,7 +273,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public String toString() { - return "get [" + index + "][" + type + "][" + id + "]: routing [" + routing + "]"; + return "get [" + index + "][" + id + "]: routing [" + routing + "]"; } } diff --git a/server/src/main/java/org/opensearch/action/get/GetRequestBuilder.java b/server/src/main/java/org/opensearch/action/get/GetRequestBuilder.java index e47965595be2d..492a88b9d3821 100644 --- a/server/src/main/java/org/opensearch/action/get/GetRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/get/GetRequestBuilder.java @@ -52,15 +52,6 @@ public GetRequestBuilder(OpenSearchClient client, GetAction action, @Nullable St super(client, action, new GetRequest(index)); } - /** - * Sets the type of the document to fetch. If set to {@code null}, will use just the id to fetch the - * first document matching it. - */ - public GetRequestBuilder setType(@Nullable String type) { - request.type(type); - return this; - } - /** * Sets the id of the document to fetch. */ diff --git a/server/src/main/java/org/opensearch/action/get/GetResponse.java b/server/src/main/java/org/opensearch/action/get/GetResponse.java index b10057ed282b5..a15607d696195 100644 --- a/server/src/main/java/org/opensearch/action/get/GetResponse.java +++ b/server/src/main/java/org/opensearch/action/get/GetResponse.java @@ -84,13 +84,6 @@ public String getIndex() { return getResult.getIndex(); } - /** - * The type of the document. - */ - public String getType() { - return getResult.getType(); - } - /** * The id of the document. */ @@ -209,10 +202,10 @@ public static GetResponse fromXContent(XContentParser parser) throws IOException // At this stage we ensure that we parsed enough information to return // a valid GetResponse instance. If it's not the case, we throw an // exception so that callers know it and can handle it correctly. - if (getResult.getIndex() == null && getResult.getType() == null && getResult.getId() == null) { + if (getResult.getIndex() == null && getResult.getId() == null) { throw new ParsingException( parser.getTokenLocation(), - String.format(Locale.ROOT, "Missing required fields [%s,%s,%s]", GetResult._INDEX, GetResult._TYPE, GetResult._ID) + String.format(Locale.ROOT, "Missing required fields [%s,%s]", GetResult._INDEX, GetResult._ID) ); } return new GetResponse(getResult); diff --git a/server/src/main/java/org/opensearch/action/get/MultiGetItemResponse.java b/server/src/main/java/org/opensearch/action/get/MultiGetItemResponse.java index 4308a9223919b..1ff684fcc5872 100644 --- a/server/src/main/java/org/opensearch/action/get/MultiGetItemResponse.java +++ b/server/src/main/java/org/opensearch/action/get/MultiGetItemResponse.java @@ -71,16 +71,6 @@ public String getIndex() { return response.getIndex(); } - /** - * The type of the document. - */ - public String getType() { - if (failure != null) { - return failure.getType(); - } - return response.getType(); - } - /** * The id of the document. 
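Gets follow the same shape as deletes: index plus id, with routing still available. A caller-side sketch with the same assumed `client` handle:

    GetRequest get = new GetRequest("my-index", "1").routing("user-1");
    GetResponse doc = client.get(get).actionGet();
    if (doc.isExists()) {
        Map<String, Object> source = doc.getSourceAsMap();
    }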
*/ diff --git a/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java b/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java index 220659cfd894e..974799dd7bf4c 100644 --- a/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java +++ b/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java @@ -34,6 +34,7 @@ import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; +import org.opensearch.Version; import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.CompositeIndicesRequest; @@ -54,6 +55,7 @@ import org.opensearch.common.xcontent.XContentParser; import org.opensearch.common.xcontent.XContentParser.Token; import org.opensearch.index.VersionType; +import org.opensearch.index.mapper.MapperService; import org.opensearch.search.fetch.subphase.FetchSourceContext; import java.io.IOException; @@ -73,7 +75,6 @@ public class MultiGetRequest extends ActionRequest private static final ParseField DOCS = new ParseField("docs"); private static final ParseField INDEX = new ParseField("_index"); - private static final ParseField TYPE = new ParseField("_type"); private static final ParseField ID = new ParseField("_id"); private static final ParseField ROUTING = new ParseField("routing"); private static final ParseField VERSION = new ParseField("version"); @@ -88,7 +89,6 @@ public class MultiGetRequest extends ActionRequest public static class Item implements Writeable, IndicesRequest, ToXContentObject { private String index; - private String type; private String id; private String routing; private String[] storedFields; @@ -102,7 +102,9 @@ public Item() { public Item(StreamInput in) throws IOException { index = in.readString(); - type = in.readOptionalString(); + if (in.getVersion().before(Version.V_2_0_0)) { + in.readOptionalString(); + } id = in.readString(); routing = in.readOptionalString(); if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { @@ -115,22 +117,6 @@ public Item(StreamInput in) throws IOException { fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new); } - /** - * Constructs a single get item. - * - * @param index The index name - * @param type The type (can be null) - * @param id The id - * - * @deprecated Types are in the process of being removed, use {@link Item(String, String) instead}. 
- */ - @Deprecated - public Item(String index, @Nullable String type, String id) { - this.index = index; - this.type = type; - this.id = id; - } - public Item(String index, String id) { this.index = index; this.id = id; @@ -155,10 +141,6 @@ public Item index(String index) { return this; } - public String type() { - return this.type; - } - public String id() { return this.id; } @@ -217,7 +199,9 @@ public Item fetchSourceContext(FetchSourceContext fetchSourceContext) { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(index); - out.writeOptionalString(type); + if (out.getVersion().before(Version.V_2_0_0)) { + out.writeOptionalString(MapperService.SINGLE_MAPPING_NAME); + } out.writeString(id); out.writeOptionalString(routing); if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { @@ -234,7 +218,6 @@ public void writeTo(StreamOutput out) throws IOException { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field(INDEX.getPreferredName(), index); - builder.field(TYPE.getPreferredName(), type); builder.field(ID.getPreferredName(), id); builder.field(ROUTING.getPreferredName(), routing); builder.field(STORED_FIELDS.getPreferredName(), storedFields); @@ -259,7 +242,6 @@ public boolean equals(Object o) { if (!id.equals(item.id)) return false; if (!index.equals(item.index)) return false; if (routing != null ? !routing.equals(item.routing) : item.routing != null) return false; - if (type != null ? !type.equals(item.type) : item.type != null) return false; if (versionType != item.versionType) return false; return true; @@ -268,7 +250,6 @@ public boolean equals(Object o) { @Override public int hashCode() { int result = index.hashCode(); - result = 31 * result + (type != null ? type.hashCode() : 0); result = 31 * result + id.hashCode(); result = 31 * result + (routing != null ? routing.hashCode() : 0); result = 31 * result + (storedFields != null ? Arrays.hashCode(storedFields) : 0); @@ -308,16 +289,6 @@ public MultiGetRequest add(Item item) { return this; } - /** - * @deprecated Types are in the process of being removed, use - * {@link MultiGetRequest#add(String, String)} instead. 
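The multi-get equivalent keeps per-item options on Item while the typed add() overloads go away. A sketch, `client` assumed as before:

    MultiGetRequest mget = new MultiGetRequest();
    mget.add("my-index", "1");                                         // plain index + id
    mget.add(new MultiGetRequest.Item("my-index", "2").routing("u2")); // per-item options
    for (MultiGetItemResponse item : client.multiGet(mget).actionGet()) {
        if (item.isFailed() == false) {
            System.out.println(item.getResponse().getSourceAsString());
        }
    }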
- */ - @Deprecated - public MultiGetRequest add(String index, @Nullable String type, String id) { - items.add(new Item(index, type, id)); - return this; - } - public MultiGetRequest add(String index, String id) { items.add(new Item(index, id)); return this; @@ -377,7 +348,6 @@ public MultiGetRequest refresh(boolean refresh) { public MultiGetRequest add( @Nullable String defaultIndex, - @Nullable String defaultType, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, @Nullable String defaultRouting, @@ -395,18 +365,9 @@ public MultiGetRequest add( currentFieldName = parser.currentName(); } else if (token == Token.START_ARRAY) { if ("docs".equals(currentFieldName)) { - parseDocuments( - parser, - this.items, - defaultIndex, - defaultType, - defaultFields, - defaultFetchSource, - defaultRouting, - allowExplicitIndex - ); + parseDocuments(parser, this.items, defaultIndex, defaultFields, defaultFetchSource, defaultRouting, allowExplicitIndex); } else if ("ids".equals(currentFieldName)) { - parseIds(parser, this.items, defaultIndex, defaultType, defaultFields, defaultFetchSource, defaultRouting); + parseIds(parser, this.items, defaultIndex, defaultFields, defaultFetchSource, defaultRouting); } else { final String message = String.format( Locale.ROOT, @@ -434,7 +395,6 @@ private static void parseDocuments( XContentParser parser, List items, @Nullable String defaultIndex, - @Nullable String defaultType, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, @Nullable String defaultRouting, @@ -447,7 +407,6 @@ private static void parseDocuments( throw new IllegalArgumentException("docs array element should include an object"); } String index = defaultIndex; - String type = defaultType; String id = null; String routing = defaultRouting; List storedFields = null; @@ -465,8 +424,6 @@ private static void parseDocuments( throw new IllegalArgumentException("explicit index in multi get is not allowed"); } index = parser.text(); - } else if (TYPE.match(currentFieldName, parser.getDeprecationHandler())) { - type = parser.text(); } else if (ID.match(currentFieldName, parser.getDeprecationHandler())) { id = parser.text(); } else if (ROUTING.match(currentFieldName, parser.getDeprecationHandler())) { @@ -565,7 +522,7 @@ private static void parseDocuments( aFields = defaultFields; } items.add( - new Item(index, type, id).routing(routing) + new Item(index, id).routing(routing) .storedFields(aFields) .version(version) .versionType(versionType) @@ -578,7 +535,6 @@ public static void parseIds( XContentParser parser, List items, @Nullable String defaultIndex, - @Nullable String defaultType, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, @Nullable String defaultRouting @@ -589,7 +545,7 @@ public static void parseIds( throw new IllegalArgumentException("ids array element should only contain ids"); } items.add( - new Item(defaultIndex, defaultType, parser.text()).storedFields(defaultFields) + new Item(defaultIndex, parser.text()).storedFields(defaultFields) .fetchSourceContext(defaultFetchSource) .routing(defaultRouting) ); diff --git a/server/src/main/java/org/opensearch/action/get/MultiGetRequestBuilder.java b/server/src/main/java/org/opensearch/action/get/MultiGetRequestBuilder.java index a068e4c66e5fa..56ac6cbd1b8c9 100644 --- a/server/src/main/java/org/opensearch/action/get/MultiGetRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/get/MultiGetRequestBuilder.java @@ -34,7 +34,6 @@ import 
org.opensearch.action.ActionRequestBuilder; import org.opensearch.client.OpenSearchClient; -import org.opensearch.common.Nullable; /** * A multi get document action request builder. @@ -45,21 +44,21 @@ public MultiGetRequestBuilder(OpenSearchClient client, MultiGetAction action) { super(client, action, new MultiGetRequest()); } - public MultiGetRequestBuilder add(String index, @Nullable String type, String id) { - request.add(index, type, id); + public MultiGetRequestBuilder add(String index, String id) { + request.add(index, id); return this; } - public MultiGetRequestBuilder add(String index, @Nullable String type, Iterable ids) { + public MultiGetRequestBuilder add(String index, Iterable ids) { for (String id : ids) { - request.add(index, type, id); + request.add(index, id); } return this; } - public MultiGetRequestBuilder add(String index, @Nullable String type, String... ids) { + public MultiGetRequestBuilder add(String index, String... ids) { for (String id : ids) { - request.add(index, type, id); + request.add(index, id); } return this; } diff --git a/server/src/main/java/org/opensearch/action/get/MultiGetResponse.java b/server/src/main/java/org/opensearch/action/get/MultiGetResponse.java index a5cf07c32b3e9..ca6249861dd50 100644 --- a/server/src/main/java/org/opensearch/action/get/MultiGetResponse.java +++ b/server/src/main/java/org/opensearch/action/get/MultiGetResponse.java @@ -33,6 +33,7 @@ package org.opensearch.action.get; import org.opensearch.OpenSearchException; +import org.opensearch.Version; import org.opensearch.action.ActionResponse; import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; @@ -43,6 +44,7 @@ import org.opensearch.common.xcontent.XContentParser; import org.opensearch.common.xcontent.XContentParser.Token; import org.opensearch.index.get.GetResult; +import org.opensearch.index.mapper.MapperService; import java.io.IOException; import java.util.ArrayList; @@ -53,7 +55,6 @@ public class MultiGetResponse extends ActionResponse implements Iterable, ToXContentObject { private static final ParseField INDEX = new ParseField("_index"); - private static final ParseField TYPE = new ParseField("_type"); private static final ParseField ID = new ParseField("_id"); private static final ParseField ERROR = new ParseField("error"); private static final ParseField DOCS = new ParseField("docs"); @@ -64,20 +65,20 @@ public class MultiGetResponse extends ActionResponse implements Iterable new ParameterizedMessage("{} failed to execute multi_get for [{}]/[{}]", shardId, item.type(), item.id()), - e - ); - response.add(request.locations.get(i), new MultiGetResponse.Failure(request.index(), item.type(), item.id(), e)); + logger.debug(() -> new ParameterizedMessage("{} failed to execute multi_get for [{}]", shardId, item.id()), e); + response.add(request.locations.get(i), new MultiGetResponse.Failure(request.index(), item.id(), e)); } } } diff --git a/server/src/main/java/org/opensearch/action/index/IndexRequest.java b/server/src/main/java/org/opensearch/action/index/IndexRequest.java index ccbe48ab40a51..ed77774bc01d3 100644 --- a/server/src/main/java/org/opensearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/opensearch/action/index/IndexRequest.java @@ -47,7 +47,6 @@ import org.opensearch.cluster.metadata.MappingMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.common.Nullable; -import org.opensearch.common.Strings; import org.opensearch.common.UUIDs; import org.opensearch.common.bytes.BytesArray; 
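The IndexRequest changes that follow use the same backward-compatibility pattern seen in the hunks above: the removed _type is still written to pre-2.0.0 peers so mixed-version clusters keep working. A minimal sketch of the write side, assuming a StreamOutput out and the id field of the surrounding class:

    if (out.getVersion().before(Version.V_2_0_0)) {
        // older nodes still expect an optional type slot; send the fixed "_doc"
        out.writeOptionalString(MapperService.SINGLE_MAPPING_NAME);
    }
    out.writeOptionalString(id); // the rest of the payload is unchanged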
import org.opensearch.common.bytes.BytesReference; @@ -77,7 +76,7 @@ * Index request to index a typed JSON document into a specific index and make it searchable. Best * created using {@link org.opensearch.client.Requests#indexRequest(String)}. * - * The index requires the {@link #index()}, {@link #type(String)}, {@link #id(String)} and + * The index requires the {@link #index()}, {@link #id(String)} and * {@link #source(byte[], XContentType)} to be set. * * The source (content to index) can be set in its bytes form using ({@link #source(byte[], XContentType)}), @@ -103,8 +102,6 @@ public class IndexRequest extends ReplicatedWriteRequest implement private static final ShardId NO_SHARD_ID = null; - // Set to null initially so we can know to override in bulk requests that have a default type. - private String type; private String id; @Nullable private String routing; @@ -143,7 +140,10 @@ public IndexRequest(StreamInput in) throws IOException { public IndexRequest(@Nullable ShardId shardId, StreamInput in) throws IOException { super(shardId, in); - type = in.readOptionalString(); + if (in.getVersion().before(Version.V_2_0_0)) { + String type = in.readOptionalString(); + assert MapperService.SINGLE_MAPPING_NAME.equals(type) : "Expected [_doc] but received [" + type + "]"; + } id = in.readOptionalString(); routing = in.readOptionalString(); if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { @@ -181,7 +181,7 @@ public IndexRequest() { } /** - * Constructs a new index request against the specific index. The {@link #type(String)} + * Constructs a new index request against the specific index. The * {@link #source(byte[], XContentType)} must be set. */ public IndexRequest(String index) { @@ -189,44 +189,12 @@ public IndexRequest(String index) { this.index = index; } - /** - * Constructs a new index request against the specific index and type. The - * {@link #source(byte[], XContentType)} must be set. - * @deprecated Types are in the process of being removed. Use {@link #IndexRequest(String)} instead. - */ - @Deprecated - public IndexRequest(String index, String type) { - super(NO_SHARD_ID); - this.index = index; - this.type = type; - } - - /** - * Constructs a new index request against the index, type, id and using the source. - * - * @param index The index to index into - * @param type The type to index into - * @param id The id of document - * - * @deprecated Types are in the process of being removed. Use {@link #IndexRequest(String)} with {@link #id(String)} instead. - */ - @Deprecated - public IndexRequest(String index, String type, String id) { - super(NO_SHARD_ID); - this.index = index; - this.type = type; - this.id = id; - } - @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = super.validate(); if (source == null) { validationException = addValidationError("source is missing", validationException); } - if (Strings.isEmpty(type())) { - validationException = addValidationError("type is missing", validationException); - } if (contentType == null) { validationException = addValidationError("content type is missing", validationException); } @@ -298,45 +266,6 @@ public XContentType getContentType() { return contentType; } - /** - * The type of the indexed document. - * @deprecated Types are in the process of being removed. - */ - @Deprecated - @Override - public String type() { - if (type == null) { - return MapperService.SINGLE_MAPPING_NAME; - } - return type; - } - - /** - * Sets the type of the indexed document. 
- * @deprecated Types are in the process of being removed. - */ - @Deprecated - @Override - public IndexRequest type(String type) { - this.type = type; - return this; - } - - /** - * Set the default type supplied to a bulk - * request if this individual request's type is null - * or empty - * @deprecated Types are in the process of being removed. - */ - @Deprecated - @Override - public IndexRequest defaultTypeIfNull(String defaultType) { - if (Strings.isNullOrEmpty(type)) { - type = defaultType; - } - return this; - } - /** * The id of the indexed document. If not set, will be automatically generated. */ @@ -687,7 +616,7 @@ public void process(Version indexCreatedVersion, @Nullable MappingMetadata mappi if (mappingMd != null) { // might as well check for routing here if (mappingMd.routing().required() && routing == null) { - throw new RoutingMissingException(concreteIndex, type(), id); + throw new RoutingMissingException(concreteIndex, id); } } @@ -733,9 +662,9 @@ public void writeThin(StreamOutput out) throws IOException { } private void writeBody(StreamOutput out) throws IOException { - // A 7.x request allows null types but if deserialized in a 6.x node will cause nullpointer exceptions. - // So we use the type accessor method here to make the type non-null (will default it to "_doc"). - out.writeOptionalString(type()); + if (out.getVersion().before(Version.V_2_0_0)) { + out.writeOptionalString(MapperService.SINGLE_MAPPING_NAME); + } out.writeOptionalString(id); out.writeOptionalString(routing); if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { @@ -782,7 +711,7 @@ public String toString() { } catch (Exception e) { // ignore } - return "index {[" + index + "][" + type() + "][" + id + "], source[" + sSource + "]}"; + return "index {[" + index + "][" + id + "], source[" + sSource + "]}"; } @Override diff --git a/server/src/main/java/org/opensearch/action/index/IndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/index/IndexRequestBuilder.java index ff13239717cda..f31efa3fc95d8 100644 --- a/server/src/main/java/org/opensearch/action/index/IndexRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/index/IndexRequestBuilder.java @@ -61,9 +61,10 @@ public IndexRequestBuilder(OpenSearchClient client, IndexAction action, @Nullabl /** * Sets the type to index the document to. + * @deprecated types will be removed */ + @Deprecated public IndexRequestBuilder setType(String type) { - request.type(type); return this; } diff --git a/server/src/main/java/org/opensearch/action/index/IndexResponse.java b/server/src/main/java/org/opensearch/action/index/IndexResponse.java index 9a25cbee43da2..be0826ce84f96 100644 --- a/server/src/main/java/org/opensearch/action/index/IndexResponse.java +++ b/server/src/main/java/org/opensearch/action/index/IndexResponse.java @@ -59,12 +59,12 @@ public IndexResponse(StreamInput in) throws IOException { super(in); } - public IndexResponse(ShardId shardId, String type, String id, long seqNo, long primaryTerm, long version, boolean created) { - this(shardId, type, id, seqNo, primaryTerm, version, created ? Result.CREATED : Result.UPDATED); + public IndexResponse(ShardId shardId, String id, long seqNo, long primaryTerm, long version, boolean created) { + this(shardId, id, seqNo, primaryTerm, version, created ? 
Result.CREATED : Result.UPDATED); } - private IndexResponse(ShardId shardId, String type, String id, long seqNo, long primaryTerm, long version, Result result) { - super(shardId, type, id, seqNo, primaryTerm, version, assertCreatedOrUpdated(result)); + private IndexResponse(ShardId shardId, String id, long seqNo, long primaryTerm, long version, Result result) { + super(shardId, id, seqNo, primaryTerm, version, assertCreatedOrUpdated(result)); } private static Result assertCreatedOrUpdated(Result result) { @@ -82,7 +82,6 @@ public String toString() { StringBuilder builder = new StringBuilder(); builder.append("IndexResponse["); builder.append("index=").append(getIndex()); - builder.append(",type=").append(getType()); builder.append(",id=").append(getId()); builder.append(",version=").append(getVersion()); builder.append(",result=").append(getResult().getLowercase()); @@ -117,7 +116,7 @@ public static void parseXContentFields(XContentParser parser, Builder context) t public static class Builder extends DocWriteResponse.Builder { @Override public IndexResponse build() { - IndexResponse indexResponse = new IndexResponse(shardId, type, id, seqNo, primaryTerm, version, result); + IndexResponse indexResponse = new IndexResponse(shardId, id, seqNo, primaryTerm, version, result); indexResponse.setForcedRefresh(forcedRefresh); if (shardInfo != null) { indexResponse.setShardInfo(shardInfo); diff --git a/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequest.java b/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequest.java index e8f7b901f6e9c..6223f25488d88 100644 --- a/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequest.java +++ b/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequest.java @@ -200,7 +200,6 @@ private static List parseDocs(Map config) { "[types removal] specifying _type in pipeline simulation requests is deprecated" ); } - String type = ConfigurationUtils.readStringOrIntProperty(null, null, dataMap, Metadata.TYPE.getFieldName(), "_doc"); String id = ConfigurationUtils.readStringOrIntProperty(null, null, dataMap, Metadata.ID.getFieldName(), "_id"); String routing = ConfigurationUtils.readOptionalStringOrIntProperty(null, null, dataMap, Metadata.ROUTING.getFieldName()); Long version = null; @@ -213,7 +212,7 @@ private static List parseDocs(Map config) { ConfigurationUtils.readStringProperty(null, null, dataMap, Metadata.VERSION_TYPE.getFieldName()) ); } - IngestDocument ingestDocument = new IngestDocument(index, type, id, routing, version, versionType, document); + IngestDocument ingestDocument = new IngestDocument(index, id, routing, version, versionType, document); if (dataMap.containsKey(Metadata.IF_SEQ_NO.getFieldName())) { Long ifSeqNo = (Long) ConfigurationUtils.readObject(null, null, dataMap, Metadata.IF_SEQ_NO.getFieldName()); ingestDocument.setFieldValue(Metadata.IF_SEQ_NO.getFieldName(), ifSeqNo); diff --git a/server/src/main/java/org/opensearch/action/ingest/WriteableIngestDocument.java b/server/src/main/java/org/opensearch/action/ingest/WriteableIngestDocument.java index 7b451b23d0a97..2f8c65486c22f 100644 --- a/server/src/main/java/org/opensearch/action/ingest/WriteableIngestDocument.java +++ b/server/src/main/java/org/opensearch/action/ingest/WriteableIngestDocument.java @@ -66,24 +66,22 @@ final class WriteableIngestDocument implements Writeable, ToXContentFragment { a -> { HashMap sourceAndMetadata = new HashMap<>(); sourceAndMetadata.put(Metadata.INDEX.getFieldName(), a[0]); - 
sourceAndMetadata.put(Metadata.TYPE.getFieldName(), a[1]); - sourceAndMetadata.put(Metadata.ID.getFieldName(), a[2]); + sourceAndMetadata.put(Metadata.ID.getFieldName(), a[1]); + if (a[2] != null) { + sourceAndMetadata.put(Metadata.ROUTING.getFieldName(), a[2]); + } if (a[3] != null) { - sourceAndMetadata.put(Metadata.ROUTING.getFieldName(), a[3]); + sourceAndMetadata.put(Metadata.VERSION.getFieldName(), a[3]); } if (a[4] != null) { - sourceAndMetadata.put(Metadata.VERSION.getFieldName(), a[4]); - } - if (a[5] != null) { - sourceAndMetadata.put(Metadata.VERSION_TYPE.getFieldName(), a[5]); + sourceAndMetadata.put(Metadata.VERSION_TYPE.getFieldName(), a[4]); } - sourceAndMetadata.putAll((Map) a[6]); - return new WriteableIngestDocument(new IngestDocument(sourceAndMetadata, (Map) a[7])); + sourceAndMetadata.putAll((Map) a[5]); + return new WriteableIngestDocument(new IngestDocument(sourceAndMetadata, (Map) a[6])); } ); static { INGEST_DOC_PARSER.declareString(constructorArg(), new ParseField(Metadata.INDEX.getFieldName())); - INGEST_DOC_PARSER.declareString(constructorArg(), new ParseField(Metadata.TYPE.getFieldName())); INGEST_DOC_PARSER.declareString(constructorArg(), new ParseField(Metadata.ID.getFieldName())); INGEST_DOC_PARSER.declareString(optionalConstructorArg(), new ParseField(Metadata.ROUTING.getFieldName())); INGEST_DOC_PARSER.declareLong(optionalConstructorArg(), new ParseField(Metadata.VERSION.getFieldName())); diff --git a/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java b/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java index 2cff7cb5d270f..c45b6477d30f0 100644 --- a/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java +++ b/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java @@ -192,7 +192,6 @@ public static void readMultiLineFormat( CheckedBiConsumer consumer, String[] indices, IndicesOptions indicesOptions, - String[] types, String routing, String searchType, Boolean ccsMinimizeRoundtrips, @@ -225,9 +224,6 @@ public static void readMultiLineFormat( if (indicesOptions != null) { searchRequest.indicesOptions(indicesOptions); } - if (types != null && types.length > 0) { - searchRequest.types(types); - } if (routing != null) { searchRequest.routing(routing); } @@ -256,8 +252,6 @@ public static void readMultiLineFormat( throw new IllegalArgumentException("explicit index in multi search is not allowed"); } searchRequest.indices(nodeStringArrayValue(value)); - } else if ("type".equals(entry.getKey()) || "types".equals(entry.getKey())) { - searchRequest.types(nodeStringArrayValue(value)); } else if ("search_type".equals(entry.getKey()) || "searchType".equals(entry.getKey())) { searchRequest.searchType(nodeStringValue(value, null)); } else if ("ccs_minimize_roundtrips".equals(entry.getKey()) || "ccsMinimizeRoundtrips".equals(entry.getKey())) { @@ -359,9 +353,6 @@ public static void writeSearchRequestParams(SearchRequest request, XContentBuild xContentBuilder.field("ignore_unavailable", request.indicesOptions().ignoreUnavailable()); xContentBuilder.field("allow_no_indices", request.indicesOptions().allowNoIndices()); } - if (request.types() != null) { - xContentBuilder.field("types", request.types()); - } if (request.searchType() != null) { xContentBuilder.field("search_type", request.searchType().name().toLowerCase(Locale.ROOT)); } diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequest.java b/server/src/main/java/org/opensearch/action/search/SearchRequest.java index 
b753ec059b7af..c4b97c35bc405 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequest.java @@ -106,8 +106,6 @@ public class SearchRequest extends ActionRequest implements IndicesRequest.Repla private Integer preFilterShardSize; - private String[] types = Strings.EMPTY_ARRAY; - private boolean ccsMinimizeRoundtrips = true; public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.strictExpandOpenAndForbidClosedIgnoreThrottled(); @@ -204,7 +202,6 @@ private SearchRequest( this.scroll = searchRequest.scroll; this.searchType = searchRequest.searchType; this.source = searchRequest.source; - this.types = searchRequest.types; this.localClusterAlias = localClusterAlias; this.absoluteStartMillis = absoluteStartMillis; this.finalReduce = finalReduce; @@ -225,7 +222,15 @@ public SearchRequest(StreamInput in) throws IOException { preference = in.readOptionalString(); scroll = in.readOptionalWriteable(Scroll::new); source = in.readOptionalWriteable(SearchSourceBuilder::new); - types = in.readStringArray(); + if (in.getVersion().before(Version.V_2_0_0)) { + // types are no longer supported, but a pre-2.0.0 peer may still send them; reject any non-empty value + String[] types = in.readStringArray(); + if (types.length > 0) { + throw new IllegalStateException( + "types are no longer supported in search requests but found [" + Arrays.toString(types) + "]" + ); + } + } indicesOptions = IndicesOptions.readIndicesOptions(in); requestCache = in.readOptionalBoolean(); batchedReduceSize = in.readVInt(); @@ -262,7 +267,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(preference); out.writeOptionalWriteable(scroll); out.writeOptionalWriteable(source); - out.writeStringArray(types); + if (out.getVersion().before(Version.V_2_0_0)) { + // types not supported so send an empty array to previous versions + out.writeStringArray(Strings.EMPTY_ARRAY); + } indicesOptions.writeIndicesOptions(out); out.writeOptionalBoolean(requestCache); out.writeVInt(batchedReduceSize); @@ -408,35 +416,6 @@ public void setCcsMinimizeRoundtrips(boolean ccsMinimizeRoundtrips) { this.ccsMinimizeRoundtrips = ccsMinimizeRoundtrips; } - /** - * The document types to execute the search against. Defaults to be executed against - * all types. - * - * @deprecated Types are in the process of being removed. Instead of using a type, prefer to - * filter on a field on the document. - */ - @Deprecated - public String[] types() { - return types; - } - - /** - * The document types to execute the search against. Defaults to be executed against - * all types. - * - * @deprecated Types are in the process of being removed. Instead of using a type, prefer to - * filter on a field on the document. - */ - @Deprecated - public SearchRequest types(String... types) { - Objects.requireNonNull(types, "types must not be null"); - for (String type : types) { - Objects.requireNonNull(type, "type must not be null"); - } - this.types = types; - return this; - } - /** * A comma separated list of routing values to control the shards the search will be executed on. 
*/ @@ -702,9 +681,6 @@ public final String buildDescription() { sb.append("indices["); Strings.arrayToDelimitedString(indices, ",", sb); sb.append("], "); - sb.append("types["); - Strings.arrayToDelimitedString(types, ",", sb); - sb.append("], "); sb.append("search_type[").append(searchType).append("], "); if (scroll != null) { sb.append("scroll[").append(scroll.keepAlive()).append("], "); @@ -733,7 +709,6 @@ public boolean equals(Object o) { && Objects.equals(source, that.source) && Objects.equals(requestCache, that.requestCache) && Objects.equals(scroll, that.scroll) - && Arrays.equals(types, that.types) && Objects.equals(batchedReduceSize, that.batchedReduceSize) && Objects.equals(maxConcurrentShardRequests, that.maxConcurrentShardRequests) && Objects.equals(preFilterShardSize, that.preFilterShardSize) @@ -755,7 +730,6 @@ public int hashCode() { source, requestCache, scroll, - Arrays.hashCode(types), indicesOptions, batchedReduceSize, maxConcurrentShardRequests, @@ -777,8 +751,6 @@ public String toString() { + Arrays.toString(indices) + ", indicesOptions=" + indicesOptions - + ", types=" - + Arrays.toString(types) + ", routing='" + routing + '\'' diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java b/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java index 758c72b5926e3..6def33f82b7bd 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java @@ -72,17 +72,6 @@ public SearchRequestBuilder setIndices(String... indices) { return this; } - /** - * The document types to execute the search against. Defaults to be executed against - * all types. - * @deprecated Types are going away, prefer filtering on a field. - */ - @Deprecated - public SearchRequestBuilder setTypes(String... types) { - request.types(types); - return this; - } - /** * The search type to execute, defaults to {@link SearchType#DEFAULT}. 
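The removed types() accessors pointed callers at field-based filtering, and that is the replacement pattern. A sketch under that assumption (the "doc_type" field name is illustrative only, not part of this change; imports elided):

    SearchRequest request = new SearchRequest("my-index");
    request.source(new SearchSourceBuilder()
        .query(QueryBuilders.termQuery("doc_type", "event"))); // filter on a field instead of a _type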
*/ diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchHelper.java b/server/src/main/java/org/opensearch/action/search/TransportSearchHelper.java index 875fb2019b89e..76770245a3dbe 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportSearchHelper.java +++ b/server/src/main/java/org/opensearch/action/search/TransportSearchHelper.java @@ -32,10 +32,10 @@ package org.opensearch.action.search; -import org.apache.lucene.store.ByteArrayDataInput; import org.apache.lucene.store.RAMOutputStream; import org.opensearch.LegacyESVersion; import org.opensearch.Version; +import org.opensearch.common.io.stream.BytesStreamInput; import org.opensearch.common.util.concurrent.AtomicArray; import org.opensearch.search.SearchPhaseResult; import org.opensearch.search.SearchShardTarget; @@ -88,7 +88,7 @@ static String buildScrollId(AtomicArray searchPhase static ParsedScrollId parseScrollId(String scrollId) { try { byte[] bytes = Base64.getUrlDecoder().decode(scrollId); - ByteArrayDataInput in = new ByteArrayDataInput(bytes); + BytesStreamInput in = new BytesStreamInput(bytes); final boolean includeContextUUID; final String type; final String firstChunk = in.readString(); diff --git a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java index 229482a0f76b2..0ea08eccae33d 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java +++ b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java @@ -878,8 +878,8 @@ protected void doRun() { // resolve it from the index settings request.waitForActiveShards(indexMetadata.getWaitForActiveShards()); } - assert request - .waitForActiveShards() != ActiveShardCount.DEFAULT : "request waitForActiveShards must be set in resolveRequest"; + assert request.waitForActiveShards() != ActiveShardCount.DEFAULT + : "request waitForActiveShards must be set in resolveRequest"; final ShardRouting primary = state.getRoutingTable().shardRoutingTable(request.shardId()).primaryShard(); if (primary == null || primary.active() == false) { diff --git a/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsItemResponse.java b/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsItemResponse.java index b2cc1f2941f60..037f4b95e3c73 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsItemResponse.java +++ b/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsItemResponse.java @@ -72,16 +72,6 @@ public String getIndex() { return response.getIndex(); } - /** - * The type of the document. - */ - public String getType() { - if (failure != null) { - return failure.getType(); - } - return response.getType(); - } - /** * The id of the document. 
*/ diff --git a/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsRequest.java b/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsRequest.java index f3617ca30ab79..aada19b081a1c 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsRequest.java +++ b/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsRequest.java @@ -42,7 +42,6 @@ import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.xcontent.XContentParser; -import org.opensearch.index.mapper.MapperService; import java.io.IOException; import java.util.ArrayList; @@ -80,8 +79,8 @@ public MultiTermVectorsRequest add(TermVectorsRequest termVectorsRequest) { return this; } - public MultiTermVectorsRequest add(String index, @Nullable String type, String id) { - requests.add(new TermVectorsRequest(index, type, id)); + public MultiTermVectorsRequest add(String index, String id) { + requests.add(new TermVectorsRequest(index, id)); return this; } @@ -132,9 +131,6 @@ public void add(TermVectorsRequest template, @Nullable XContentParser parser) th throw new IllegalArgumentException("docs array element should include an object"); } TermVectorsRequest termVectorsRequest = new TermVectorsRequest(template); - if (termVectorsRequest.type() == null) { - termVectorsRequest.type(MapperService.SINGLE_MAPPING_NAME); - } TermVectorsRequest.parseRequest(termVectorsRequest, parser); add(termVectorsRequest); } diff --git a/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsRequestBuilder.java b/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsRequestBuilder.java index 33ecfd46be39d..51711c01beb44 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsRequestBuilder.java @@ -34,7 +34,6 @@ import org.opensearch.action.ActionRequestBuilder; import org.opensearch.client.OpenSearchClient; -import org.opensearch.common.Nullable; public class MultiTermVectorsRequestBuilder extends ActionRequestBuilder { @@ -42,16 +41,16 @@ public MultiTermVectorsRequestBuilder(OpenSearchClient client, MultiTermVectorsA super(client, action, new MultiTermVectorsRequest()); } - public MultiTermVectorsRequestBuilder add(String index, @Nullable String type, Iterable ids) { + public MultiTermVectorsRequestBuilder add(String index, Iterable ids) { for (String id : ids) { - request.add(index, type, id); + request.add(index, id); } return this; } - public MultiTermVectorsRequestBuilder add(String index, @Nullable String type, String... ids) { + public MultiTermVectorsRequestBuilder add(String index, String... 
ids) { for (String id : ids) { - request.add(index, type, id); + request.add(index, id); } return this; } diff --git a/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsResponse.java b/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsResponse.java index f31aa15bf8167..599c2fa883dc7 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsResponse.java +++ b/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsResponse.java @@ -33,6 +33,7 @@ package org.opensearch.action.termvectors; import org.opensearch.OpenSearchException; +import org.opensearch.Version; import org.opensearch.action.ActionResponse; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -51,20 +52,21 @@ public class MultiTermVectorsResponse extends ActionResponse implements Iterable */ public static class Failure implements Writeable { private final String index; - private final String type; private final String id; private final Exception cause; - public Failure(String index, String type, String id, Exception cause) { + public Failure(String index, String id, Exception cause) { this.index = index; - this.type = type; this.id = id; this.cause = cause; } public Failure(StreamInput in) throws IOException { index = in.readString(); - type = in.readOptionalString(); + if (in.getVersion().before(Version.V_2_0_0)) { + // ignore removed type from pre-2.0.0 versions + in.readOptionalString(); + } id = in.readString(); cause = in.readException(); } @@ -76,16 +78,6 @@ public String getIndex() { return this.index; } - /** - * The type of the action. - * - * @deprecated Types are in the process of being removed. - */ - @Deprecated - public String getType() { - return type; - } - /** * The id of the action. 
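Note that the wire placeholder for the removed _type differs by slot. A side-by-side sketch of the two gates used in this patch (illustrative only, not one literal method):

    if (out.getVersion().before(Version.V_2_0_0)) {
        out.writeOptionalString(null); // optional slot: a null placeholder, as in Failure.writeTo below
    }
    if (out.getVersion().before(Version.V_2_0_0)) {
        out.writeString(MapperService.SINGLE_MAPPING_NAME); // required slot: must send a concrete "_doc"
    }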
*/ @@ -103,7 +95,10 @@ public Exception getCause() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(index); - out.writeOptionalString(type); + if (out.getVersion().before(Version.V_2_0_0)) { + // types no longer supported + out.writeOptionalString(null); + } out.writeString(id); out.writeException(cause); } @@ -138,7 +133,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); Failure failure = response.getFailure(); builder.field(Fields._INDEX, failure.getIndex()); - builder.field(Fields._TYPE, failure.getType()); builder.field(Fields._ID, failure.getId()); OpenSearchException.generateFailureXContent(builder, params, failure.getCause(), true); builder.endObject(); diff --git a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequest.java b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequest.java index c9a98d09d717f..214d5f0d6d4fa 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequest.java +++ b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequest.java @@ -34,6 +34,7 @@ import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; +import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.RealtimeRequest; import org.opensearch.action.ValidateActions; @@ -45,7 +46,6 @@ import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.lucene.uid.Versions; import org.opensearch.common.util.set.Sets; import org.opensearch.common.xcontent.XContentBuilder; @@ -53,7 +53,7 @@ import org.opensearch.common.xcontent.XContentParser; import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.VersionType; -import org.opensearch.rest.action.document.RestTermVectorsAction; +import org.opensearch.index.mapper.MapperService; import java.io.IOException; import java.util.ArrayList; @@ -71,14 +71,11 @@ * Request returning the term vector (doc frequency, positions, offsets) for a * document. *

- * Note, the {@link #index()}, {@link #type(String)} and {@link #id(String)} are + * Note, the {@link #index()} and {@link #id(String)} are * required. */ public class TermVectorsRequest extends SingleShardRequest implements RealtimeRequest { - private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(TermVectorsRequest.class); - private static final ParseField INDEX = new ParseField("_index"); - private static final ParseField TYPE = new ParseField("_type"); private static final ParseField ID = new ParseField("_id"); private static final ParseField ROUTING = new ParseField("routing"); private static final ParseField VERSION = new ParseField("version"); @@ -91,8 +88,6 @@ public class TermVectorsRequest extends SingleShardRequest i private static final ParseField FILTER = new ParseField("filter"); private static final ParseField DOC = new ParseField("doc"); - private String type; - private String id; private BytesReference doc; @@ -176,7 +171,10 @@ public TermVectorsRequest() {} TermVectorsRequest(StreamInput in) throws IOException { super(in); - type = in.readString(); + if (in.getVersion().before(Version.V_2_0_0)) { + // types no longer supported; ignore for BWC + in.readString(); + } id = in.readString(); if (in.readBoolean()) { @@ -218,24 +216,20 @@ public TermVectorsRequest() {} /** * Constructs a new term vector request for a document that will be fetched - from the provided index. Use {@link #type(String)} and - {@link #id(String)} to specify the document to load. + from the provided index. Use {@link #id(String)} to specify the document to load. */ - public TermVectorsRequest(String index, String type, String id) { + public TermVectorsRequest(String index, String id) { super(index); this.id = id; - this.type = type; } /** * Constructs a new term vector request for a document that will be fetched - from the provided index. Use {@link #type(String)} and - {@link #id(String)} to specify the document to load. + from the provided index. Use {@link #id(String)} to specify the document to load. */ public TermVectorsRequest(TermVectorsRequest other) { super(other.index()); this.id = other.id(); - this.type = other.type(); if (other.doc != null) { this.doc = new BytesArray(other.doc().toBytesRef(), true); this.xContentType = other.xContentType; @@ -258,7 +252,6 @@ public TermVectorsRequest(TermVectorsRequest other) { public TermVectorsRequest(MultiGetRequest.Item item) { super(item.index()); this.id = item.id(); - this.type = item.type(); this.selectedFields(item.storedFields()); this.routing(item.routing()); } @@ -267,21 +260,6 @@ public EnumSet getFlags() { return flagsEnum; } - /** - * Sets the type of document to get the term vector for. - */ - public TermVectorsRequest type(String type) { - this.type = type; - return this; - } - - /** - * Returns the type of document to get the term vector for. - */ - public String type() { - return type; - } - /** * Returns the id of document the term vector is requested for. 
*/ @@ -535,9 +513,6 @@ private void setFlag(Flag flag, boolean set) { @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = super.validateNonNullIndex(); - if (type == null) { - validationException = ValidateActions.addValidationError("type is missing", validationException); - } if (id == null && doc == null) { validationException = ValidateActions.addValidationError("id or doc is missing", validationException); } @@ -547,7 +522,10 @@ public ActionRequestValidationException validate() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeString(type); + if (out.getVersion().before(Version.V_2_0_0)) { + // types no longer supported; send "_doc" for bwc + out.writeString(MapperService.SINGLE_MAPPING_NAME); + } out.writeString(id); out.writeBoolean(doc != null); @@ -631,9 +609,6 @@ public static void parseRequest(TermVectorsRequest termVectorsRequest, XContentP } else if (INDEX.match(currentFieldName, parser.getDeprecationHandler())) { // the following is important for multi request parsing. termVectorsRequest.index = parser.text(); - } else if (TYPE.match(currentFieldName, parser.getDeprecationHandler())) { - termVectorsRequest.type = parser.text(); - deprecationLogger.deprecate("termvectors_with_types", RestTermVectorsAction.TYPES_DEPRECATION_MESSAGE); } else if (ID.match(currentFieldName, parser.getDeprecationHandler())) { if (termVectorsRequest.doc != null) { throw new OpenSearchParseException( diff --git a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequestBuilder.java b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequestBuilder.java index c985ebd43dbbe..7294db072ad38 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequestBuilder.java @@ -54,11 +54,10 @@ public TermVectorsRequestBuilder(OpenSearchClient client, TermVectorsAction acti /** * Constructs a new term vector request builder for a document that will be fetched - from the provided index. Use {@code index}, {@code type} and - {@code id} to specify the document to load. + from the provided index. Use {@code index} and {@code id} to specify the document to load. */ - public TermVectorsRequestBuilder(OpenSearchClient client, TermVectorsAction action, String index, String type, String id) { - super(client, action, new TermVectorsRequest(index, type, id)); + public TermVectorsRequestBuilder(OpenSearchClient client, TermVectorsAction action, String index, String id) { + super(client, action, new TermVectorsRequest(index, id)); } /** @@ -69,14 +68,6 @@ public TermVectorsRequestBuilder setIndex(String index) { return this; } - /** - * Sets the type of the document. - */ - public TermVectorsRequestBuilder setType(String type) { - request.type(type); - return this; - } - /** * Sets the id of the document. 
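With the constructor reduced to index and id, requesting term vectors looks like the sketch below (client is an assumed Client instance; the index, id, and field names are placeholders):

    TermVectorsRequest request = new TermVectorsRequest("my-index", "1");
    request.selectedFields("title", "body"); // optionally restrict to specific fields
    TermVectorsResponse response = client.termVectors(request).actionGet();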
*/ diff --git a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsResponse.java b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsResponse.java index 17d269935574e..870609d526909 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsResponse.java +++ b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsResponse.java @@ -40,6 +40,7 @@ import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.CharsRefBuilder; +import org.opensearch.Version; import org.opensearch.action.ActionResponse; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.bytes.BytesReference; @@ -49,6 +50,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.ToXContentObject; import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.index.mapper.MapperService; import java.io.IOException; import java.util.Collections; @@ -77,7 +79,6 @@ private static class FieldStrings { public static final String END_OFFSET = "end_offset"; public static final String PAYLOAD = "payload"; public static final String _INDEX = "_index"; - public static final String _TYPE = "_type"; public static final String _ID = "_id"; public static final String _VERSION = "_version"; public static final String FOUND = "found"; @@ -89,7 +90,6 @@ private static class FieldStrings { private BytesReference termVectors; private BytesReference headerRef; private String index; - private String type; private String id; private long docVersion; private boolean exists = false; @@ -104,9 +104,8 @@ private static class FieldStrings { int[] currentEndOffset = new int[0]; BytesReference[] currentPayloads = new BytesReference[0]; - public TermVectorsResponse(String index, String type, String id) { + public TermVectorsResponse(String index, String id) { this.index = index; - this.type = type; this.id = id; } @@ -114,7 +113,10 @@ public TermVectorsResponse(String index, String type, String id) { TermVectorsResponse(StreamInput in) throws IOException { index = in.readString(); - type = in.readString(); + if (in.getVersion().before(Version.V_2_0_0)) { + // ignore deprecated/removed type + in.readString(); + } id = in.readString(); docVersion = in.readVLong(); exists = in.readBoolean(); @@ -129,7 +131,10 @@ public TermVectorsResponse(String index, String type, String id) { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(index); - out.writeString(type); + if (out.getVersion().before(Version.V_2_0_0)) { + // send the fixed [_doc] to previous versions since types are no longer supported + out.writeString(MapperService.SINGLE_MAPPING_NAME); + } out.writeString(id); out.writeVLong(docVersion); final boolean docExists = isExists(); @@ -180,11 +185,9 @@ public int size() { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { assert index != null; - assert type != null; assert id != null; builder.startObject(); builder.field(FieldStrings._INDEX, index); - builder.field(FieldStrings._TYPE, type); if (!isArtificial()) { builder.field(FieldStrings._ID, id); } @@ -420,10 +423,6 @@ public String getIndex() { return index; } - public String getType() { - return type; - } - public String getId() { return id; } diff --git a/server/src/main/java/org/opensearch/action/termvectors/TransportMultiTermVectorsAction.java b/server/src/main/java/org/opensearch/action/termvectors/TransportMultiTermVectorsAction.java index 
68bd89df0a397..127b31f329d09 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/TransportMultiTermVectorsAction.java +++ b/server/src/main/java/org/opensearch/action/termvectors/TransportMultiTermVectorsAction.java @@ -92,7 +92,6 @@ protected void doExecute(Task task, final MultiTermVectorsRequest request, final null, new MultiTermVectorsResponse.Failure( termVectorsRequest.index(), - termVectorsRequest.type(), termVectorsRequest.id(), new IndexNotFoundException(termVectorsRequest.index()) ) @@ -108,9 +107,8 @@ protected void doExecute(Task task, final MultiTermVectorsRequest request, final null, new MultiTermVectorsResponse.Failure( concreteSingleIndex, - termVectorsRequest.type(), termVectorsRequest.id(), - new RoutingMissingException(concreteSingleIndex, termVectorsRequest.type(), termVectorsRequest.id()) + new RoutingMissingException(concreteSingleIndex, termVectorsRequest.id()) ) ) ); @@ -166,12 +164,7 @@ public void onFailure(Exception e) { shardRequest.locations.get(i), new MultiTermVectorsItemResponse( null, - new MultiTermVectorsResponse.Failure( - shardRequest.index(), - termVectorsRequest.type(), - termVectorsRequest.id(), - e - ) + new MultiTermVectorsResponse.Failure(shardRequest.index(), termVectorsRequest.id(), e) ) ); } diff --git a/server/src/main/java/org/opensearch/action/termvectors/TransportShardMultiTermsVectorAction.java b/server/src/main/java/org/opensearch/action/termvectors/TransportShardMultiTermsVectorAction.java index ded3bffa2bc75..511b68965ebdf 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/TransportShardMultiTermsVectorAction.java +++ b/server/src/main/java/org/opensearch/action/termvectors/TransportShardMultiTermsVectorAction.java @@ -117,16 +117,15 @@ protected MultiTermVectorsShardResponse shardOperation(MultiTermVectorsShardRequ } else { logger.debug( () -> new ParameterizedMessage( - "{} failed to execute multi term vectors for [{}]/[{}]", + "{} failed to execute multi term vectors for [{}]", shardId, - termVectorsRequest.type(), termVectorsRequest.id() ), e ); response.add( request.locations.get(i), - new MultiTermVectorsResponse.Failure(request.index(), termVectorsRequest.type(), termVectorsRequest.id(), e) + new MultiTermVectorsResponse.Failure(request.index(), termVectorsRequest.id(), e) ); } } diff --git a/server/src/main/java/org/opensearch/action/termvectors/TransportTermVectorsAction.java b/server/src/main/java/org/opensearch/action/termvectors/TransportTermVectorsAction.java index 73471ed76e35c..3cfd9cf7da7c5 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/TransportTermVectorsAction.java +++ b/server/src/main/java/org/opensearch/action/termvectors/TransportTermVectorsAction.java @@ -107,7 +107,7 @@ protected void resolveRequest(ClusterState state, InternalRequest request) { request.request().routing(state.metadata().resolveIndexRouting(request.request().routing(), request.request().index())); // Fail fast on the node that received the request. 
if (request.request().routing() == null && state.getMetadata().routingRequired(request.concreteIndex())) { - throw new RoutingMissingException(request.concreteIndex(), request.request().type(), request.request().id()); + throw new RoutingMissingException(request.concreteIndex(), request.request().id()); } } diff --git a/server/src/main/java/org/opensearch/action/update/TransportUpdateAction.java b/server/src/main/java/org/opensearch/action/update/TransportUpdateAction.java index 475f16cb96ae0..387c0d24ed4df 100644 --- a/server/src/main/java/org/opensearch/action/update/TransportUpdateAction.java +++ b/server/src/main/java/org/opensearch/action/update/TransportUpdateAction.java @@ -142,7 +142,7 @@ public static void resolveAndValidateRouting(Metadata metadata, String concreteI request.routing((metadata.resolveWriteIndexRouting(request.routing(), request.index()))); // Fail fast on the node that received the request, rather than failing when translating on the index or delete request. if (request.routing() == null && metadata.routingRequired(concreteIndex)) { - throw new RoutingMissingException(concreteIndex, request.type(), request.id()); + throw new RoutingMissingException(concreteIndex, request.id()); } } @@ -226,7 +226,6 @@ protected void shardOperation(final UpdateRequest request, final ActionListener< UpdateResponse update = new UpdateResponse( response.getShardInfo(), response.getShardId(), - response.getType(), response.getId(), response.getSeqNo(), response.getPrimaryTerm(), @@ -267,7 +266,6 @@ protected void shardOperation(final UpdateRequest request, final ActionListener< UpdateResponse update = new UpdateResponse( response.getShardInfo(), response.getShardId(), - response.getType(), response.getId(), response.getSeqNo(), response.getPrimaryTerm(), @@ -296,7 +294,6 @@ protected void shardOperation(final UpdateRequest request, final ActionListener< UpdateResponse update = new UpdateResponse( response.getShardInfo(), response.getShardId(), - response.getType(), response.getId(), response.getSeqNo(), response.getPrimaryTerm(), @@ -325,7 +322,7 @@ protected void shardOperation(final UpdateRequest request, final ActionListener< if (indexServiceOrNull != null) { IndexShard shard = indexService.getShardOrNull(shardId.getId()); if (shard != null) { - shard.noopUpdate(request.type()); + shard.noopUpdate(); } } listener.onResponse(update); diff --git a/server/src/main/java/org/opensearch/action/update/UpdateHelper.java b/server/src/main/java/org/opensearch/action/update/UpdateHelper.java index d70cd9fbca8f4..0da41a3028edf 100644 --- a/server/src/main/java/org/opensearch/action/update/UpdateHelper.java +++ b/server/src/main/java/org/opensearch/action/update/UpdateHelper.java @@ -82,8 +82,7 @@ public UpdateHelper(ScriptService scriptService) { * Prepares an update request by converting it into an index or delete request or an update response (no action). 
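The update path below resolves documents by id alone, and the typeless UpdateRequest is built the same way. A usage sketch (client, the index name, id, and field values are assumptions for illustration; imports elided):

    UpdateRequest update = new UpdateRequest("my-index", "1")
        .doc(Collections.singletonMap("status", "active")) // partial-document merge
        .docAsUpsert(true);                                // index the doc if it does not exist
    UpdateResponse response = client.update(update).actionGet();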
*/ public Result prepare(UpdateRequest request, IndexShard indexShard, LongSupplier nowInMillis) { - final GetResult getResult = indexShard.getService() - .getForUpdate(request.type(), request.id(), request.ifSeqNo(), request.ifPrimaryTerm()); + final GetResult getResult = indexShard.getService().getForUpdate(request.id(), request.ifSeqNo(), request.ifPrimaryTerm()); return prepare(indexShard.shardId(), request, getResult, nowInMillis); } @@ -97,7 +96,7 @@ protected Result prepare(ShardId shardId, UpdateRequest request, final GetResult return prepareUpsert(shardId, request, getResult, nowInMillis); } else if (getResult.internalSourceRef() == null) { // no source, we can't do anything, throw a failure... - throw new DocumentSourceMissingException(shardId, request.type(), request.id()); + throw new DocumentSourceMissingException(shardId, request.id()); } else if (request.script() == null && request.doc() != null) { // The request has no script, it is a new doc that should be merged with the old document return prepareUpdateIndexRequest(shardId, request, getResult, request.detectNoop()); @@ -138,7 +137,7 @@ Tuple> executeScriptedUpsert(Map ctx = new HashMap<>(16); ctx.put(ContextFields.OP, UpdateOpType.INDEX.toString()); // The default operation is "index" ctx.put(ContextFields.INDEX, getResult.getIndex()); - ctx.put(ContextFields.TYPE, getResult.getType()); ctx.put(ContextFields.ID, getResult.getId()); ctx.put(ContextFields.VERSION, getResult.getVersion()); ctx.put(ContextFields.ROUTING, routing); @@ -288,7 +282,6 @@ Result prepareUpdateScriptRequest(ShardId shardId, UpdateRequest request, GetRes switch (operation) { case INDEX: final IndexRequest indexRequest = Requests.indexRequest(request.index()) - .type(request.type()) .id(request.id()) .routing(routing) .source(updatedSourceAsMap, updateSourceContentType) @@ -300,7 +293,6 @@ Result prepareUpdateScriptRequest(ShardId shardId, UpdateRequest request, GetRes return new Result(indexRequest, DocWriteResponse.Result.UPDATED, updatedSourceAsMap, updateSourceContentType); case DELETE: DeleteRequest deleteRequest = Requests.deleteRequest(request.index()) - .type(request.type()) .id(request.id()) .routing(routing) .setIfSeqNo(getResult.getSeqNo()) @@ -313,7 +305,6 @@ Result prepareUpdateScriptRequest(ShardId shardId, UpdateRequest request, GetRes // If it was neither an INDEX or DELETE operation, treat it as a noop UpdateResponse update = new UpdateResponse( shardId, - getResult.getType(), getResult.getId(), getResult.getSeqNo(), getResult.getPrimaryTerm(), @@ -386,7 +377,6 @@ public static GetResult extractGetResult( // TODO when using delete/none, we can still return the source as bytes by generating it (using the sourceContentType) return new GetResult( concreteIndex, - request.type(), request.id(), seqNo, primaryTerm, diff --git a/server/src/main/java/org/opensearch/action/update/UpdateRequest.java b/server/src/main/java/org/opensearch/action/update/UpdateRequest.java index ee7ed695dcba8..36be9f0160c9a 100644 --- a/server/src/main/java/org/opensearch/action/update/UpdateRequest.java +++ b/server/src/main/java/org/opensearch/action/update/UpdateRequest.java @@ -34,6 +34,7 @@ import org.apache.lucene.util.RamUsageEstimator; import org.opensearch.LegacyESVersion; +import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.DocWriteRequest; import org.opensearch.action.index.IndexRequest; @@ -122,8 +123,6 @@ public class UpdateRequest extends InstanceShardOperationRequest 
PARSER.declareLong(UpdateRequest::setIfPrimaryTerm, IF_PRIMARY_TERM); } - // Set to null initially so we can know to override in bulk requests that have a default type. - private String type; private String id; @Nullable private String routing; @@ -160,7 +159,10 @@ public UpdateRequest(StreamInput in) throws IOException { public UpdateRequest(@Nullable ShardId shardId, StreamInput in) throws IOException { super(shardId, in); waitForActiveShards = ActiveShardCount.readFrom(in); - type = in.readString(); + if (in.getVersion().before(Version.V_2_0_0)) { + String type = in.readString(); + assert MapperService.SINGLE_MAPPING_NAME.equals(type) : "Expected [_doc] but received [" + type + "]"; + } id = in.readString(); routing = in.readOptionalString(); if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { @@ -210,25 +212,12 @@ public UpdateRequest(String index, String id) { this.id = id; } - /** - * @deprecated Types are in the process of being removed. Use {@link #UpdateRequest(String, String)} instead. - */ - @Deprecated - public UpdateRequest(String index, String type, String id) { - super(index); - this.type = type; - this.id = id; - } - @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = super.validate(); if (upsertRequest != null && upsertRequest.version() != Versions.MATCH_ANY) { validationException = addValidationError("can't provide version in upsert request", validationException); } - if (Strings.isEmpty(type())) { - validationException = addValidationError("type is missing", validationException); - } if (Strings.isEmpty(id)) { validationException = addValidationError("id is missing", validationException); } @@ -263,46 +252,6 @@ public ActionRequestValidationException validate() { return validationException; } - /** - * The type of the indexed document. - * - * @deprecated Types are in the process of being removed. - */ - @Deprecated - @Override - public String type() { - if (type == null) { - return MapperService.SINGLE_MAPPING_NAME; - } - return type; - } - - /** - * Sets the type of the indexed document. - * - * @deprecated Types are in the process of being removed. - */ - @Deprecated - public UpdateRequest type(String type) { - this.type = type; - return this; - } - - /** - * Set the default type supplied to a bulk - * request if this individual request's type is null - * or empty - * @deprecated Types are in the process of being removed. - */ - @Deprecated - @Override - public UpdateRequest defaultTypeIfNull(String defaultType) { - if (Strings.isNullOrEmpty(type)) { - type = defaultType; - } - return this; - } - /** * The id of the indexed document. */ @@ -934,9 +883,9 @@ public void writeThin(StreamOutput out) throws IOException { private void doWrite(StreamOutput out, boolean thin) throws IOException { waitForActiveShards.writeTo(out); - // A 7.x request allows null types but if deserialized in a 6.x node will cause nullpointer exceptions. - // So we use the type accessor method here to make the type non-null (will default it to "_doc"). 
- out.writeString(type()); + if (out.getVersion().before(Version.V_2_0_0)) { + out.writeString(MapperService.SINGLE_MAPPING_NAME); + } out.writeString(id); out.writeOptionalString(routing); if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { @@ -956,7 +905,6 @@ private void doWrite(StreamOutput out, boolean thin) throws IOException { out.writeBoolean(true); // make sure the basics are set doc.index(index); - doc.type(type); doc.id(id); if (thin) { doc.writeThin(out); @@ -974,7 +922,6 @@ private void doWrite(StreamOutput out, boolean thin) throws IOException { out.writeBoolean(true); // make sure the basics are set upsertRequest.index(index); - upsertRequest.type(type); upsertRequest.id(id); if (thin) { upsertRequest.writeThin(out); @@ -1054,13 +1001,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public String toString() { - StringBuilder res = new StringBuilder().append("update {[") - .append(index) - .append("][") - .append(type()) - .append("][") - .append(id) - .append("]"); + StringBuilder res = new StringBuilder().append("update {[").append(index).append("][").append(id).append("]"); res.append(", doc_as_upsert[").append(docAsUpsert).append("]"); if (doc != null) { res.append(", doc[").append(doc).append("]"); diff --git a/server/src/main/java/org/opensearch/action/update/UpdateRequestBuilder.java b/server/src/main/java/org/opensearch/action/update/UpdateRequestBuilder.java index 3acbfe6dced12..fb8bf243a9fb5 100644 --- a/server/src/main/java/org/opensearch/action/update/UpdateRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/update/UpdateRequestBuilder.java @@ -54,15 +54,21 @@ public UpdateRequestBuilder(OpenSearchClient client, UpdateAction action) { super(client, action, new UpdateRequest()); } + @Deprecated public UpdateRequestBuilder(OpenSearchClient client, UpdateAction action, String index, String type, String id) { - super(client, action, new UpdateRequest(index, type, id)); + super(client, action, new UpdateRequest(index, id)); + } + + public UpdateRequestBuilder(OpenSearchClient client, UpdateAction action, String index, String id) { + super(client, action, new UpdateRequest(index, id)); } /** * Sets the type of the indexed document. + * @deprecated types will be removed */ + @Deprecated public UpdateRequestBuilder setType(String type) { - request.type(type); return this; } diff --git a/server/src/main/java/org/opensearch/action/update/UpdateResponse.java b/server/src/main/java/org/opensearch/action/update/UpdateResponse.java index 4842d7dd03b77..2c6efaf3c5f6b 100644 --- a/server/src/main/java/org/opensearch/action/update/UpdateResponse.java +++ b/server/src/main/java/org/opensearch/action/update/UpdateResponse.java @@ -69,21 +69,12 @@ public UpdateResponse(StreamInput in) throws IOException { * Constructor to be used when an update didn't translate into a write. 
* For example: update script with operation set to none */ - public UpdateResponse(ShardId shardId, String type, String id, long seqNo, long primaryTerm, long version, Result result) { - this(new ShardInfo(0, 0), shardId, type, id, seqNo, primaryTerm, version, result); + public UpdateResponse(ShardId shardId, String id, long seqNo, long primaryTerm, long version, Result result) { + this(new ShardInfo(0, 0), shardId, id, seqNo, primaryTerm, version, result); } - public UpdateResponse( - ShardInfo shardInfo, - ShardId shardId, - String type, - String id, - long seqNo, - long primaryTerm, - long version, - Result result - ) { - super(shardId, type, id, seqNo, primaryTerm, version, result); + public UpdateResponse(ShardInfo shardInfo, ShardId shardId, String id, long seqNo, long primaryTerm, long version, Result result) { + super(shardId, id, seqNo, primaryTerm, version, result); setShardInfo(shardInfo); } @@ -137,7 +128,6 @@ public String toString() { StringBuilder builder = new StringBuilder(); builder.append("UpdateResponse["); builder.append("index=").append(getIndex()); - builder.append(",type=").append(getType()); builder.append(",id=").append(getId()); builder.append(",version=").append(getVersion()); builder.append(",seqNo=").append(getSeqNo()); @@ -190,15 +180,14 @@ public void setGetResult(GetResult getResult) { public UpdateResponse build() { UpdateResponse update; if (shardInfo != null) { - update = new UpdateResponse(shardInfo, shardId, type, id, seqNo, primaryTerm, version, result); + update = new UpdateResponse(shardInfo, shardId, id, seqNo, primaryTerm, version, result); } else { - update = new UpdateResponse(shardId, type, id, seqNo, primaryTerm, version, result); + update = new UpdateResponse(shardId, id, seqNo, primaryTerm, version, result); } if (getResult != null) { update.setGetResult( new GetResult( update.getIndex(), - update.getType(), update.getId(), getResult.getSeqNo(), getResult.getPrimaryTerm(), diff --git a/server/src/main/java/org/opensearch/client/Client.java b/server/src/main/java/org/opensearch/client/Client.java index b6192643ac3fc..0be8b4a1573d5 100644 --- a/server/src/main/java/org/opensearch/client/Client.java +++ b/server/src/main/java/org/opensearch/client/Client.java @@ -243,9 +243,9 @@ public interface Client extends OpenSearchClient, Releasable { BulkRequestBuilder prepareBulk(); /** - * Executes a bulk of index / delete operations with default index and/or type + * Executes a bulk of index / delete operations with default index */ - BulkRequestBuilder prepareBulk(@Nullable String globalIndex, @Nullable String globalType); + BulkRequestBuilder prepareBulk(@Nullable String globalIndex); /** * Gets the document that was indexed from an index with a type and id. @@ -271,9 +271,9 @@ public interface Client extends OpenSearchClient, Releasable { GetRequestBuilder prepareGet(); /** - * Gets the document that was indexed from an index with a type (optional) and id. + * Gets the document that was indexed from an index with an id. */ - GetRequestBuilder prepareGet(String index, @Nullable String type, String id); + GetRequestBuilder prepareGet(String index, String id); /** * Multi get documents. @@ -375,10 +375,9 @@ public interface Client extends OpenSearchClient, Releasable { * Builder for the term vector request. 
* * @param index The index to load the document from - * @param type The type of the document * @param id The id of the document */ - TermVectorsRequestBuilder prepareTermVectors(String index, String type, String id); + TermVectorsRequestBuilder prepareTermVectors(String index, String id); /** * Multi get term vectors. @@ -399,10 +398,9 @@ public interface Client extends OpenSearchClient, Releasable { * Computes a score explanation for the specified request. * * @param index The index this explain is targeted for - * @param type The type this explain is targeted for * @param id The document identifier this explain is targeted for */ - ExplainRequestBuilder prepareExplain(String index, String type, String id); + ExplainRequestBuilder prepareExplain(String index, String id); /** * Computes a score explanation for the specified request. diff --git a/server/src/main/java/org/opensearch/client/IndicesAdminClient.java b/server/src/main/java/org/opensearch/client/IndicesAdminClient.java index 7cf81967d2755..2710035dce906 100644 --- a/server/src/main/java/org/opensearch/client/IndicesAdminClient.java +++ b/server/src/main/java/org/opensearch/client/IndicesAdminClient.java @@ -66,9 +66,6 @@ import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.action.admin.indices.flush.FlushRequestBuilder; import org.opensearch.action.admin.indices.flush.FlushResponse; -import org.opensearch.action.admin.indices.flush.SyncedFlushRequest; -import org.opensearch.action.admin.indices.flush.SyncedFlushRequestBuilder; -import org.opensearch.action.admin.indices.flush.SyncedFlushResponse; import org.opensearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.opensearch.action.admin.indices.forcemerge.ForceMergeRequestBuilder; import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse; @@ -441,29 +438,6 @@ public interface IndicesAdminClient extends OpenSearchClient { */ FlushRequestBuilder prepareFlush(String... indices); - /** - * Explicitly sync flush one or more indices (write sync id to shards for faster recovery). - * - * @param request The sync flush request - * @return A result future - * @see org.opensearch.client.Requests#syncedFlushRequest(String...) - */ - ActionFuture syncedFlush(SyncedFlushRequest request); - - /** - * Explicitly sync flush one or more indices (write sync id to shards for faster recovery). - * - * @param request The sync flush request - * @param listener A listener to be notified with a result - * @see org.opensearch.client.Requests#syncedFlushRequest(String...) - */ - void syncedFlush(SyncedFlushRequest request, ActionListener listener); - - /** - * Explicitly sync flush one or more indices (write sync id to shards for faster recovery). - */ - SyncedFlushRequestBuilder prepareSyncedFlush(String... indices); - /** * Explicitly force merge one or more indices into a the number of segments. 
* diff --git a/server/src/main/java/org/opensearch/client/Requests.java b/server/src/main/java/org/opensearch/client/Requests.java index b62f6ee7f7234..d89f55a37a9cf 100644 --- a/server/src/main/java/org/opensearch/client/Requests.java +++ b/server/src/main/java/org/opensearch/client/Requests.java @@ -61,7 +61,6 @@ import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; import org.opensearch.action.admin.indices.exists.indices.IndicesExistsRequest; import org.opensearch.action.admin.indices.flush.FlushRequest; -import org.opensearch.action.admin.indices.flush.SyncedFlushRequest; import org.opensearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest; import org.opensearch.action.admin.indices.open.OpenIndexRequest; @@ -98,8 +97,8 @@ public static IndexRequest indexRequest() { } /** - * Create an index request against a specific index. Note the {@link IndexRequest#type(String)} must be - * set as well and optionally the {@link IndexRequest#id(String)}. + * Create an index request against a specific index. + * Note that setting {@link IndexRequest#id(String)} is optional. * * @param index The index name to index the request against * @return The index request @@ -110,8 +109,8 @@ public static IndexRequest indexRequest(String index) { } /** - * Creates a delete request against a specific index. Note the {@link DeleteRequest#type(String)} and - * {@link DeleteRequest#id(String)} must be set. + * Creates a delete request against a specific index. + * Note that {@link DeleteRequest#id(String)} must be set. * * @param index The index name to delete from * @return The delete request @@ -130,7 +129,7 @@ public static BulkRequest bulkRequest() { /** * Creates a get request to get the JSON source from an index based on a type and id. Note, the - * {@link GetRequest#type(String)} and {@link GetRequest#id(String)} must be set. + * {@link GetRequest#id(String)} must be set. * * @param index The index to get the JSON source from * @return The get request @@ -274,17 +273,6 @@ public static FlushRequest flushRequest(String... indices) { return new FlushRequest(indices); } - /** - * Creates a synced flush indices request. - * - * @param indices The indices to sync flush. Use {@code null} or {@code _all} to execute against all indices - * @return The synced flush request - * @see org.opensearch.client.IndicesAdminClient#syncedFlush(SyncedFlushRequest) - */ - public static SyncedFlushRequest syncedFlushRequest(String... indices) { - return new SyncedFlushRequest(indices); - } - /** * Creates a force merge request. 
* diff --git a/server/src/main/java/org/opensearch/client/support/AbstractClient.java b/server/src/main/java/org/opensearch/client/support/AbstractClient.java index 031e4507e08cd..4a5c19819613e 100644 --- a/server/src/main/java/org/opensearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/opensearch/client/support/AbstractClient.java @@ -200,10 +200,6 @@ import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.action.admin.indices.flush.FlushRequestBuilder; import org.opensearch.action.admin.indices.flush.FlushResponse; -import org.opensearch.action.admin.indices.flush.SyncedFlushAction; -import org.opensearch.action.admin.indices.flush.SyncedFlushRequest; -import org.opensearch.action.admin.indices.flush.SyncedFlushRequestBuilder; -import org.opensearch.action.admin.indices.flush.SyncedFlushResponse; import org.opensearch.action.admin.indices.forcemerge.ForceMergeAction; import org.opensearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.opensearch.action.admin.indices.forcemerge.ForceMergeRequestBuilder; @@ -518,8 +514,8 @@ public BulkRequestBuilder prepareBulk() { } @Override - public BulkRequestBuilder prepareBulk(@Nullable String globalIndex, @Nullable String globalType) { - return new BulkRequestBuilder(this, BulkAction.INSTANCE, globalIndex, globalType); + public BulkRequestBuilder prepareBulk(@Nullable String globalIndex) { + return new BulkRequestBuilder(this, BulkAction.INSTANCE, globalIndex); } @Override @@ -538,8 +534,8 @@ public GetRequestBuilder prepareGet() { } @Override - public GetRequestBuilder prepareGet(String index, String type, String id) { - return prepareGet().setIndex(index).setType(type).setId(id); + public GetRequestBuilder prepareGet(String index, String id) { + return prepareGet().setIndex(index).setId(id); } @Override @@ -618,8 +614,8 @@ public TermVectorsRequestBuilder prepareTermVectors() { } @Override - public TermVectorsRequestBuilder prepareTermVectors(String index, String type, String id) { - return new TermVectorsRequestBuilder(this, TermVectorsAction.INSTANCE, index, type, id); + public TermVectorsRequestBuilder prepareTermVectors(String index, String id) { + return new TermVectorsRequestBuilder(this, TermVectorsAction.INSTANCE, index, id); } @Override @@ -638,8 +634,8 @@ public MultiTermVectorsRequestBuilder prepareMultiTermVectors() { } @Override - public ExplainRequestBuilder prepareExplain(String index, String type, String id) { - return new ExplainRequestBuilder(this, ExplainAction.INSTANCE, index, type, id); + public ExplainRequestBuilder prepareExplain(String index, String id) { + return new ExplainRequestBuilder(this, ExplainAction.INSTANCE, index, id); } @Override @@ -1519,21 +1515,6 @@ public FlushRequestBuilder prepareFlush(String... indices) { return new FlushRequestBuilder(this, FlushAction.INSTANCE).setIndices(indices); } - @Override - public ActionFuture syncedFlush(SyncedFlushRequest request) { - return execute(SyncedFlushAction.INSTANCE, request); - } - - @Override - public void syncedFlush(SyncedFlushRequest request, ActionListener listener) { - execute(SyncedFlushAction.INSTANCE, request, listener); - } - - @Override - public SyncedFlushRequestBuilder prepareSyncedFlush(String... 
indices) { - return new SyncedFlushRequestBuilder(this, SyncedFlushAction.INSTANCE).setIndices(indices); - } - @Override public void getMappings(GetMappingsRequest request, ActionListener listener) { execute(GetMappingsAction.INSTANCE, request, listener); diff --git a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java index b852f1b905ad3..2c001833f46ce 100644 --- a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java @@ -332,10 +332,8 @@ private static boolean assertShardsConsistent( final Set indexNamesInShards = new HashSet<>(); shards.iterator().forEachRemaining(s -> { indexNamesInShards.add(s.key.getIndexName()); - assert source == null - || s.value.nodeId == null : "Shard snapshot must not be assigned to data node when copying from snapshot [" - + source - + "]"; + assert source == null || s.value.nodeId == null + : "Shard snapshot must not be assigned to data node when copying from snapshot [" + source + "]"; }); assert source == null || indexNames.isEmpty() == false : "No empty snapshot clones allowed"; assert source != null || indexNames.equals(indexNamesInShards) : "Indices in shards " @@ -348,12 +346,8 @@ private static boolean assertShardsConsistent( final boolean shardsCompleted = completed(shards.values()) && completed(clones.values()); // Check state consistency for normal snapshots and started clone operations if (source == null || clones.isEmpty() == false) { - assert (state.completed() && shardsCompleted) - || (state.completed() == false - && shardsCompleted == false) : "Completed state must imply all shards completed but saw state [" - + state - + "] and shards " - + shards; + assert (state.completed() && shardsCompleted) || (state.completed() == false && shardsCompleted == false) + : "Completed state must imply all shards completed but saw state [" + state + "] and shards " + shards; } if (source != null && state.completed()) { assert hasFailures(clones) == false || state == State.FAILED : "Failed shard clones in [" @@ -567,8 +561,8 @@ public Entry withStartedShards(ImmutableOpenMap sh userMetadata, version ); - assert updated.state().completed() == false - && completed(updated.shards().values()) == false : "Only running snapshots allowed but saw [" + updated + "]"; + assert updated.state().completed() == false && completed(updated.shards().values()) == false + : "Only running snapshots allowed but saw [" + updated + "]"; return updated; } @@ -966,8 +960,8 @@ private static boolean assertConsistentEntries(List entries) { for (Entry entry : entries) { for (ObjectObjectCursor shard : entry.shards()) { if (shard.value.isActive()) { - assert assignedShardsByRepo.computeIfAbsent(entry.repository(), k -> new HashSet<>()) - .add(shard.key) : "Found duplicate shard assignments in " + entries; + assert assignedShardsByRepo.computeIfAbsent(entry.repository(), k -> new HashSet<>()).add(shard.key) + : "Found duplicate shard assignments in " + entries; } } } diff --git a/server/src/main/java/org/opensearch/cluster/action/index/MappingUpdatedAction.java b/server/src/main/java/org/opensearch/cluster/action/index/MappingUpdatedAction.java index 4bc7e61a67240..d7100a194a4bb 100644 --- a/server/src/main/java/org/opensearch/cluster/action/index/MappingUpdatedAction.java +++ b/server/src/main/java/org/opensearch/cluster/action/index/MappingUpdatedAction.java @@ -51,7 +51,6 @@ import 
org.opensearch.common.util.concurrent.UncategorizedExecutionException; import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.Index; -import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.Mapping; import java.util.concurrent.Semaphore; @@ -111,9 +110,6 @@ public void setClient(Client client) { * potentially waiting for a master node to be available. */ public void updateMappingOnMaster(Index index, String type, Mapping mappingUpdate, ActionListener<Void> listener) { - if (type.equals(MapperService.DEFAULT_MAPPING)) { - throw new IllegalArgumentException("_default_ mapping should not be updated"); - } final RunOnce release = new RunOnce(() -> semaphore.release()); try { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java index f9f6f5437f360..b28fde5d9cc16 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java @@ -305,7 +305,7 @@ public boolean handleJoin(Join join) { boolean prevElectionWon = electionWon; electionWon = isElectionQuorum(joinVotes); assert !prevElectionWon || electionWon : // we cannot go from won to not won - "locaNode= " + localNode + ", join=" + join + ", joinVotes=" + joinVotes; + "localNode= " + localNode + ", join=" + join + ", joinVotes=" + joinVotes; logger.debug( "handleJoin: added join {} from [{}] for election, electionWon={} lastAcceptedTerm={} lastAcceptedVersion={}", join, @@ -378,8 +378,8 @@ && getLastCommittedConfiguration().equals(getLastAcceptedConfiguration()) == fal throw new CoordinationStateRejectedException("only allow reconfiguration if joinVotes have quorum for new config"); } - assert clusterState.getLastCommittedConfiguration() - .equals(getLastCommittedConfiguration()) : "last committed configuration should not change"; + assert clusterState.getLastCommittedConfiguration().equals(getLastCommittedConfiguration()) + : "last committed configuration should not change"; lastPublishedVersion = clusterState.version(); lastPublishedConfiguration = clusterState.getLastAcceptedConfiguration(); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java index d5eb550ca4e6d..557f11f75d969 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java @@ -1207,8 +1207,8 @@ ClusterState getStateForMasterService() { private ClusterState clusterStateWithNoMasterBlock(ClusterState clusterState) { if (clusterState.nodes().getMasterNodeId() != null) { // remove block if it already exists before adding new one - assert clusterState.blocks() - .hasGlobalBlockWithId(NO_MASTER_BLOCK_ID) == false : "NO_MASTER_BLOCK should only be added by Coordinator"; + assert clusterState.blocks().hasGlobalBlockWithId(NO_MASTER_BLOCK_ID) == false + : "NO_MASTER_BLOCK should only be added by Coordinator"; final ClusterBlocks clusterBlocks = ClusterBlocks.builder() .blocks(clusterState.blocks()) .addGlobalBlock(noMasterBlockService.getNoMasterBlock()) diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java index 62762937d5edb..ea5c33b4300a5 100/--- 
a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java @@ -193,20 +193,15 @@ public ClusterTasksResult execute(ClusterState currentState, List jo if (joiniedNodeNameIds.isEmpty() == false) { Set currentVotingConfigExclusions = currentState.getVotingConfigExclusions(); Set newVotingConfigExclusions = currentVotingConfigExclusions.stream() - .map( - e -> { - // Update nodeId in VotingConfigExclusion when a new node with excluded node name joins - if (CoordinationMetadata.VotingConfigExclusion.MISSING_VALUE_MARKER.equals(e.getNodeId()) - && joiniedNodeNameIds.containsKey(e.getNodeName())) { - return new CoordinationMetadata.VotingConfigExclusion( - joiniedNodeNameIds.get(e.getNodeName()), - e.getNodeName() - ); - } else { - return e; - } + .map(e -> { + // Update nodeId in VotingConfigExclusion when a new node with excluded node name joins + if (CoordinationMetadata.VotingConfigExclusion.MISSING_VALUE_MARKER.equals(e.getNodeId()) + && joiniedNodeNameIds.containsKey(e.getNodeName())) { + return new CoordinationMetadata.VotingConfigExclusion(joiniedNodeNameIds.get(e.getNodeName()), e.getNodeName()); + } else { + return e; } - ) + }) .collect(Collectors.toSet()); // if VotingConfigExclusions did get updated diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index 147c8987169c7..c02358d47b066 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -67,7 +67,6 @@ import org.opensearch.common.xcontent.XContentParser; import org.opensearch.gateway.MetadataStateFormat; import org.opensearch.index.Index; -import org.opensearch.index.mapper.MapperService; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.ShardId; import org.opensearch.rest.RestStatus; @@ -677,22 +676,11 @@ public ImmutableOpenMap getMappings() { @Nullable public MappingMetadata mapping() { for (ObjectObjectCursor cursor : mappings) { - if (cursor.key.equals(MapperService.DEFAULT_MAPPING) == false) { - return cursor.value; - } + return cursor.value; } return null; } - /** - * Get the default mapping. - * NOTE: this is always {@code null} for 7.x indices which are disallowed to have a default mapping. - */ - @Nullable - public MappingMetadata defaultMapping() { - return mappings.get(MapperService.DEFAULT_MAPPING); - } - public static final String INDEX_RESIZE_SOURCE_UUID_KEY = "index.resize.source.uuid"; public static final String INDEX_RESIZE_SOURCE_NAME_KEY = "index.resize.source.name"; public static final Setting INDEX_RESIZE_SOURCE_UUID = Setting.simpleString(INDEX_RESIZE_SOURCE_UUID_KEY); @@ -704,25 +692,6 @@ public Index getResizeSourceIndex() { : null; } - /** - * Sometimes, the default mapping exists and an actual mapping is not created yet (introduced), - * in this case, we want to return the default mapping in case it has some default mapping definitions. - *
<p>
- * Note, once the mapping type is introduced, the default mapping is applied on the actual typed MappingMetadata, - * setting its routing, timestamp, and so on if needed. - */ - @Nullable - public MappingMetadata mappingOrDefault() { - MappingMetadata mapping = null; - for (ObjectCursor m : mappings.values()) { - if (mapping == null || mapping.type().equals(MapperService.DEFAULT_MAPPING)) { - mapping = m.value; - } - } - - return mapping; - } - ImmutableOpenMap getCustomData() { return this.customData; } @@ -1337,14 +1306,6 @@ public IndexMetadata build() { ImmutableOpenMap.Builder tmpAliases = aliases; Settings tmpSettings = settings; - // update default mapping on the MappingMetadata - if (mappings.containsKey(MapperService.DEFAULT_MAPPING)) { - MappingMetadata defaultMapping = mappings.get(MapperService.DEFAULT_MAPPING); - for (ObjectCursor cursor : mappings.values()) { - cursor.value.updateDefaultMapping(defaultMapping); - } - } - /* * We expect that the metadata has been properly built to set the number of shards and the number of replicas, and do not rely * on the default values here. Those must have been set upstream. diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java index 7cf3c3da24c52..d08fe3b926c66 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java @@ -51,7 +51,6 @@ import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.common.xcontent.XContentParser; -import org.opensearch.index.mapper.MapperService; import java.io.IOException; import java.util.ArrayList; @@ -432,11 +431,9 @@ private static void toInnerXContent( if (includeTypeName == false) { Map documentMapping = null; for (ObjectObjectCursor cursor : indexTemplateMetadata.mappings()) { - if (!cursor.key.equals(MapperService.DEFAULT_MAPPING)) { - assert documentMapping == null; - Map mapping = XContentHelper.convertToMap(cursor.value.uncompressed(), true).v2(); - documentMapping = reduceMapping(cursor.key, mapping); - } + assert documentMapping == null; + Map mapping = XContentHelper.convertToMap(cursor.value.uncompressed(), true).v2(); + documentMapping = reduceMapping(cursor.key, mapping); } if (documentMapping != null) { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java index e3ab1d491131a..02fe7ee8db889 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java @@ -140,12 +140,6 @@ private void initMappers(Map withoutType) { } } - void updateDefaultMapping(MappingMetadata defaultMapping) { - if (routing == Routing.EMPTY) { - routing = defaultMapping.routing(); - } - } - public String type() { return this.type; } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index 5f74c783577d8..4e2c475e6c4ce 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -98,7 +98,6 @@ import java.nio.file.Path; import java.time.Instant; 
import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -474,7 +473,6 @@ private ClusterState applyCreateIndexWithTemporaryService( request.index(), aliases, indexService.mapperService()::documentMapper, - () -> indexService.mapperService().documentMapper(MapperService.DEFAULT_MAPPING), temporaryIndexMeta.getSettings(), temporaryIndexMeta.getRoutingNumShards(), sourceMetadata, @@ -929,13 +927,11 @@ static Settings aggregateIndexSettings( * that will be used to create this index. */ shardLimitValidator.validateShardLimit(indexSettings, currentState); - if (indexSettings.getAsBoolean(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) == false) { - DEPRECATION_LOGGER.deprecate( - "soft_deletes_disabled", - "Creating indices with soft-deletes disabled is deprecated and will be removed in future OpenSearch versions. " - + "Please do not specify value for setting [index.soft_deletes.enabled] of index [" - + request.index() - + "]." + if (IndexSettings.INDEX_SOFT_DELETES_SETTING.get(indexSettings) == false + && IndexMetadata.SETTING_INDEX_VERSION_CREATED.get(indexSettings).onOrAfter(Version.V_2_0_0)) { + throw new IllegalArgumentException( + "Creating indices with soft-deletes disabled is no longer supported. " + + "Please do not specify a value for setting [index.soft_deletes.enabled]." ); } validateTranslogRetentionSettings(indexSettings); @@ -988,9 +984,8 @@ static int getIndexNumberOfRoutingShards(Settings indexSettings, @Nullable Index routingNumShards = calculateNumRoutingShards(numTargetShards, indexVersionCreated); } } else { - assert IndexMetadata.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.exists( - indexSettings - ) == false : "index.number_of_routing_shards should not be present on the target index on resize"; + assert IndexMetadata.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.exists(indexSettings) == false + : "index.number_of_routing_shards should not be present on the target index on resize"; routingNumShards = sourceMetadata.getRoutingNumShards(); } return routingNumShards; @@ -1102,7 +1097,6 @@ static IndexMetadata buildIndexMetadata( String indexName, List aliases, Supplier documentMapperSupplier, - Supplier defaultDocumentMapperSupplier, Settings indexSettings, int routingNumShards, @Nullable IndexMetadata sourceMetadata, @@ -1112,11 +1106,10 @@ static IndexMetadata buildIndexMetadata( indexMetadataBuilder.system(isSystem); // now, update the mappings with the actual source Map mappingsMetadata = new HashMap<>(); - for (DocumentMapper mapper : Arrays.asList(documentMapperSupplier.get(), defaultDocumentMapperSupplier.get())) { - if (mapper != null) { - MappingMetadata mappingMd = new MappingMetadata(mapper); - mappingsMetadata.put(mapper.type(), mappingMd); - } + DocumentMapper mapper = documentMapperSupplier.get(); + if (mapper != null) { + MappingMetadata mappingMd = new MappingMetadata(mapper); + mappingsMetadata.put(mapper.type(), mappingMd); } for (MappingMetadata mappingMd : mappingsMetadata.values()) { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataMappingService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataMappingService.java index 9ed2a0f9257fc..7b135c9746652 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataMappingService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataMappingService.java @@ -57,17 +57,14 @@ import org.opensearch.index.mapper.MapperService; import 
org.opensearch.index.mapper.MapperService.MergeReason; import org.opensearch.indices.IndicesService; -import org.opensearch.indices.InvalidTypeNameException; import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; -import static org.opensearch.index.mapper.MapperService.isMappingSourceTyped; import static org.opensearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.NO_LONGER_ASSIGNED; /** @@ -190,15 +187,11 @@ private boolean refreshIndexMapping(IndexService indexService, IndexMetadata.Bui try { List updatedTypes = new ArrayList<>(); MapperService mapperService = indexService.mapperService(); - for (DocumentMapper mapper : Arrays.asList( - mapperService.documentMapper(), - mapperService.documentMapper(MapperService.DEFAULT_MAPPING) - )) { - if (mapper != null) { - final String type = mapper.type(); - if (!mapper.mappingSource().equals(builder.mapping(type).source())) { - updatedTypes.add(type); - } + DocumentMapper mapper = mapperService.documentMapper(); + if (mapper != null) { + final String type = mapper.type(); + if (!mapper.mappingSource().equals(builder.mapping(type).source())) { + updatedTypes.add(type); } } @@ -206,13 +199,8 @@ private boolean refreshIndexMapping(IndexService indexService, IndexMetadata.Bui if (updatedTypes.isEmpty() == false) { logger.warn("[{}] re-syncing mappings with cluster state because of types [{}]", index, updatedTypes); dirty = true; - for (DocumentMapper mapper : Arrays.asList( - mapperService.documentMapper(), - mapperService.documentMapper(MapperService.DEFAULT_MAPPING) - )) { - if (mapper != null) { - builder.putMapping(new MappingMetadata(mapper)); - } + if (mapper != null) { + builder.putMapping(new MappingMetadata(mapper)); } } } catch (Exception e) { @@ -272,7 +260,6 @@ private ClusterState applyRequest( PutMappingClusterStateUpdateRequest request, Map indexMapperServices ) throws IOException { - String mappingType = request.type(); CompressedXContent mappingUpdateSource = new CompressedXContent(request.source()); final Metadata metadata = currentState.metadata(); final List updateList = new ArrayList<>(); @@ -286,43 +273,12 @@ private ClusterState applyRequest( // we used for the validation, it makes this mechanism little less scary (a little) updateList.add(indexMetadata); // try and parse it (no need to add it here) so we can bail early in case of parsing exception - DocumentMapper newMapper; DocumentMapper existingMapper = mapperService.documentMapper(); - - String typeForUpdate = mapperService.getTypeForUpdate(mappingType, mappingUpdateSource); - if (existingMapper != null && existingMapper.type().equals(typeForUpdate) == false) { - throw new IllegalArgumentException( - "Rejecting mapping update to [" - + mapperService.index().getName() - + "] as the final mapping would have more than 1 type: " - + Arrays.asList(existingMapper.type(), typeForUpdate) - ); - } - - if (MapperService.DEFAULT_MAPPING.equals(request.type())) { - // _default_ types do not go through merging, but we do test the new settings. 
Also don't apply the old default - newMapper = mapperService.parse(request.type(), mappingUpdateSource, false); - } else { - newMapper = mapperService.parse(request.type(), mappingUpdateSource, existingMapper == null); - if (existingMapper != null) { - // first, simulate: just call merge and ignore the result - existingMapper.merge(newMapper.mapping(), MergeReason.MAPPING_UPDATE); - } + DocumentMapper newMapper = mapperService.parse(request.type(), mappingUpdateSource); + if (existingMapper != null) { + // first, simulate: just call merge and ignore the result + existingMapper.merge(newMapper.mapping(), MergeReason.MAPPING_UPDATE); } - if (mappingType == null) { - mappingType = newMapper.type(); - } else if (mappingType.equals(newMapper.type()) == false - && (isMappingSourceTyped(request.type(), mappingUpdateSource) - || mapperService.resolveDocumentType(mappingType).equals(newMapper.type()) == false)) { - throw new InvalidTypeNameException("Type name provided does not match type name within mapping definition."); - } - } - assert mappingType != null; - - if (MapperService.DEFAULT_MAPPING.equals(mappingType) == false - && MapperService.SINGLE_MAPPING_NAME.equals(mappingType) == false - && mappingType.charAt(0) == '_') { - throw new InvalidTypeNameException("Document mapping type name can't start with '_', found: [" + mappingType + "]"); } Metadata.Builder builder = Metadata.builder(metadata); boolean updated = false; @@ -333,13 +289,12 @@ private ClusterState applyRequest( final Index index = indexMetadata.getIndex(); final MapperService mapperService = indexMapperServices.get(index); - String typeForUpdate = mapperService.getTypeForUpdate(mappingType, mappingUpdateSource); CompressedXContent existingSource = null; - DocumentMapper existingMapper = mapperService.documentMapper(typeForUpdate); + DocumentMapper existingMapper = mapperService.documentMapper(); if (existingMapper != null) { existingSource = existingMapper.mappingSource(); } - DocumentMapper mergedMapper = mapperService.merge(typeForUpdate, mappingUpdateSource, MergeReason.MAPPING_UPDATE); + DocumentMapper mergedMapper = mapperService.merge(request.type(), mappingUpdateSource, MergeReason.MAPPING_UPDATE); CompressedXContent updatedSource = mergedMapper.mappingSource(); if (existingSource != null) { @@ -358,22 +313,18 @@ private ClusterState applyRequest( } else { updatedMapping = true; if (logger.isDebugEnabled()) { - logger.debug("{} create_mapping [{}] with source [{}]", index, mappingType, updatedSource); + logger.debug("{} create_mapping with source [{}]", index, updatedSource); } else if (logger.isInfoEnabled()) { - logger.info("{} create_mapping [{}]", index, mappingType); + logger.info("{} create_mapping", index); } } IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(indexMetadata); // Mapping updates on a single type may have side-effects on other types so we need to // update mapping metadata on all types - for (DocumentMapper mapper : Arrays.asList( - mapperService.documentMapper(), - mapperService.documentMapper(MapperService.DEFAULT_MAPPING) - )) { - if (mapper != null) { - indexMetadataBuilder.putMapping(new MappingMetadata(mapper.mappingSource())); - } + DocumentMapper mapper = mapperService.documentMapper(); + if (mapper != null) { + indexMetadataBuilder.putMapping(new MappingMetadata(mapper.mappingSource())); } if (updatedMapping) { indexMetadataBuilder.mappingVersion(1 + indexMetadataBuilder.mappingVersion()); diff --git 
a/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java b/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java index 1b83e1745585e..6d4db7a6513b9 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java +++ b/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java @@ -38,7 +38,6 @@ import org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.opensearch.common.Nullable; import org.opensearch.common.Strings; -import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; @@ -55,8 +54,6 @@ import java.util.Set; import java.util.stream.Collectors; -import static org.opensearch.common.Booleans.parseBoolean; - public class OperationRouting { public static final Setting USE_ADAPTIVE_REPLICA_SELECTION_SETTING = Setting.boolSetting( @@ -66,53 +63,52 @@ public class OperationRouting { Setting.Property.NodeScope ); - private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(OperationRouting.class); - private static final String IGNORE_AWARENESS_ATTRIBUTES_PROPERTY = "opensearch.search.ignore_awareness_attributes"; - static final String IGNORE_AWARENESS_ATTRIBUTES_DEPRECATION_MESSAGE = - "searches will not be routed based on awareness attributes starting in version 8.0.0; " - + "to opt into this behaviour now please set the system property [" - + IGNORE_AWARENESS_ATTRIBUTES_PROPERTY - + "] to [true]"; - - private List awarenessAttributes; - private boolean useAdaptiveReplicaSelection; + public static final String IGNORE_AWARENESS_ATTRIBUTES = "cluster.search.ignore_awareness_attributes"; + public static final Setting IGNORE_AWARENESS_ATTRIBUTES_SETTING = Setting.boolSetting( + IGNORE_AWARENESS_ATTRIBUTES, + true, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + private volatile List awarenessAttributes; + private volatile boolean useAdaptiveReplicaSelection; + private volatile boolean ignoreAwarenessAttr; public OperationRouting(Settings settings, ClusterSettings clusterSettings) { // whether to ignore awareness attributes when routing requests - boolean ignoreAwarenessAttr = parseBoolean(System.getProperty(IGNORE_AWARENESS_ATTRIBUTES_PROPERTY), false); - if (ignoreAwarenessAttr == false) { - awarenessAttributes = AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.get(settings); - if (awarenessAttributes.isEmpty() == false) { - deprecationLogger.deprecate("searches_not_routed_on_awareness_attributes", IGNORE_AWARENESS_ATTRIBUTES_DEPRECATION_MESSAGE); - } - clusterSettings.addSettingsUpdateConsumer( - AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING, - this::setAwarenessAttributes - ); - } else { - awarenessAttributes = Collections.emptyList(); - } - + this.ignoreAwarenessAttr = clusterSettings.get(IGNORE_AWARENESS_ATTRIBUTES_SETTING); + this.awarenessAttributes = AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer( + AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING, + this::setAwarenessAttributes + ); this.useAdaptiveReplicaSelection = USE_ADAPTIVE_REPLICA_SELECTION_SETTING.get(settings); clusterSettings.addSettingsUpdateConsumer(USE_ADAPTIVE_REPLICA_SELECTION_SETTING, this::setUseAdaptiveReplicaSelection); + 
clusterSettings.addSettingsUpdateConsumer(IGNORE_AWARENESS_ATTRIBUTES_SETTING, this::setIgnoreAwarenessAttributes); } void setUseAdaptiveReplicaSelection(boolean useAdaptiveReplicaSelection) { this.useAdaptiveReplicaSelection = useAdaptiveReplicaSelection; } + void setIgnoreAwarenessAttributes(boolean ignoreAwarenessAttributes) { + this.ignoreAwarenessAttr = ignoreAwarenessAttributes; + } + + public boolean isIgnoreAwarenessAttr() { + return ignoreAwarenessAttr; + } + List getAwarenessAttributes() { return awarenessAttributes; } private void setAwarenessAttributes(List awarenessAttributes) { - boolean ignoreAwarenessAttr = parseBoolean(System.getProperty(IGNORE_AWARENESS_ATTRIBUTES_PROPERTY), false); - if (ignoreAwarenessAttr == false) { - if (this.awarenessAttributes.isEmpty() && awarenessAttributes.isEmpty() == false) { - deprecationLogger.deprecate("searches_not_routed_on_awareness_attributes", IGNORE_AWARENESS_ATTRIBUTES_DEPRECATION_MESSAGE); - } - this.awarenessAttributes = awarenessAttributes; - } + this.awarenessAttributes = awarenessAttributes; + } + + public boolean ignoreAwarenessAttributes() { + return this.awarenessAttributes.isEmpty() || this.ignoreAwarenessAttr; } public ShardIterator indexShards(ClusterState clusterState, String index, String id, @Nullable String routing) { @@ -286,8 +282,7 @@ private ShardIterator preferenceActiveShardIterator( // for a different element in the list by also incorporating the // shard ID into the hash of the user-supplied preference key. routingHash = 31 * routingHash + indexShard.shardId.hashCode(); - - if (awarenessAttributes.isEmpty()) { + if (ignoreAwarenessAttributes()) { return indexShard.activeInitializingShardsIt(routingHash); } else { return indexShard.preferAttributesActiveInitializingShardsIt(awarenessAttributes, nodes, routingHash); @@ -300,7 +295,7 @@ private ShardIterator shardRoutings( @Nullable ResponseCollectorService collectorService, @Nullable Map nodeCounts ) { - if (awarenessAttributes.isEmpty()) { + if (ignoreAwarenessAttributes()) { if (useAdaptiveReplicaSelection) { return indexShard.activeInitializingShardsRankedIt(collectorService, nodeCounts); } else { diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingNode.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingNode.java index 123d09246bb7b..bf8288885b4a4 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RoutingNode.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingNode.java @@ -34,6 +34,7 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.Nullable; +import org.opensearch.common.collect.Tuple; import org.opensearch.index.Index; import org.opensearch.index.shard.ShardId; @@ -48,6 +49,8 @@ import java.util.Map; import java.util.Set; import java.util.stream.Collectors; +import java.util.stream.Stream; +import java.util.stream.StreamSupport; /** * A {@link RoutingNode} represents a cluster node associated with a single {@link DiscoveryNode} including all shards @@ -55,11 +58,75 @@ */ public class RoutingNode implements Iterable { + static class BucketedShards implements Iterable { + private final Tuple, LinkedHashMap> shardTuple; // LinkedHashMap to + // preserve order + + BucketedShards(LinkedHashMap primaryShards, LinkedHashMap replicaShards) { + this.shardTuple = new Tuple(primaryShards, replicaShards); + } + + public boolean isEmpty() { + return this.shardTuple.v1().isEmpty() && this.shardTuple.v2().isEmpty(); + } + + public int size() { + return 
this.shardTuple.v1().size() + this.shardTuple.v2().size(); + } + + public boolean containsKey(ShardId shardId) { + return this.shardTuple.v1().containsKey(shardId) || this.shardTuple.v2().containsKey(shardId); + } + + public ShardRouting get(ShardId shardId) { + if (this.shardTuple.v1().containsKey(shardId)) { + return this.shardTuple.v1().get(shardId); + } + return this.shardTuple.v2().get(shardId); + } + + public ShardRouting put(ShardRouting shardRouting) { + return put(shardRouting.shardId(), shardRouting); + } + + public ShardRouting put(ShardId shardId, ShardRouting shardRouting) { + ShardRouting ret; + if (shardRouting.primary()) { + ret = this.shardTuple.v1().put(shardId, shardRouting); + if (this.shardTuple.v2().containsKey(shardId)) { + ret = this.shardTuple.v2().remove(shardId); + } + } else { + ret = this.shardTuple.v2().put(shardId, shardRouting); + if (this.shardTuple.v1().containsKey(shardId)) { + ret = this.shardTuple.v1().remove(shardId); + } + } + + return ret; + } + + public ShardRouting remove(ShardId shardId) { + if (this.shardTuple.v1().containsKey(shardId)) { + return this.shardTuple.v1().remove(shardId); + } + return this.shardTuple.v2().remove(shardId); + } + + @Override + public Iterator iterator() { + return Stream.concat( + Collections.unmodifiableCollection(this.shardTuple.v1().values()).stream(), + Collections.unmodifiableCollection(this.shardTuple.v2().values()).stream() + ).iterator(); + } + } + private final String nodeId; private final DiscoveryNode node; - private final LinkedHashMap shards; // LinkedHashMap to preserve order + private final BucketedShards shards; private final LinkedHashSet initializingShards; @@ -67,44 +134,44 @@ public class RoutingNode implements Iterable { private final HashMap> shardsByIndex; - public RoutingNode(String nodeId, DiscoveryNode node, ShardRouting... shards) { - this(nodeId, node, buildShardRoutingMap(shards)); - } - - RoutingNode(String nodeId, DiscoveryNode node, LinkedHashMap shards) { + public RoutingNode(String nodeId, DiscoveryNode node, ShardRouting... shardRoutings) { this.nodeId = nodeId; this.node = node; - this.shards = shards; + final LinkedHashMap primaryShards = new LinkedHashMap<>(); + final LinkedHashMap replicaShards = new LinkedHashMap<>(); + this.shards = new BucketedShards(primaryShards, replicaShards); this.relocatingShards = new LinkedHashSet<>(); this.initializingShards = new LinkedHashSet<>(); this.shardsByIndex = new LinkedHashMap<>(); - for (ShardRouting shardRouting : shards.values()) { + + for (ShardRouting shardRouting : shardRoutings) { if (shardRouting.initializing()) { initializingShards.add(shardRouting); } else if (shardRouting.relocating()) { relocatingShards.add(shardRouting); } shardsByIndex.computeIfAbsent(shardRouting.index(), k -> new LinkedHashSet<>()).add(shardRouting); - } - assert invariant(); - } - private static LinkedHashMap buildShardRoutingMap(ShardRouting... 
shardRoutings) { - final LinkedHashMap shards = new LinkedHashMap<>(); - for (ShardRouting shardRouting : shardRoutings) { - ShardRouting previousValue = shards.put(shardRouting.shardId(), shardRouting); + ShardRouting previousValue; + if (shardRouting.primary()) { + previousValue = primaryShards.put(shardRouting.shardId(), shardRouting); + } else { + previousValue = replicaShards.put(shardRouting.shardId(), shardRouting); + } + if (previousValue != null) { throw new IllegalArgumentException( "Cannot have two different shards with same shard id " + shardRouting.shardId() + " on same node " ); } } - return shards; + + assert invariant(); } @Override public Iterator iterator() { - return Collections.unmodifiableCollection(shards.values()).iterator(); + return shards.iterator(); } /** @@ -139,7 +206,7 @@ public int size() { */ void add(ShardRouting shard) { assert invariant(); - if (shards.containsKey(shard.shardId())) { + if (shards.put(shard) != null) { throw new IllegalStateException( "Trying to add a shard " + shard.shardId() @@ -152,7 +219,6 @@ void add(ShardRouting shard) { + "]" ); } - shards.put(shard.shardId(), shard); if (shard.initializing()) { initializingShards.add(shard); @@ -322,7 +388,7 @@ public int numberOfOwningShardsForIndex(final Index index) { public String prettyPrint() { StringBuilder sb = new StringBuilder(); sb.append("-----node_id[").append(nodeId).append("][").append(node == null ? "X" : "V").append("]\n"); - for (ShardRouting entry : shards.values()) { + for (ShardRouting entry : shards) { sb.append("--------").append(entry.shortSummary()).append('\n'); } return sb.toString(); @@ -345,7 +411,9 @@ public String toString() { } public List copyShards() { - return new ArrayList<>(shards.values()); + List result = new ArrayList<>(); + shards.forEach(result::add); + return result; } public boolean isEmpty() { @@ -355,23 +423,20 @@ public boolean isEmpty() { private boolean invariant() { // initializingShards must consistent with that in shards - Collection shardRoutingsInitializing = shards.values() - .stream() + Collection shardRoutingsInitializing = StreamSupport.stream(shards.spliterator(), false) .filter(ShardRouting::initializing) .collect(Collectors.toList()); assert initializingShards.size() == shardRoutingsInitializing.size(); assert initializingShards.containsAll(shardRoutingsInitializing); // relocatingShards must consistent with that in shards - Collection shardRoutingsRelocating = shards.values() - .stream() + Collection shardRoutingsRelocating = StreamSupport.stream(shards.spliterator(), false) .filter(ShardRouting::relocating) .collect(Collectors.toList()); assert relocatingShards.size() == shardRoutingsRelocating.size(); assert relocatingShards.containsAll(shardRoutingsRelocating); - final Map> shardRoutingsByIndex = shards.values() - .stream() + final Map> shardRoutingsByIndex = StreamSupport.stream(shards.spliterator(), false) .collect(Collectors.groupingBy(ShardRouting::index, Collectors.toSet())); assert shardRoutingsByIndex.equals(shardsByIndex); diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java index d81e7fa6e22d9..2906ba783a5a5 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java @@ -56,7 +56,6 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; -import java.util.LinkedHashMap; import java.util.List; import 
java.util.ListIterator; import java.util.Map; @@ -108,10 +107,10 @@ public RoutingNodes(ClusterState clusterState, boolean readOnly) { this.readOnly = readOnly; final RoutingTable routingTable = clusterState.routingTable(); - Map> nodesToShards = new HashMap<>(); // fill in the nodeToShards with the "live" nodes for (ObjectCursor cursor : clusterState.nodes().getDataNodes().values()) { - nodesToShards.put(cursor.value.getId(), new LinkedHashMap<>()); // LinkedHashMap to preserve order + String nodeId = cursor.value.getId(); + this.nodesToShards.put(cursor.value.getId(), new RoutingNode(nodeId, clusterState.nodes().get(nodeId))); } // fill in the inverse of node -> shards allocated @@ -125,27 +124,23 @@ public RoutingNodes(ClusterState clusterState, boolean readOnly) { // by the ShardId, as this is common for primary and replicas. // A replica Set might have one (and not more) replicas with the state of RELOCATING. if (shard.assignedToNode()) { - Map entries = nodesToShards.computeIfAbsent( + RoutingNode routingNode = this.nodesToShards.computeIfAbsent( shard.currentNodeId(), - k -> new LinkedHashMap<>() - ); // LinkedHashMap to preserve order - ShardRouting previousValue = entries.put(shard.shardId(), shard); - if (previousValue != null) { - throw new IllegalArgumentException("Cannot have two different shards with same shard id on same node"); - } + k -> new RoutingNode(shard.currentNodeId(), clusterState.nodes().get(shard.currentNodeId())) + ); + routingNode.add(shard); assignedShardsAdd(shard); if (shard.relocating()) { relocatingShards++; - // LinkedHashMap to preserve order. // Add the counterpart shard with relocatingNodeId reflecting the source from which // it's relocating from. - entries = nodesToShards.computeIfAbsent(shard.relocatingNodeId(), k -> new LinkedHashMap<>()); + routingNode = nodesToShards.computeIfAbsent( + shard.relocatingNodeId(), + k -> new RoutingNode(shard.relocatingNodeId(), clusterState.nodes().get(shard.relocatingNodeId())) + ); ShardRouting targetShardRouting = shard.getTargetRelocatingShard(); addInitialRecovery(targetShardRouting, indexShard.primary); - previousValue = entries.put(targetShardRouting.shardId(), targetShardRouting); - if (previousValue != null) { - throw new IllegalArgumentException("Cannot have two different shards with same shard id on same node"); - } + routingNode.add(targetShardRouting); assignedShardsAdd(targetShardRouting); } else if (shard.initializing()) { if (shard.primary()) { @@ -160,10 +155,6 @@ public RoutingNodes(ClusterState clusterState, boolean readOnly) { } } } - for (Map.Entry> entry : nodesToShards.entrySet()) { - String nodeId = entry.getKey(); - this.nodesToShards.put(nodeId, new RoutingNode(nodeId, clusterState.nodes().get(nodeId), entry.getValue())); - } } private void addRecovery(ShardRouting routing) { @@ -608,13 +599,11 @@ public void failShard( ensureMutable(); assert failedShard.assignedToNode() : "only assigned shards can be failed"; assert indexMetadata.getIndex().equals(failedShard.index()) : "shard failed for unknown index (shard entry: " + failedShard + ")"; - assert getByAllocationId( - failedShard.shardId(), - failedShard.allocationId().getId() - ) == failedShard : "shard routing to fail does not exist in routing table, expected: " - + failedShard - + " but was: " - + getByAllocationId(failedShard.shardId(), failedShard.allocationId().getId()); + assert getByAllocationId(failedShard.shardId(), failedShard.allocationId().getId()) == failedShard + : "shard routing to fail does not exist in routing table, 
expected: " + + failedShard + + " but was: " + + getByAllocationId(failedShard.shardId(), failedShard.allocationId().getId()); logger.debug("{} failing shard {} with unassigned info ({})", failedShard.shardId(), failedShard, unassignedInfo.shortSummary()); @@ -859,12 +848,8 @@ private void updateAssigned(ShardRouting oldShard, ShardRouting newShard) { + oldShard + " by shard with same shard id but was " + newShard; - assert oldShard.unassigned() == false - && newShard.unassigned() == false : "only assigned shards can be updated in list of assigned shards (prev: " - + oldShard - + ", new: " - + newShard - + ")"; + assert oldShard.unassigned() == false && newShard.unassigned() == false + : "only assigned shards can be updated in list of assigned shards (prev: " + oldShard + ", new: " + newShard + ")"; assert oldShard.currentNodeId().equals(newShard.currentNodeId()) : "shard to update " + oldShard + " can only update " @@ -1289,37 +1274,97 @@ private void ensureMutable() { * the first node, then the first shard of the second node, etc. until one shard from each node has been returned. * The iterator then resumes on the first node by returning the second shard and continues until all shards from * all the nodes have been returned. + * @param movePrimaryFirst if true, all primary shards are iterated over before iterating replica for any node + * @return iterator of shard routings */ - public Iterator nodeInterleavedShardIterator() { + public Iterator nodeInterleavedShardIterator(boolean movePrimaryFirst) { final Queue> queue = new ArrayDeque<>(); for (Map.Entry entry : nodesToShards.entrySet()) { queue.add(entry.getValue().copyShards().iterator()); } - return new Iterator() { - public boolean hasNext() { - while (!queue.isEmpty()) { - if (queue.peek().hasNext()) { + if (movePrimaryFirst) { + return new Iterator() { + private Queue replicaShards = new ArrayDeque<>(); + private Queue> replicaIterators = new ArrayDeque<>(); + + public boolean hasNext() { + while (!queue.isEmpty()) { + if (queue.peek().hasNext()) { + return true; + } + queue.poll(); + } + if (!replicaShards.isEmpty()) { return true; } - queue.poll(); + while (!replicaIterators.isEmpty()) { + if (replicaIterators.peek().hasNext()) { + return true; + } + replicaIterators.poll(); + } + return false; } - return false; - } - public ShardRouting next() { - if (hasNext() == false) { - throw new NoSuchElementException(); + public ShardRouting next() { + if (hasNext() == false) { + throw new NoSuchElementException(); + } + while (!queue.isEmpty()) { + Iterator iter = queue.poll(); + if (iter.hasNext()) { + ShardRouting result = iter.next(); + if (result.primary()) { + queue.offer(iter); + return result; + } + replicaShards.offer(result); + replicaIterators.offer(iter); + } + } + if (!replicaShards.isEmpty()) { + return replicaShards.poll(); + } + Iterator replicaIterator = replicaIterators.poll(); + ShardRouting replicaShard = replicaIterator.next(); + replicaIterators.offer(replicaIterator); + + assert !replicaShard.primary(); + return replicaShard; } - Iterator iter = queue.poll(); - ShardRouting result = iter.next(); - queue.offer(iter); - return result; - } - public void remove() { - throw new UnsupportedOperationException(); - } - }; + public void remove() { + throw new UnsupportedOperationException(); + } + }; + } else { + return new Iterator() { + @Override + public boolean hasNext() { + while (!queue.isEmpty()) { + if (queue.peek().hasNext()) { + return true; + } + queue.poll(); + } + return false; + } + + @Override + public 
ShardRouting next() { + if (hasNext() == false) { + throw new NoSuchElementException(); + } + Iterator iter = queue.poll(); + queue.offer(iter); + return iter.next(); + } + + public void remove() { + throw new UnsupportedOperationException(); + } + }; + } } private static final class Recoveries { diff --git a/server/src/main/java/org/opensearch/cluster/routing/ShardRouting.java b/server/src/main/java/org/opensearch/cluster/routing/ShardRouting.java index 84c5a6e26d6b9..36c7545c16c5c 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/ShardRouting.java +++ b/server/src/main/java/org/opensearch/cluster/routing/ShardRouting.java @@ -101,16 +101,13 @@ public final class ShardRouting implements Writeable, ToXContentObject { assert expectedShardSize == UNAVAILABLE_EXPECTED_SHARD_SIZE || state == ShardRoutingState.INITIALIZING || state == ShardRoutingState.RELOCATING : expectedShardSize + " state: " + state; - assert expectedShardSize >= 0 - || state != ShardRoutingState.INITIALIZING - || state != ShardRoutingState.RELOCATING : expectedShardSize + " state: " + state; + assert expectedShardSize >= 0 || state != ShardRoutingState.INITIALIZING || state != ShardRoutingState.RELOCATING + : expectedShardSize + " state: " + state; assert !(state == ShardRoutingState.UNASSIGNED && unassignedInfo == null) : "unassigned shard must be created with meta"; - assert (state == ShardRoutingState.UNASSIGNED - || state == ShardRoutingState.INITIALIZING) == (recoverySource != null) : "recovery source only available on unassigned or initializing shard but was " - + state; - assert recoverySource == null - || recoverySource == PeerRecoverySource.INSTANCE - || primary : "replica shards always recover from primary"; + assert (state == ShardRoutingState.UNASSIGNED || state == ShardRoutingState.INITIALIZING) == (recoverySource != null) + : "recovery source only available on unassigned or initializing shard but was " + state; + assert recoverySource == null || recoverySource == PeerRecoverySource.INSTANCE || primary + : "replica shards always recover from primary"; assert (currentNodeId == null) == (state == ShardRoutingState.UNASSIGNED) : "unassigned shard must not be assigned to a node " + this; } @@ -589,12 +586,8 @@ public ShardRouting moveUnassignedFromPrimary() { **/ public boolean isSameAllocation(ShardRouting other) { boolean b = this.allocationId != null && other.allocationId != null && this.allocationId.getId().equals(other.allocationId.getId()); - assert b == false - || this.currentNodeId.equals(other.currentNodeId) : "ShardRoutings have the same allocation id but not the same node. This [" - + this - + "], other [" - + other - + "]"; + assert b == false || this.currentNodeId.equals(other.currentNodeId) + : "ShardRoutings have the same allocation id but not the same node. This [" + this + "], other [" + other + "]"; return b; } @@ -613,50 +606,35 @@ public boolean isRelocationTargetOf(ShardRouting other) { && this.state == ShardRoutingState.INITIALIZING && this.allocationId.getId().equals(other.allocationId.getRelocationId()); - assert b == false - || other.state == ShardRoutingState.RELOCATING : "ShardRouting is a relocation target but the source shard state isn't relocating. This [" + assert b == false || other.state == ShardRoutingState.RELOCATING + : "ShardRouting is a relocation target but the source shard state isn't relocating. 
This [" + this + "], other [" + other + "]"; + + assert b == false || other.allocationId.getId().equals(this.allocationId.getRelocationId()) + : "ShardRouting is a relocation target but the source id isn't equal to source's allocationId.getRelocationId." + + " This [" + + this + + "], other [" + + other + + "]"; + + assert b == false || other.currentNodeId().equals(this.relocatingNodeId) + : "ShardRouting is a relocation target but source current node id isn't equal to target relocating node." + + " This [" + + this + + "], other [" + + other + + "]"; + + assert b == false || this.currentNodeId().equals(other.relocatingNodeId) + : "ShardRouting is a relocation target but current node id isn't equal to source relocating node." + + " This [" + this + "], other [" + other + "]"; - assert b == false - || other.allocationId.getId() - .equals( - this.allocationId.getRelocationId() - ) : "ShardRouting is a relocation target but the source id isn't equal to source's allocationId.getRelocationId." - + " This [" - + this - + "], other [" - + other - + "]"; - - assert b == false - || other.currentNodeId() - .equals( - this.relocatingNodeId - ) : "ShardRouting is a relocation target but source current node id isn't equal to target relocating node." - + " This [" - + this - + "], other [" - + other - + "]"; - - assert b == false - || this.currentNodeId() - .equals( - other.relocatingNodeId - ) : "ShardRouting is a relocation target but current node id isn't equal to source relocating node." - + " This [" - + this - + "], other [" - + other - + "]"; - - assert b == false - || this.shardId.equals( - other.shardId - ) : "ShardRouting is a relocation target but both indexRoutings are not of the same shard id." + assert b == false || this.shardId.equals(other.shardId) + : "ShardRouting is a relocation target but both indexRoutings are not of the same shard id." + " This [" + this + "], other [" @@ -680,48 +658,35 @@ public boolean isRelocationSourceOf(ShardRouting other) { && other.state == ShardRoutingState.INITIALIZING && other.allocationId.getId().equals(this.allocationId.getRelocationId()); - assert b == false - || this.state == ShardRoutingState.RELOCATING : "ShardRouting is a relocation source but shard state isn't relocating. This [" + assert b == false || this.state == ShardRoutingState.RELOCATING + : "ShardRouting is a relocation source but shard state isn't relocating. This [" + this + "], other [" + other + "]"; + + assert b == false || this.allocationId.getId().equals(other.allocationId.getRelocationId()) + : "ShardRouting is a relocation source but the allocation id isn't equal to other.allocationId.getRelocationId." + + " This [" + + this + + "], other [" + + other + + "]"; + + assert b == false || this.currentNodeId().equals(other.relocatingNodeId) + : "ShardRouting is a relocation source but current node isn't equal to other's relocating node." + + " This [" + + this + + "], other [" + + other + + "]"; + + assert b == false || other.currentNodeId().equals(this.relocatingNodeId) + : "ShardRouting is a relocation source but relocating node isn't equal to other's current node." + + " This [" + this + "], other [" + other + "]"; - assert b == false - || this.allocationId.getId() - .equals( - other.allocationId.getRelocationId() - ) : "ShardRouting is a relocation source but the allocation id isn't equal to other.allocationId.getRelocationId." 
- + " This [" - + this - + "], other [" - + other - + "]"; - - assert b == false - || this.currentNodeId() - .equals( - other.relocatingNodeId - ) : "ShardRouting is a relocation source but current node isn't equal to other's relocating node." - + " This [" - + this - + "], other [" - + other - + "]"; - - assert b == false - || other.currentNodeId() - .equals( - this.relocatingNodeId - ) : "ShardRouting is a relocation source but relocating node isn't equal to other's current node." - + " This [" - + this - + "], other [" - + other - + "]"; - - assert b == false - || this.shardId.equals(other.shardId) : "ShardRouting is a relocation source but both indexRoutings are not of the same shard." + assert b == false || this.shardId.equals(other.shardId) + : "ShardRouting is a relocation source but both indexRoutings are not of the same shard." + " This [" + this + "], target [" diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java index 1680c13a72e0e..53ade0d6c5ae2 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java @@ -528,8 +528,8 @@ private boolean hasDeadNodes(RoutingAllocation allocation) { private void reroute(RoutingAllocation allocation) { assert hasDeadNodes(allocation) == false : "dead nodes should be explicitly cleaned up. See disassociateDeadNodes"; - assert AutoExpandReplicas.getAutoExpandReplicaChanges(allocation.metadata(), allocation) - .isEmpty() : "auto-expand replicas out of sync with number of nodes in the cluster"; + assert AutoExpandReplicas.getAutoExpandReplicaChanges(allocation.metadata(), allocation).isEmpty() + : "auto-expand replicas out of sync with number of nodes in the cluster"; assert assertInitialized(); removeDelayMarkers(allocation); @@ -602,15 +602,13 @@ private void applyStartedShards(RoutingAllocation routingAllocation, List oldInSyncAllocationIds = oldIndexMetadata.inSyncAllocationIds(shardId.id()); @@ -217,9 +213,8 @@ private IndexMetadata.Builder updateInSyncAllocations( inSyncAllocationIds.removeAll(updates.removedAllocationIds); assert oldInSyncAllocationIds.contains(RecoverySource.ExistingStoreRecoverySource.FORCED_ALLOCATION_ID) == false - || inSyncAllocationIds.contains( - RecoverySource.ExistingStoreRecoverySource.FORCED_ALLOCATION_ID - ) == false : "fake allocation id has to be removed, inSyncAllocationIds:" + inSyncAllocationIds; + || inSyncAllocationIds.contains(RecoverySource.ExistingStoreRecoverySource.FORCED_ALLOCATION_ID) == false + : "fake allocation id has to be removed, inSyncAllocationIds:" + inSyncAllocationIds; // Prevent set of inSyncAllocationIds to grow unboundedly. This can happen for example if we don't write to a primary // but repeatedly shut down nodes that have active replicas. 
@@ -258,9 +253,8 @@ private IndexMetadata.Builder updateInSyncAllocations( inSyncAllocationIds.add(updates.firstFailedPrimary.allocationId().getId()); } - assert inSyncAllocationIds.isEmpty() == false - || oldInSyncAllocationIds.isEmpty() : "in-sync allocations cannot become empty after they have been non-empty: " - + oldInSyncAllocationIds; + assert inSyncAllocationIds.isEmpty() == false || oldInSyncAllocationIds.isEmpty() + : "in-sync allocations cannot become empty after they have been non-empty: " + oldInSyncAllocationIds; // be extra safe here and only update in-sync set if it is non-empty if (inSyncAllocationIds.isEmpty() == false) { @@ -295,11 +289,8 @@ public static ClusterState removeStaleIdsWithoutRoutings(ClusterState clusterSta int shardNumber = shardEntry.getKey().getId(); Set oldInSyncAllocations = oldIndexMetadata.inSyncAllocationIds(shardNumber); Set idsToRemove = shardEntry.getValue().stream().map(e -> e.getAllocationId()).collect(Collectors.toSet()); - assert idsToRemove.stream() - .allMatch(id -> oldRoutingTable.getByAllocationId(shardEntry.getKey(), id) == null) : "removing stale ids: " - + idsToRemove - + ", some of which have still a routing entry: " - + oldRoutingTable; + assert idsToRemove.stream().allMatch(id -> oldRoutingTable.getByAllocationId(shardEntry.getKey(), id) == null) + : "removing stale ids: " + idsToRemove + ", some of which have still a routing entry: " + oldRoutingTable; Set remainingInSyncAllocations = Sets.difference(oldInSyncAllocations, idsToRemove); assert remainingInSyncAllocations.isEmpty() == false : "Set of in-sync ids cannot become empty for shard " + shardEntry.getKey() diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/RoutingNodesChangedObserver.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/RoutingNodesChangedObserver.java index 411b862312845..7be75d5baf0b2 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/RoutingNodesChangedObserver.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/RoutingNodesChangedObserver.java @@ -91,9 +91,8 @@ public void relocationCompleted(ShardRouting removedRelocationSource) { @Override public void relocationSourceRemoved(ShardRouting removedReplicaRelocationSource) { - assert removedReplicaRelocationSource.primary() == false - && removedReplicaRelocationSource.isRelocationTarget() : "expected replica relocation target shard " - + removedReplicaRelocationSource; + assert removedReplicaRelocationSource.primary() == false && removedReplicaRelocationSource.isRelocationTarget() + : "expected replica relocation target shard " + removedReplicaRelocationSource; setChanged(); } @@ -108,11 +107,8 @@ public void initializedReplicaReinitialized(ShardRouting oldReplica, ShardRoutin assert oldReplica.initializing() && oldReplica.primary() == false : "expected initializing replica shard " + oldReplica; assert reinitializedReplica.initializing() && reinitializedReplica.primary() == false : "expected reinitialized replica shard " + reinitializedReplica; - assert oldReplica.allocationId() - .getId() - .equals( - reinitializedReplica.allocationId().getId() - ) == false : "expected allocation id to change for reinitialized replica shard (old: " + assert oldReplica.allocationId().getId().equals(reinitializedReplica.allocationId().getId()) == false + : "expected allocation id to change for reinitialized replica shard (old: " + oldReplica + " new: " + reinitializedReplica diff --git 
a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index 93c9df71656f9..b3a045af91952 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -109,6 +109,12 @@ public class BalancedShardsAllocator implements ShardsAllocator { Property.Dynamic, Property.NodeScope ); + public static final Setting SHARD_MOVE_PRIMARY_FIRST_SETTING = Setting.boolSetting( + "cluster.routing.allocation.move.primary_first", + false, + Property.Dynamic, + Property.NodeScope + ); public static final Setting THRESHOLD_SETTING = Setting.floatSetting( "cluster.routing.allocation.balance.threshold", 1.0f, @@ -117,6 +123,7 @@ public class BalancedShardsAllocator implements ShardsAllocator { Property.NodeScope ); + private volatile boolean movePrimaryFirst; private volatile WeightFunction weightFunction; private volatile float threshold; @@ -128,10 +135,15 @@ public BalancedShardsAllocator(Settings settings) { public BalancedShardsAllocator(Settings settings, ClusterSettings clusterSettings) { setWeightFunction(INDEX_BALANCE_FACTOR_SETTING.get(settings), SHARD_BALANCE_FACTOR_SETTING.get(settings)); setThreshold(THRESHOLD_SETTING.get(settings)); + clusterSettings.addSettingsUpdateConsumer(SHARD_MOVE_PRIMARY_FIRST_SETTING, this::setMovePrimaryFirst); clusterSettings.addSettingsUpdateConsumer(INDEX_BALANCE_FACTOR_SETTING, SHARD_BALANCE_FACTOR_SETTING, this::setWeightFunction); clusterSettings.addSettingsUpdateConsumer(THRESHOLD_SETTING, this::setThreshold); } + private void setMovePrimaryFirst(boolean movePrimaryFirst) { + this.movePrimaryFirst = movePrimaryFirst; + } + private void setWeightFunction(float indexBalance, float shardBalanceFactor) { weightFunction = new WeightFunction(indexBalance, shardBalanceFactor); } @@ -146,7 +158,7 @@ public void allocate(RoutingAllocation allocation) { failAllocationOfNewPrimaries(allocation); return; } - final Balancer balancer = new Balancer(logger, allocation, weightFunction, threshold); + final Balancer balancer = new Balancer(logger, allocation, movePrimaryFirst, weightFunction, threshold); balancer.allocateUnassigned(); balancer.moveShards(); balancer.balance(); @@ -154,7 +166,7 @@ public void allocate(RoutingAllocation allocation) { @Override public ShardAllocationDecision decideShardAllocation(final ShardRouting shard, final RoutingAllocation allocation) { - Balancer balancer = new Balancer(logger, allocation, weightFunction, threshold); + Balancer balancer = new Balancer(logger, allocation, movePrimaryFirst, weightFunction, threshold); AllocateUnassignedDecision allocateUnassignedDecision = AllocateUnassignedDecision.NOT_TAKEN; MoveDecision moveDecision = MoveDecision.NOT_TAKEN; if (shard.unassigned()) { @@ -283,6 +295,7 @@ public static class Balancer { private final Map nodes; private final RoutingAllocation allocation; private final RoutingNodes routingNodes; + private final boolean movePrimaryFirst; private final WeightFunction weight; private final float threshold; @@ -291,9 +304,10 @@ public static class Balancer { private final NodeSorter sorter; private final Set inEligibleTargetNode; - public Balancer(Logger logger, RoutingAllocation allocation, WeightFunction weight, float threshold) { + public Balancer(Logger logger, RoutingAllocation allocation, 
    boolean movePrimaryFirst, WeightFunction weight, float threshold) { this.logger = logger; this.allocation = allocation; + this.movePrimaryFirst = movePrimaryFirst; this.weight = weight; this.threshold = threshold; this.routingNodes = allocation.routingNodes(); @@ -725,7 +739,8 @@ public void moveShards() { for (ModelNode currentNode : sorter.modelNodes) { checkAndAddInEligibleTargetNode(currentNode.getRoutingNode()); } - for (Iterator it = allocation.routingNodes().nodeInterleavedShardIterator(); it.hasNext();) { + boolean primariesThrottled = false; + for (Iterator it = allocation.routingNodes().nodeInterleavedShardIterator(movePrimaryFirst); it.hasNext();) { // Verify if the cluster concurrent recoveries have been reached. if (allocation.deciders().canMoveAnyShard(allocation).type() != Decision.Type.YES) { logger.info( @@ -745,11 +760,23 @@ ShardRouting shardRouting = it.next(); + // Ensure that replicas don't relocate if primaries are being throttled and primary first is enabled + if (movePrimaryFirst && primariesThrottled && !shardRouting.primary()) { + logger.info( + "Cannot move any replica shard in the cluster as movePrimaryFirst is enabled and primary shards" + + " are being throttled. Skipping shard iteration" + ); + return; + } + // Verify if the shard is allowed to move if outgoing recovery on the node hosting the primary shard // is not being throttled. Decision canMoveAwayDecision = allocation.deciders().canMoveAway(shardRouting, allocation); if (canMoveAwayDecision.type() != Decision.Type.YES) { if (logger.isDebugEnabled()) logger.debug("Cannot move away shard [{}] Skipping this shard", shardRouting); + if (shardRouting.primary() && canMoveAwayDecision.type() == Type.THROTTLE) { + primariesThrottled = true; + } continue; } diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java index bb51d9bc312ac..46d65f310a427 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java @@ -237,9 +237,8 @@ public ClusterApplierService getClusterApplierService() { public static boolean assertClusterOrMasterStateThread() { assert Thread.currentThread().getName().contains(ClusterApplierService.CLUSTER_UPDATE_THREAD_NAME) - || Thread.currentThread() - .getName() - .contains(MasterService.MASTER_UPDATE_THREAD_NAME) : "not called from the master/cluster state update thread"; + || Thread.currentThread().getName().contains(MasterService.MASTER_UPDATE_THREAD_NAME) + : "not called from the master/cluster state update thread"; return true; } diff --git a/server/src/main/java/org/opensearch/cluster/service/MasterService.java b/server/src/main/java/org/opensearch/cluster/service/MasterService.java index b971e8463bda9..7b0bede4c6c76 100644 --- a/server/src/main/java/org/opensearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/opensearch/cluster/service/MasterService.java @@ -539,8 +539,9 @@ void notifySuccessfulTasksOnUnchangedClusterState() { */ public List pendingTasks() { return Arrays.stream(threadPoolExecutor.getPending()).map(pending -> { - assert pending.task instanceof SourcePrioritizedRunnable : "thread pool executor should only use SourcePrioritizedRunnable instances but found: " - + pending.task.getClass().getName(); + assert pending.task instanceof SourcePrioritizedRunnable + : "thread pool executor should only use SourcePrioritizedRunnable 
    
instances but found: " + + pending.task.getClass().getName(); SourcePrioritizedRunnable task = (SourcePrioritizedRunnable) pending.task; return new PendingClusterTask( pending.insertionOrder, diff --git a/server/src/main/java/org/opensearch/cluster/service/TaskBatcher.java b/server/src/main/java/org/opensearch/cluster/service/TaskBatcher.java index bc3fc11d631da..a0648e0d9ab12 100644 --- a/server/src/main/java/org/opensearch/cluster/service/TaskBatcher.java +++ b/server/src/main/java/org/opensearch/cluster/service/TaskBatcher.java @@ -71,9 +71,8 @@ public void submitTasks(List tasks, @Nullable TimeValue t return; } final BatchedTask firstTask = tasks.get(0); - assert tasks.stream() - .allMatch(t -> t.batchingKey == firstTask.batchingKey) : "tasks submitted in a batch should share the same batching key: " - + tasks; + assert tasks.stream().allMatch(t -> t.batchingKey == firstTask.batchingKey) + : "tasks submitted in a batch should share the same batching key: " + tasks; // convert to an identity map to check for dups based on task identity final Map tasksIdentity = tasks.stream() .collect( @@ -124,8 +123,8 @@ private void onTimeoutInternal(List tasks, TimeValue time if (toRemove.isEmpty() == false) { BatchedTask firstTask = toRemove.get(0); Object batchingKey = firstTask.batchingKey; - assert tasks.stream() - .allMatch(t -> t.batchingKey == batchingKey) : "tasks submitted in a batch should share the same batching key: " + tasks; + assert tasks.stream().allMatch(t -> t.batchingKey == batchingKey) + : "tasks submitted in a batch should share the same batching key: " + tasks; synchronized (tasksPerBatchingKey) { LinkedHashSet existingTasks = tasksPerBatchingKey.get(batchingKey); if (existingTasks != null) { diff --git a/server/src/main/java/org/opensearch/common/LocalTimeOffset.java b/server/src/main/java/org/opensearch/common/LocalTimeOffset.java index d07b13ba0e123..94347c47e56e0 100644 --- a/server/src/main/java/org/opensearch/common/LocalTimeOffset.java +++ b/server/src/main/java/org/opensearch/common/LocalTimeOffset.java @@ -570,12 +570,8 @@ protected static Transition buildTransition(ZoneOffsetTransition transition, Loc long utcStart = transition.toEpochSecond() * 1000; long offsetBeforeMillis = transition.getOffsetBefore().getTotalSeconds() * 1000; long offsetAfterMillis = transition.getOffsetAfter().getTotalSeconds() * 1000; - assert (false == previous instanceof Transition) - || ((Transition) previous).startUtcMillis < utcStart : "transition list out of order at [" - + previous - + "] and [" - + transition - + "]"; + assert (false == previous instanceof Transition) || ((Transition) previous).startUtcMillis < utcStart + : "transition list out of order at [" + previous + "] and [" + transition + "]"; assert previous.millis != offsetAfterMillis : "transition list is has a duplicate at [" + previous + "] and [" diff --git a/server/src/main/java/org/opensearch/common/breaker/CircuitBreaker.java b/server/src/main/java/org/opensearch/common/breaker/CircuitBreaker.java index 4a2663710c588..ca6c5aa9698c3 100644 --- a/server/src/main/java/org/opensearch/common/breaker/CircuitBreaker.java +++ b/server/src/main/java/org/opensearch/common/breaker/CircuitBreaker.java @@ -66,12 +66,6 @@ public interface CircuitBreaker { * writing requests on the network layer. */ String IN_FLIGHT_REQUESTS = "in_flight_requests"; - /** - * The accounting breaker tracks things held in memory that is independent - * of the request lifecycle. This includes memory used by Lucene for - * segments. 
- */ - String ACCOUNTING = "accounting"; enum Type { // A regular or ChildMemoryCircuitBreaker diff --git a/server/src/main/java/org/opensearch/common/io/stream/BytesStreamInput.java b/server/src/main/java/org/opensearch/common/io/stream/BytesStreamInput.java new file mode 100644 index 0000000000000..e593f3c89b008 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/io/stream/BytesStreamInput.java @@ -0,0 +1,115 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.io.stream; + +import org.apache.lucene.util.BytesRef; + +import java.io.EOFException; +import java.io.IOException; + +/** + * {@link StreamInput} version of Lucene's {@link org.apache.lucene.store.ByteArrayDataInput} + * This is used as a replacement of Lucene ByteArrayDataInput for abstracting byte order changes + * in Lucene's API + * + * Attribution given to apache lucene project under ALv2: + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +public class BytesStreamInput extends StreamInput { + private byte[] bytes; + private int pos; + private int limit; + + public BytesStreamInput(byte[] bytes) { + reset(bytes); + } + + public BytesStreamInput(byte[] bytes, int offset, int len) { + reset(bytes, offset, len); + } + + public BytesStreamInput() { + reset(BytesRef.EMPTY_BYTES); + } + + public void reset(byte[] bytes) { + reset(bytes, 0, bytes.length); + } + + public int getPosition() { + return pos; + } + + public void setPosition(int pos) { + this.pos = pos; + } + + public void reset(byte[] bytes, int offset, int len) { + this.bytes = bytes; + pos = offset; + limit = offset + len; + } + + public boolean eof() { + return pos == limit; + } + + public void skipBytes(long count) { + pos += count; + } + + // NOTE: AIOOBE not EOF if you read too much + @Override + public byte readByte() { + return bytes[pos++]; + } + + // NOTE: AIOOBE not EOF if you read too much + @Override + public void readBytes(byte[] b, int offset, int len) { + System.arraycopy(bytes, pos, b, offset, len); + pos += len; + } + + @Override + public void close() {} + + @Override + public int available() { + return limit - pos; + } + + @Override + protected void ensureCanReadBytes(int length) throws EOFException { + int available = available(); + if (length > available) { + throw new EOFException("attempting to read " + length + " bytes but only " + available + " bytes are available"); + } + } + + @Override + public int read() throws IOException { + return bytes[pos++] & 0xFF; + } + +} diff --git a/server/src/main/java/org/opensearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java b/server/src/main/java/org/opensearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java index a25587c36a3e5..362badf046b75 100644 --- a/server/src/main/java/org/opensearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java +++ b/server/src/main/java/org/opensearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java @@ -117,10 +117,8 @@ final class PerThreadIDVersionAndSeqNoLookup { * entirely for these readers. */ public DocIdAndVersion lookupVersion(BytesRef id, boolean loadSeqNo, LeafReaderContext context) throws IOException { - assert context.reader() - .getCoreCacheHelper() - .getKey() - .equals(readerKey) : "context's reader is not the same as the reader class was initialized on."; + assert context.reader().getCoreCacheHelper().getKey().equals(readerKey) + : "context's reader is not the same as the reader class was initialized on."; int docID = getDocID(id, context); if (docID != DocIdSetIterator.NO_MORE_DOCS) { @@ -174,10 +172,8 @@ private static long readNumericDocValues(LeafReader reader, String field, int do /** Return null if id is not found. 
*/ DocIdAndSeqNo lookupSeqNo(BytesRef id, LeafReaderContext context) throws IOException { - assert context.reader() - .getCoreCacheHelper() - .getKey() - .equals(readerKey) : "context's reader is not the same as the reader class was initialized on."; + assert context.reader().getCoreCacheHelper().getKey().equals(readerKey) + : "context's reader is not the same as the reader class was initialized on."; final int docID = getDocID(id, context); if (docID != DocIdSetIterator.NO_MORE_DOCS) { final long seqNo = readNumericDocValues(context.reader(), SeqNoFieldMapper.NAME, docID); diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index f1187f3344c83..308ab13a8a785 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -219,6 +219,7 @@ public void apply(Settings value, Settings current, Settings previous) { AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING, BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING, BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING, + BalancedShardsAllocator.SHARD_MOVE_PRIMARY_FIRST_SETTING, BalancedShardsAllocator.THRESHOLD_SETTING, BreakerSettings.CIRCUIT_BREAKER_LIMIT_SETTING, BreakerSettings.CIRCUIT_BREAKER_OVERHEAD_SETTING, @@ -328,8 +329,6 @@ public void apply(Settings value, Settings current, Settings previous) { HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_OVERHEAD_SETTING, HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, - HierarchyCircuitBreakerService.ACCOUNTING_CIRCUIT_BREAKER_LIMIT_SETTING, - HierarchyCircuitBreakerService.ACCOUNTING_CIRCUIT_BREAKER_OVERHEAD_SETTING, IndexModule.NODE_STORE_ALLOW_MMAP, ClusterApplierService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, ClusterService.USER_DEFINED_METADATA, @@ -518,6 +517,7 @@ public void apply(Settings value, Settings current, Settings previous) { FastVectorHighlighter.SETTING_TV_HIGHLIGHT_MULTI_VALUE, Node.BREAKER_TYPE_KEY, OperationRouting.USE_ADAPTIVE_REPLICA_SELECTION_SETTING, + OperationRouting.IGNORE_AWARENESS_ATTRIBUTES_SETTING, IndexGraveyard.SETTING_MAX_TOMBSTONES, PersistentTasksClusterService.CLUSTER_TASKS_ALLOCATION_RECHECK_INTERVAL_SETTING, EnableAssignmentDecider.CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING, diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index 44fa83f40ce74..bd6490af5c071 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -118,7 +118,6 @@ public final class IndexScopedSettings extends AbstractScopedSettings { MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING, MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING, MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT_SETTING, MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING, MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING, MergePolicyConfig.INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING, diff --git a/server/src/main/java/org/opensearch/common/settings/Setting.java 
    b/server/src/main/java/org/opensearch/common/settings/Setting.java index 63801e1ef16ff..8618687218987 100644 --- a/server/src/main/java/org/opensearch/common/settings/Setting.java +++ b/server/src/main/java/org/opensearch/common/settings/Setting.java @@ -77,12 +77,12 @@ /** * A setting. Encapsulates typical stuff like default value, parsing, and scope. - * Some (SettingsProperty.Dynamic) can by modified at run time using the API. + * Some (Settings.Property.Dynamic) can be modified at run time using the API. * All settings inside opensearch or in any of the plugins should use this type-safe and generic settings infrastructure * together with {@link AbstractScopedSettings}. This class contains several utility methods that make it straightforward * to add settings for the majority of the cases. For instance a simple boolean setting can be defined like this: *
    

{@code
- * public static final Setting; MY_BOOLEAN = Setting.boolSetting("my.bool.setting", true, SettingsProperty.NodeScope);}
+ * public static final Setting MY_BOOLEAN = Setting.boolSetting("my.bool.setting", true, Setting.Property.NodeScope);}
  * 
* To retrieve the value of the setting a {@link Settings} object can be passed directly to the {@link Setting#get(Settings)} method. *
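    To make the javadoc above concrete, here is a small sketch of declaring and reading a boolean setting; the class and setting key are invented for illustration, and the Dynamic/NodeScope combination mirrors how the new cluster.routing.allocation.move.primary_first setting is registered elsewhere in this patch so it can be updated at run time.

        import org.opensearch.common.settings.Setting;
        import org.opensearch.common.settings.Setting.Property;
        import org.opensearch.common.settings.Settings;

        public class SettingUsageSketch {
            // Shaped like the boolean example in the javadoc above; the key and the
            // class name are made up. Dynamic marks the setting as updatable at run time.
            public static final Setting<Boolean> MY_BOOLEAN = Setting.boolSetting(
                "my.bool.setting",
                true,
                Property.Dynamic,
                Property.NodeScope
            );

            public static void main(String[] args) {
                Settings configured = Settings.builder().put("my.bool.setting", false).build();
                System.out.println(MY_BOOLEAN.get(configured));     // false, the configured value
                System.out.println(MY_BOOLEAN.get(Settings.EMPTY)); // true, the declared default
            }
        }
    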
@@ -94,7 +94,7 @@
  *     RED, GREEN, BLUE;
  * }
      * public static final Setting MY_COLOR =
    
- *     new Setting<>("my.color.setting", Color.RED.toString(), Color::valueOf, SettingsProperty.NodeScope);
+ *     new Setting<>("my.color.setting", Color.RED.toString(), Color::valueOf, Setting.Property.NodeScope);
  * }
  * 
    */ @@ -173,9 +173,8 @@ private Setting( Validator validator, Property... properties ) { - assert this instanceof SecureSetting - || this.isGroupSetting() - || parser.apply(defaultValue.apply(Settings.EMPTY)) != null : "parser returned null"; + assert this instanceof SecureSetting || this.isGroupSetting() || parser.apply(defaultValue.apply(Settings.EMPTY)) != null + : "parser returned null"; this.key = key; this.fallbackSetting = fallbackSetting; this.defaultValue = defaultValue; @@ -279,7 +278,7 @@ public Setting(String key, Function defaultValue, Function getNamespaces(Settings settings) { } /** - * Returns a map of all namespaces to it's values give the provided settings + * Returns a map of all namespaces to its values given the provided settings */ public Map getAsMap(Settings settings) { Map map = new HashMap<>(); @@ -1759,7 +1758,7 @@ public static ByteSizeValue parseByteSize(String s, ByteSizeValue minValue, Byte * * @param key the key for the setting * @param defaultValue the default value for this setting - * @param properties properties properties for this setting like scope, filtering... + * @param properties properties for this setting like scope, filtering... * @return the setting object */ public static Setting memorySizeSetting(String key, ByteSizeValue defaultValue, Property... properties) { @@ -1773,7 +1772,7 @@ public static Setting memorySizeSetting(String key, ByteSizeValue * * @param key the key for the setting * @param defaultValue a function that supplies the default value for this setting - * @param properties properties properties for this setting like scope, filtering... + * @param properties properties for this setting like scope, filtering... * @return the setting object */ public static Setting memorySizeSetting(String key, Function defaultValue, Property... properties) { @@ -1787,7 +1786,7 @@ public static Setting memorySizeSetting(String key, Function memorySizeSetting(String key, String defaultPercentage, Property... properties) { @@ -1801,7 +1800,7 @@ public static Setting memorySizeSetting(String key, String defaul * * @param key the key for the setting * @param fallbackSetting a memory size setting to use as fallback - * @param properties properties properties for this setting like scope, filtering... + * @param properties properties for this setting like scope, filtering... * @return the setting object */ public static Setting memorySizeSetting(String key, Setting fallbackSetting, Property... properties) { @@ -1980,7 +1979,7 @@ static void logSettingUpdate(Setting setting, Settings current, Settings previou * Creates a group of settings prefixed by a key. * * @param key the group key for the setting - * @param properties properties properties for this setting like scope, filtering... + * @param properties properties for this setting like scope, filtering... * @return the group setting object */ public static Setting groupSetting(String key, Property... properties) { @@ -1992,7 +1991,7 @@ public static Setting groupSetting(String key, Property... properties) * * @param key the group key for the setting * @param validator a {@link Validator} for validating this setting - * @param properties properties properties for this setting like scope, filtering... + * @param properties properties for this setting like scope, filtering... * @return the group setting object */ public static Setting groupSetting(String key, Consumer validator, Property... 
    
    properties) { @@ -2004,7 +2003,7 @@ public static Setting groupSetting(String key, Consumer vali * * @param key the group key for the setting * @param fallback a {@link GroupSetting} to use as fallback when no group key values exist - * @param properties properties properties for this setting like scope, filtering... + * @param properties properties for this setting like scope, filtering... * @return the group setting object */ public static Setting groupSetting(String key, final Setting fallback, Property... properties) { @@ -2017,7 +2016,7 @@ public static Setting groupSetting(String key, final Setting * @param key the group key for the setting * @param fallback a {@link GroupSetting} to use as fallback when no group key values exist * @param validator a {@link Validator} for validating this setting - * @param properties properties properties for this setting like scope, filtering... + * @param properties properties for this setting like scope, filtering... * @return the group setting object */ public static Setting groupSetting( diff --git a/server/src/main/java/org/opensearch/common/util/BigArrays.java b/server/src/main/java/org/opensearch/common/util/BigArrays.java index 287c0999d8998..e877f75bd2a0f 100644 --- a/server/src/main/java/org/opensearch/common/util/BigArrays.java +++ b/server/src/main/java/org/opensearch/common/util/BigArrays.java @@ -455,12 +455,11 @@ public CircuitBreakerService breakerService() { private T resizeInPlace(T array, long newSize) { final long oldMemSize = array.ramBytesUsed(); final long oldSize = array.size(); - assert oldMemSize == array.ramBytesEstimated( - oldSize - ) : "ram bytes used should equal that which was previously estimated: ramBytesUsed=" - + oldMemSize - + ", ramBytesEstimated=" - + array.ramBytesEstimated(oldSize); + assert oldMemSize == array.ramBytesEstimated(oldSize) + : "ram bytes used should equal that which was previously estimated: ramBytesUsed=" + + oldMemSize + + ", ramBytesEstimated=" + + array.ramBytesEstimated(oldSize); final long estimatedIncreaseInBytes = array.ramBytesEstimated(newSize) - oldMemSize; adjustBreaker(estimatedIncreaseInBytes, false); array.resize(newSize); diff --git a/server/src/main/java/org/opensearch/common/util/ByteUtils.java b/server/src/main/java/org/opensearch/common/util/ByteUtils.java index 65cf9b567c73e..b0054bc1a06b7 100644 --- a/server/src/main/java/org/opensearch/common/util/ByteUtils.java +++ b/server/src/main/java/org/opensearch/common/util/ByteUtils.java @@ -32,15 +32,11 @@ package org.opensearch.common.util; -import org.apache.lucene.store.ByteArrayDataInput; -import org.apache.lucene.store.ByteArrayDataOutput; - /** Utility methods to do byte-level encoding. These methods are biased towards little-endian byte order because it is the most * common byte order and reading several bytes at once may be optimizable in the future with the help of sun.misc.Unsafe. */ -public enum ByteUtils { - ; +public final class ByteUtils { - public static final int MAX_BYTES_VLONG = 9; + private ByteUtils() {}; /** Zig-zag decode. */ public static long zigZagDecode(long n) { @@ -107,46 +103,4 @@ public static void writeFloatLE(float d, byte[] arr, int offset) { public static float readFloatLE(byte[] arr, int offset) { return Float.intBitsToFloat(readIntLE(arr, offset)); } - - /** Same as DataOutput#writeVLong but accepts negative values (written on 9 bytes). 
    
*/ - public static void writeVLong(ByteArrayDataOutput out, long i) { - for (int k = 0; k < 8 && (i & ~0x7FL) != 0L; ++k) { - out.writeByte((byte) ((i & 0x7FL) | 0x80L)); - i >>>= 7; - } - out.writeByte((byte) i); - } - - /** Same as DataOutput#readVLong but can read negative values (read on 9 bytes). */ - public static long readVLong(ByteArrayDataInput in) { - // unwinded because of hotspot bugs, see Lucene's impl - byte b = in.readByte(); - if (b >= 0) return b; - long i = b & 0x7FL; - b = in.readByte(); - i |= (b & 0x7FL) << 7; - if (b >= 0) return i; - b = in.readByte(); - i |= (b & 0x7FL) << 14; - if (b >= 0) return i; - b = in.readByte(); - i |= (b & 0x7FL) << 21; - if (b >= 0) return i; - b = in.readByte(); - i |= (b & 0x7FL) << 28; - if (b >= 0) return i; - b = in.readByte(); - i |= (b & 0x7FL) << 35; - if (b >= 0) return i; - b = in.readByte(); - i |= (b & 0x7FL) << 42; - if (b >= 0) return i; - b = in.readByte(); - i |= (b & 0x7FL) << 49; - if (b >= 0) return i; - b = in.readByte(); - i |= (b & 0xFFL) << 56; - return i; - } - } diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/QueueResizingOpenSearchThreadPoolExecutor.java b/server/src/main/java/org/opensearch/common/util/concurrent/QueueResizingOpenSearchThreadPoolExecutor.java index eb0e5bb6ca511..39561039c5c6f 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/QueueResizingOpenSearchThreadPoolExecutor.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/QueueResizingOpenSearchThreadPoolExecutor.java @@ -178,12 +178,11 @@ protected void afterExecute(Runnable r, Throwable t) { final long totalNanos = totalTaskNanos.addAndGet(taskNanos); final long taskExecutionNanos = timedRunnable.getTotalExecutionNanos(); - assert taskExecutionNanos >= 0 - || (failedOrRejected - && taskExecutionNanos == -1) : "expected task to always take longer than 0 nanoseconds or have '-1' failure code, got: " - + taskExecutionNanos - + ", failedOrRejected: " - + failedOrRejected; + assert taskExecutionNanos >= 0 || (failedOrRejected && taskExecutionNanos == -1) + : "expected task to always take longer than 0 nanoseconds or have '-1' failure code, got: " + + taskExecutionNanos + + ", failedOrRejected: " + + failedOrRejected; if (taskExecutionNanos != -1) { // taskExecutionNanos may be -1 if the task threw an exception executionEWMA.addValue(taskExecutionNanos); diff --git a/server/src/main/java/org/opensearch/env/NodeEnvironment.java b/server/src/main/java/org/opensearch/env/NodeEnvironment.java index db62f8f8901f4..06109e7fcdc5b 100644 --- a/server/src/main/java/org/opensearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/opensearch/env/NodeEnvironment.java @@ -563,30 +563,25 @@ public void deleteShardDirectoryUnderLock(ShardLock lock, IndexSettings indexSet } private static boolean assertPathsDoNotExist(final Path[] paths) { - Set existingPaths = Stream.of(paths) - .filter(FileSystemUtils::exists) - .filter( - leftOver -> { - // Relaxed assertion for the special case where only the empty state directory exists after deleting - // the shard directory because it was created again as a result of a metadata read action concurrently. 
- try (DirectoryStream children = Files.newDirectoryStream(leftOver)) { - Iterator iter = children.iterator(); - if (iter.hasNext() == false) { - return true; - } - Path maybeState = iter.next(); - if (iter.hasNext() || maybeState.equals(leftOver.resolve(MetadataStateFormat.STATE_DIR_NAME)) == false) { - return true; - } - try (DirectoryStream stateChildren = Files.newDirectoryStream(maybeState)) { - return stateChildren.iterator().hasNext(); - } - } catch (IOException e) { - throw new UncheckedIOException(e); - } + Set existingPaths = Stream.of(paths).filter(FileSystemUtils::exists).filter(leftOver -> { + // Relaxed assertion for the special case where only the empty state directory exists after deleting + // the shard directory because it was created again as a result of a metadata read action concurrently. + try (DirectoryStream children = Files.newDirectoryStream(leftOver)) { + Iterator iter = children.iterator(); + if (iter.hasNext() == false) { + return true; } - ) - .collect(Collectors.toSet()); + Path maybeState = iter.next(); + if (iter.hasNext() || maybeState.equals(leftOver.resolve(MetadataStateFormat.STATE_DIR_NAME)) == false) { + return true; + } + try (DirectoryStream stateChildren = Files.newDirectoryStream(maybeState)) { + return stateChildren.iterator().hasNext(); + } + } catch (IOException e) { + throw new UncheckedIOException(e); + } + }).collect(Collectors.toSet()); assert existingPaths.size() == 0 : "Paths exist that should have been deleted: " + existingPaths; return existingPaths.size() == 0; } diff --git a/server/src/main/java/org/opensearch/env/NodeMetadata.java b/server/src/main/java/org/opensearch/env/NodeMetadata.java index bce0209732356..cb6257002b62b 100644 --- a/server/src/main/java/org/opensearch/env/NodeMetadata.java +++ b/server/src/main/java/org/opensearch/env/NodeMetadata.java @@ -125,8 +125,8 @@ public void setNodeVersionId(int nodeVersionId) { public NodeMetadata build() { final Version nodeVersion; if (this.nodeVersion == null) { - assert Version.CURRENT.major <= LegacyESVersion.V_7_0_0.major - + 1 : "version is required in the node metadata from v9 onwards"; + assert Version.CURRENT.major <= LegacyESVersion.V_7_0_0.major + 1 + : "version is required in the node metadata from v9 onwards"; nodeVersion = Version.V_EMPTY; } else { nodeVersion = this.nodeVersion; diff --git a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java index ce4f749be0fe5..fd978a9c8ed8b 100644 --- a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java @@ -134,8 +134,8 @@ public void start( long currentTerm = onDiskState.currentTerm; if (onDiskState.empty()) { - assert Version.CURRENT.major <= LegacyESVersion.V_7_0_0.major - + 1 : "legacy metadata loader is not needed anymore from v9 onwards"; + assert Version.CURRENT.major <= LegacyESVersion.V_7_0_0.major + 1 + : "legacy metadata loader is not needed anymore from v9 onwards"; final Tuple legacyState = metaStateService.loadFullState(); if (legacyState.v1().isEmpty() == false) { metadata = legacyState.v2(); diff --git a/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java b/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java index 1e6c6d15dd91c..dbde8abf02d6d 100644 --- a/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java @@ -368,11 
+368,10 @@ protected static NodeShardsResult buildNodeShardsResult( } if (allocationId != null) { - assert nodeShardState.storeException() == null - || nodeShardState - .storeException() instanceof ShardLockObtainFailedException : "only allow store that can be opened or that throws a ShardLockObtainFailedException while being opened but got a " - + "store throwing " - + nodeShardState.storeException(); + assert nodeShardState.storeException() == null || nodeShardState.storeException() instanceof ShardLockObtainFailedException + : "only allow store that can be opened or that throws a ShardLockObtainFailedException while being opened but got a " + + "store throwing " + + nodeShardState.storeException(); numberOfAllocationsFound++; if (matchAnyShard || inSyncAllocationIds.contains(nodeShardState.allocationId())) { nodeShardStates.add(nodeShardState); diff --git a/server/src/main/java/org/opensearch/http/DefaultRestChannel.java b/server/src/main/java/org/opensearch/http/DefaultRestChannel.java index 70c386b16ee03..d94eadf82463a 100644 --- a/server/src/main/java/org/opensearch/http/DefaultRestChannel.java +++ b/server/src/main/java/org/opensearch/http/DefaultRestChannel.java @@ -128,8 +128,8 @@ public void sendResponse(RestResponse restResponse) { finalContent = BytesArray.EMPTY; } } catch (IllegalArgumentException ignored) { - assert restResponse - .status() == RestStatus.METHOD_NOT_ALLOWED : "request HTTP method is unsupported but HTTP status is not METHOD_NOT_ALLOWED(405)"; + assert restResponse.status() == RestStatus.METHOD_NOT_ALLOWED + : "request HTTP method is unsupported but HTTP status is not METHOD_NOT_ALLOWED(405)"; } final HttpResponse httpResponse = httpRequest.createResponse(restResponse.status(), finalContent); diff --git a/server/src/main/java/org/opensearch/index/CompositeIndexEventListener.java b/server/src/main/java/org/opensearch/index/CompositeIndexEventListener.java index 60c012d9e652a..79c97d1126e93 100644 --- a/server/src/main/java/org/opensearch/index/CompositeIndexEventListener.java +++ b/server/src/main/java/org/opensearch/index/CompositeIndexEventListener.java @@ -135,21 +135,6 @@ public void afterIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSha } } - @Override - public void onShardInactive(IndexShard indexShard) { - for (IndexEventListener listener : listeners) { - try { - listener.onShardInactive(indexShard); - } catch (Exception e) { - logger.warn( - () -> new ParameterizedMessage("[{}] failed to invoke on shard inactive callback", indexShard.shardId().getId()), - e - ); - throw e; - } - } - } - @Override public void indexShardStateChanged( IndexShard indexShard, diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java index 1de3c2cf571f9..c1d3f4ddab147 100644 --- a/server/src/main/java/org/opensearch/index/IndexService.java +++ b/server/src/main/java/org/opensearch/index/IndexService.java @@ -629,8 +629,7 @@ public IndexSettings getIndexSettings() { } /** - * Creates a new QueryShardContext. The context has not types set yet, if types are required set them via - * {@link QueryShardContext#setTypes(String...)}. + * Creates a new QueryShardContext. * * Passing a {@code null} {@link IndexSearcher} will return a valid context, however it won't be able to make * {@link IndexReader}-specific optimizations, such as rewriting containing range queries. 
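    Since the BytesStreamInput class added earlier in this patch is small and self-contained, a brief usage sketch may help; this is a best-guess example, and it exercises only the methods defined in that class as shown above.

        import org.opensearch.common.io.stream.BytesStreamInput;

        public class BytesStreamInputSketch {
            public static void main(String[] args) throws Exception {
                byte[] data = new byte[] { 10, 20, 30, 40 };
                BytesStreamInput in = new BytesStreamInput(data);

                System.out.println(in.readByte());   // 10, position advances to 1
                byte[] two = new byte[2];
                in.readBytes(two, 0, 2);             // copies {20, 30}
                System.out.println(in.available());  // 1 byte remaining
                in.skipBytes(1);
                System.out.println(in.eof());        // true, position == limit

                in.setPosition(0);                   // rewind and re-read
                System.out.println(in.read());       // 10, returned as an unsigned int
            }
        }
    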
    diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 36fcae1d93d62..d5ea7a037af5d 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -353,11 +353,11 @@ public final class IndexSettings { /** * Specifies if the index should use soft-delete instead of hard-delete for update/delete operations. - * Soft-deletes is enabled by default for 7.0+ indices. + * Soft-deletes are enabled by default for Legacy 7.x and 1.x indices and mandatory for 2.0+ indices. */ public static final Setting INDEX_SOFT_DELETES_SETTING = Setting.boolSetting( "index.soft_deletes.enabled", - settings -> Boolean.toString(IndexMetadata.SETTING_INDEX_VERSION_CREATED.get(settings).onOrAfter(LegacyESVersion.V_7_0_0)), + true, Property.IndexScope, Property.Final ); @@ -677,6 +677,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti mergeSchedulerConfig = new MergeSchedulerConfig(this); gcDeletesInMillis = scopedSettings.get(INDEX_GC_DELETES_SETTING).getMillis(); softDeleteEnabled = scopedSettings.get(INDEX_SOFT_DELETES_SETTING); + assert softDeleteEnabled || version.before(Version.V_2_0_0) : "soft deletes must be enabled in version " + version; softDeleteRetentionOperations = scopedSettings.get(INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING); retentionLeaseMillis = scopedSettings.get(INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING).millis(); warmerEnabled = scopedSettings.get(INDEX_WARMER_ENABLED_SETTING); @@ -723,10 +724,6 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING, mergePolicyConfig::setMaxMergesAtOnce ); - scopedSettings.addSettingsUpdateConsumer( - MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT_SETTING, - mergePolicyConfig::setMaxMergesAtOnceExplicit - ); scopedSettings.addSettingsUpdateConsumer( MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING, mergePolicyConfig::setMaxMergedSegment diff --git a/server/src/main/java/org/opensearch/index/IndexingPressureService.java b/server/src/main/java/org/opensearch/index/IndexingPressureService.java index c1fc97ed1138f..9044c5942e8cb 100644 --- a/server/src/main/java/org/opensearch/index/IndexingPressureService.java +++ b/server/src/main/java/org/opensearch/index/IndexingPressureService.java @@ -13,6 +13,8 @@ import org.opensearch.index.stats.IndexingPressureStats; import org.opensearch.index.stats.ShardIndexingPressureStats; +import java.util.function.LongSupplier; + /** * Sets up classes for node/shard level indexing pressure. * Provides abstraction and orchestration for indexing pressure interfaces when called from Transport Actions or for Stats. @@ -25,22 +27,48 @@ public IndexingPressureService(Settings settings, ClusterService clusterService) shardIndexingPressure = new ShardIndexingPressure(settings, clusterService); } - public Releasable markCoordinatingOperationStarted(long bytes, boolean forceExecution) { + /** + * Marks the beginning of coordinating operation for an indexing request on the node. Rejects the operation if node's + * memory limit is breached. + * Performs the node level accounting only if shard indexing pressure is disabled. Else empty releasable is returned. 
    
    + * @param bytes memory bytes to be tracked for the current operation + * @param forceExecution permits operation even if the node level memory limit is breached + * @return Releasable to mark the completion of operation and release the accounted bytes + */ + public Releasable markCoordinatingOperationStarted(LongSupplier bytes, boolean forceExecution) { if (isShardIndexingPressureEnabled() == false) { - return shardIndexingPressure.markCoordinatingOperationStarted(bytes, forceExecution); + return shardIndexingPressure.markCoordinatingOperationStarted(bytes.getAsLong(), forceExecution); } else { return () -> {}; } } - public Releasable markCoordinatingOperationStarted(ShardId shardId, long bytes, boolean forceExecution) { + /** + * Marks the beginning of coordinating operation for an indexing request on the Shard. Rejects the operation if shard's + * memory limit is breached. + * Performs the shard level accounting only if shard indexing pressure is enabled. Else empty releasable is returned. + * @param shardId Shard ID for which the current indexing operation is targeted for + * @param bytes memory bytes to be tracked for the current operation + * @param forceExecution permits operation even if the node level memory limit is breached + * @return Releasable to mark the completion of operation and release the accounted bytes + */ + public Releasable markCoordinatingOperationStarted(ShardId shardId, LongSupplier bytes, boolean forceExecution) { if (isShardIndexingPressureEnabled()) { - return shardIndexingPressure.markCoordinatingOperationStarted(shardId, bytes, forceExecution); + return shardIndexingPressure.markCoordinatingOperationStarted(shardId, bytes.getAsLong(), forceExecution); } else { return () -> {}; } } + /** + * Marks the beginning of primary operation for an indexing request. Rejects the operation if memory limit is breached. + * Performs the node level accounting only if shard indexing pressure is disabled. Else shard level accounting + * is performed. + * @param shardId Shard ID for which the current indexing operation is targeted for + * @param bytes memory bytes to be tracked for the current operation + * @param forceExecution permits operation even if the memory limit is breached + * @return Releasable to mark the completion of operation and release the accounted bytes + */ public Releasable markPrimaryOperationStarted(ShardId shardId, long bytes, boolean forceExecution) { if (isShardIndexingPressureEnabled()) { return shardIndexingPressure.markPrimaryOperationStarted(shardId, bytes, forceExecution); @@ -49,6 +77,15 @@ public Releasable markPrimaryOperationStarted(ShardId shardId, long bytes, boole } } + /** + * Marks the beginning of primary operation for an indexing request, when primary shard is local to the coordinator node. + * Rejects the operation if memory limit is breached. + * Performs the node level accounting only if shard indexing pressure is disabled. Else shard level accounting + * is performed. 
    
    + * @param shardId Shard ID for which the current indexing operation is targeted for + * @param bytes memory bytes to be tracked for the current operation + * @return Releasable to mark the completion of operation and release the accounted bytes + */ public Releasable markPrimaryOperationLocalToCoordinatingNodeStarted(ShardId shardId, long bytes) { if (isShardIndexingPressureEnabled()) { return shardIndexingPressure.markPrimaryOperationLocalToCoordinatingNodeStarted(shardId, bytes); @@ -57,6 +94,15 @@ public Releasable markPrimaryOperationLocalToCoordinatingNodeStarted(ShardId sha } } + /** + * Marks the beginning of replication operation for an indexing request. Rejects the operation if memory limit is breached. + * Performs the node level accounting only if shard indexing pressure is disabled. Else shard level accounting + * is performed. + * @param shardId Shard ID for which the current indexing operation is targeted for + * @param bytes memory bytes to be tracked for the current operation + * @param forceExecution permits operation even if the memory limit is breached + * @return Releasable to mark the completion of operation and release the accounted bytes + */ public Releasable markReplicaOperationStarted(ShardId shardId, long bytes, boolean forceExecution) { if (isShardIndexingPressureEnabled()) { return shardIndexingPressure.markReplicaOperationStarted(shardId, bytes, forceExecution); diff --git a/server/src/main/java/org/opensearch/index/MergePolicyConfig.java b/server/src/main/java/org/opensearch/index/MergePolicyConfig.java index f5188a3348b88..26d2588e3f240 100644 --- a/server/src/main/java/org/opensearch/index/MergePolicyConfig.java +++ b/server/src/main/java/org/opensearch/index/MergePolicyConfig.java @@ -76,11 +76,6 @@ * Maximum number of segments to be merged at a time during "normal" merging. * Default is 10. * - *
    
  • index.merge.policy.max_merge_at_once_explicit: - * - * Maximum number of segments to be merged at a time, during force merge or - * expungeDeletes. Default is 30. - * *
  • index.merge.policy.max_merged_segment: * * Maximum sized segment to produce during normal merging (not explicit @@ -136,7 +131,6 @@ public final class MergePolicyConfig { public static final double DEFAULT_EXPUNGE_DELETES_ALLOWED = 10d; public static final ByteSizeValue DEFAULT_FLOOR_SEGMENT = new ByteSizeValue(2, ByteSizeUnit.MB); public static final int DEFAULT_MAX_MERGE_AT_ONCE = 10; - public static final int DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT = 30; public static final ByteSizeValue DEFAULT_MAX_MERGED_SEGMENT = new ByteSizeValue(5, ByteSizeUnit.GB); public static final double DEFAULT_SEGMENTS_PER_TIER = 10.0d; public static final double DEFAULT_RECLAIM_DELETES_WEIGHT = 2.0d; @@ -169,13 +163,6 @@ public final class MergePolicyConfig { Property.Dynamic, Property.IndexScope ); - public static final Setting INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT_SETTING = Setting.intSetting( - "index.merge.policy.max_merge_at_once_explicit", - DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT, - 2, - Property.Dynamic, - Property.IndexScope - ); public static final Setting INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING = Setting.byteSizeSetting( "index.merge.policy.max_merged_segment", DEFAULT_MAX_MERGED_SEGMENT, @@ -213,7 +200,6 @@ public final class MergePolicyConfig { double forceMergeDeletesPctAllowed = indexSettings.getValue(INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING); // percentage ByteSizeValue floorSegment = indexSettings.getValue(INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING); int maxMergeAtOnce = indexSettings.getValue(INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING); - int maxMergeAtOnceExplicit = indexSettings.getValue(INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT_SETTING); // TODO is this really a good default number for max_merge_segment, what happens for large indices, // won't they end up with many segments? 
ByteSizeValue maxMergedSegment = indexSettings.getValue(INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING); @@ -232,19 +218,17 @@ public final class MergePolicyConfig { mergePolicy.setForceMergeDeletesPctAllowed(forceMergeDeletesPctAllowed); mergePolicy.setFloorSegmentMB(floorSegment.getMbFrac()); mergePolicy.setMaxMergeAtOnce(maxMergeAtOnce); - mergePolicy.setMaxMergeAtOnceExplicit(maxMergeAtOnceExplicit); mergePolicy.setMaxMergedSegmentMB(maxMergedSegment.getMbFrac()); mergePolicy.setSegmentsPerTier(segmentsPerTier); mergePolicy.setDeletesPctAllowed(deletesPctAllowed); if (logger.isTraceEnabled()) { logger.trace( "using [tiered] merge mergePolicy with expunge_deletes_allowed[{}], floor_segment[{}]," - + " max_merge_at_once[{}], max_merge_at_once_explicit[{}], max_merged_segment[{}], segments_per_tier[{}]," + + " max_merge_at_once[{}], max_merged_segment[{}], segments_per_tier[{}]," + " deletes_pct_allowed[{}]", forceMergeDeletesPctAllowed, floorSegment, maxMergeAtOnce, - maxMergeAtOnceExplicit, maxMergedSegment, segmentsPerTier, deletesPctAllowed @@ -260,10 +244,6 @@ void setMaxMergedSegment(ByteSizeValue maxMergedSegment) { mergePolicy.setMaxMergedSegmentMB(maxMergedSegment.getMbFrac()); } - void setMaxMergesAtOnceExplicit(Integer maxMergeAtOnceExplicit) { - mergePolicy.setMaxMergeAtOnceExplicit(maxMergeAtOnceExplicit); - } - void setMaxMergesAtOnce(Integer maxMergeAtOnce) { mergePolicy.setMaxMergeAtOnce(maxMergeAtOnce); } diff --git a/server/src/main/java/org/opensearch/index/OpenSearchTieredMergePolicy.java b/server/src/main/java/org/opensearch/index/OpenSearchTieredMergePolicy.java index 2c120a5ecd989..0289e96b356be 100644 --- a/server/src/main/java/org/opensearch/index/OpenSearchTieredMergePolicy.java +++ b/server/src/main/java/org/opensearch/index/OpenSearchTieredMergePolicy.java @@ -99,15 +99,6 @@ public int getMaxMergeAtOnce() { return regularMergePolicy.getMaxMergeAtOnce(); } - public void setMaxMergeAtOnceExplicit(int maxMergeAtOnceExplicit) { - regularMergePolicy.setMaxMergeAtOnceExplicit(maxMergeAtOnceExplicit); - forcedMergePolicy.setMaxMergeAtOnceExplicit(maxMergeAtOnceExplicit); - } - - public int getMaxMergeAtOnceExplicit() { - return forcedMergePolicy.getMaxMergeAtOnceExplicit(); - } - // only setter that must NOT delegate to the forced merge policy public void setMaxMergedSegmentMB(double mbFrac) { regularMergePolicy.setMaxMergedSegmentMB(mbFrac); diff --git a/server/src/main/java/org/opensearch/index/SearchSlowLog.java b/server/src/main/java/org/opensearch/index/SearchSlowLog.java index b463f46e476bf..37413388215c8 100644 --- a/server/src/main/java/org/opensearch/index/SearchSlowLog.java +++ b/server/src/main/java/org/opensearch/index/SearchSlowLog.java @@ -47,7 +47,6 @@ import org.opensearch.tasks.Task; import java.nio.charset.Charset; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -220,8 +219,6 @@ private static Map prepareMap(SearchContext context, long tookIn } else { messageFields.put("total_hits", "-1"); } - String[] types = context.getQueryShardContext().getTypes(); - messageFields.put("types", escapeJson(asJsonArray(types != null ? Arrays.stream(types) : Stream.empty()))); messageFields.put( "stats", escapeJson(asJsonArray(context.groupStats() != null ? 
context.groupStats().stream() : Stream.empty())) @@ -259,13 +256,6 @@ private static String message(SearchContext context, long tookInNanos) { sb.append("-1"); } sb.append("], "); - if (context.getQueryShardContext().getTypes() == null) { - sb.append("types[], "); - } else { - sb.append("types["); - Strings.arrayToDelimitedString(context.getQueryShardContext().getTypes(), ",", sb); - sb.append("], "); - } if (context.groupStats() == null) { sb.append("stats[], "); } else { diff --git a/server/src/main/java/org/opensearch/index/analysis/AbstractIndexAnalyzerProvider.java b/server/src/main/java/org/opensearch/index/analysis/AbstractIndexAnalyzerProvider.java index fadd6be733637..e1c5fd0ba3c64 100644 --- a/server/src/main/java/org/opensearch/index/analysis/AbstractIndexAnalyzerProvider.java +++ b/server/src/main/java/org/opensearch/index/analysis/AbstractIndexAnalyzerProvider.java @@ -33,7 +33,6 @@ package org.opensearch.index.analysis; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.util.Version; import org.opensearch.common.settings.Settings; import org.opensearch.index.AbstractIndexComponent; import org.opensearch.index.IndexSettings; @@ -42,8 +41,6 @@ public abstract class AbstractIndexAnalyzerProvider extends private final String name; - protected final Version version; - /** * Constructs a new analyzer component, with the index name and its settings and the analyzer name. * @@ -53,7 +50,6 @@ public abstract class AbstractIndexAnalyzerProvider extends public AbstractIndexAnalyzerProvider(IndexSettings indexSettings, String name, Settings settings) { super(indexSettings); this.name = name; - this.version = Analysis.parseAnalysisVersion(this.indexSettings.getSettings(), settings, logger); } /** diff --git a/server/src/main/java/org/opensearch/index/analysis/AbstractTokenFilterFactory.java b/server/src/main/java/org/opensearch/index/analysis/AbstractTokenFilterFactory.java index f3222753fabb5..fb465eb273382 100644 --- a/server/src/main/java/org/opensearch/index/analysis/AbstractTokenFilterFactory.java +++ b/server/src/main/java/org/opensearch/index/analysis/AbstractTokenFilterFactory.java @@ -32,7 +32,6 @@ package org.opensearch.index.analysis; -import org.apache.lucene.util.Version; import org.opensearch.common.settings.Settings; import org.opensearch.index.AbstractIndexComponent; import org.opensearch.index.IndexSettings; @@ -41,20 +40,13 @@ public abstract class AbstractTokenFilterFactory extends AbstractIndexComponent private final String name; - protected final Version version; - public AbstractTokenFilterFactory(IndexSettings indexSettings, String name, Settings settings) { super(indexSettings); this.name = name; - this.version = Analysis.parseAnalysisVersion(this.indexSettings.getSettings(), settings, logger); } @Override public String name() { return this.name; } - - public final Version version() { - return version; - } } diff --git a/server/src/main/java/org/opensearch/index/analysis/AbstractTokenizerFactory.java b/server/src/main/java/org/opensearch/index/analysis/AbstractTokenizerFactory.java index 49c18d5c1d704..9a0f4196bab16 100644 --- a/server/src/main/java/org/opensearch/index/analysis/AbstractTokenizerFactory.java +++ b/server/src/main/java/org/opensearch/index/analysis/AbstractTokenizerFactory.java @@ -32,25 +32,18 @@ package org.opensearch.index.analysis; -import org.apache.lucene.util.Version; import org.opensearch.common.settings.Settings; import org.opensearch.index.AbstractIndexComponent; import org.opensearch.index.IndexSettings; public abstract 
class AbstractTokenizerFactory extends AbstractIndexComponent implements TokenizerFactory { - protected final Version version; private final String name; public AbstractTokenizerFactory(IndexSettings indexSettings, Settings settings, String name) { super(indexSettings); - this.version = Analysis.parseAnalysisVersion(this.indexSettings.getSettings(), settings, logger); this.name = name; } - public final Version version() { - return version; - } - @Override public String name() { return name; diff --git a/server/src/main/java/org/opensearch/index/analysis/Analysis.java b/server/src/main/java/org/opensearch/index/analysis/Analysis.java index 38c5beaf9d80e..90bb21cfc0a4b 100644 --- a/server/src/main/java/org/opensearch/index/analysis/Analysis.java +++ b/server/src/main/java/org/opensearch/index/analysis/Analysis.java @@ -32,7 +32,6 @@ package org.opensearch.index.analysis; -import org.apache.logging.log4j.Logger; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.ar.ArabicAnalyzer; import org.apache.lucene.analysis.bg.BulgarianAnalyzer; @@ -68,9 +67,7 @@ import org.apache.lucene.analysis.sv.SwedishAnalyzer; import org.apache.lucene.analysis.th.ThaiAnalyzer; import org.apache.lucene.analysis.tr.TurkishAnalyzer; -import org.apache.lucene.util.Version; import org.opensearch.common.Strings; -import org.opensearch.common.lucene.Lucene; import org.opensearch.common.settings.Settings; import org.opensearch.env.Environment; @@ -93,21 +90,6 @@ public class Analysis { - public static Version parseAnalysisVersion(Settings indexSettings, Settings settings, Logger logger) { - // check for explicit version on the specific analyzer component - String sVersion = settings.get("version"); - if (sVersion != null) { - return Lucene.parseVersion(sVersion, Version.LATEST, logger); - } - // check for explicit version on the index itself as default for all analysis components - sVersion = indexSettings.get("index.analysis.version"); - if (sVersion != null) { - return Lucene.parseVersion(sVersion, Version.LATEST, logger); - } - // resolve the analysis version based on the version the index was created with - return org.opensearch.Version.indexCreated(indexSettings).luceneVersion; - } - public static CharArraySet parseStemExclusion(Settings settings, CharArraySet defaultStemExclusion) { String value = settings.get("stem_exclusion"); if ("_none_".equals(value)) { diff --git a/server/src/main/java/org/opensearch/index/analysis/PreBuiltAnalyzerProviderFactory.java b/server/src/main/java/org/opensearch/index/analysis/PreBuiltAnalyzerProviderFactory.java index aa3343f69b137..d2c5384153bd2 100644 --- a/server/src/main/java/org/opensearch/index/analysis/PreBuiltAnalyzerProviderFactory.java +++ b/server/src/main/java/org/opensearch/index/analysis/PreBuiltAnalyzerProviderFactory.java @@ -60,17 +60,13 @@ public class PreBuiltAnalyzerProviderFactory extends PreConfiguredAnalysisCompon PreBuiltAnalyzerProviderFactory(String name, PreBuiltAnalyzers preBuiltAnalyzer) { super(name, new PreBuiltAnalyzersDelegateCache(name, preBuiltAnalyzer)); this.create = preBuiltAnalyzer::getAnalyzer; - Analyzer analyzer = preBuiltAnalyzer.getAnalyzer(Version.CURRENT); - analyzer.setVersion(Version.CURRENT.luceneVersion); - current = new PreBuiltAnalyzerProvider(name, AnalyzerScope.INDICES, analyzer); + current = new PreBuiltAnalyzerProvider(name, AnalyzerScope.INDICES, preBuiltAnalyzer.getAnalyzer(Version.CURRENT)); } public PreBuiltAnalyzerProviderFactory(String name, PreBuiltCacheFactory.CachingStrategy cache, 
Supplier create) { super(name, cache); this.create = version -> create.get(); - Analyzer analyzer = create.get(); - analyzer.setVersion(Version.CURRENT.luceneVersion); - this.current = new PreBuiltAnalyzerProvider(name, AnalyzerScope.INDICES, analyzer); + this.current = new PreBuiltAnalyzerProvider(name, AnalyzerScope.INDICES, create.get()); } @Override @@ -88,7 +84,6 @@ public AnalyzerProvider get(IndexSettings indexSettings, Environment environm protected AnalyzerProvider create(Version version) { assert Version.CURRENT.equals(version) == false; Analyzer analyzer = create.apply(version); - analyzer.setVersion(version.luceneVersion); return new PreBuiltAnalyzerProvider(getName(), AnalyzerScope.INDICES, analyzer); } diff --git a/server/src/main/java/org/opensearch/index/analysis/SimpleAnalyzerProvider.java b/server/src/main/java/org/opensearch/index/analysis/SimpleAnalyzerProvider.java index 6fff247df63d7..85d25aaa753e6 100644 --- a/server/src/main/java/org/opensearch/index/analysis/SimpleAnalyzerProvider.java +++ b/server/src/main/java/org/opensearch/index/analysis/SimpleAnalyzerProvider.java @@ -44,7 +44,6 @@ public class SimpleAnalyzerProvider extends AbstractIndexAnalyzerProvider searcherFactory, @@ -767,7 +738,7 @@ public enum SearcherScope { /** * Acquires a lock on the translog files and Lucene soft-deleted documents to prevent them from being trimmed */ - public abstract Closeable acquireHistoryRetentionLock(HistorySource historySource); + public abstract Closeable acquireHistoryRetentionLock(); /** * Creates a new history snapshot from Lucene for reading operations whose seqno in the requesting seqno range (both inclusive). @@ -781,51 +752,7 @@ public abstract Translog.Snapshot newChangesSnapshot( boolean requiredFullRange ) throws IOException; - /** - * Creates a new history snapshot from either Lucene/Translog for reading operations whose seqno in the requesting - * seqno range (both inclusive). - */ - public Translog.Snapshot newChangesSnapshot( - String source, - HistorySource historySource, - MapperService mapperService, - long fromSeqNo, - long toSeqNo, - boolean requiredFullRange - ) throws IOException { - return newChangesSnapshot(source, mapperService, fromSeqNo, toSeqNo, requiredFullRange); - } - - /** - * Creates a new history snapshot for reading operations since {@code startingSeqNo} (inclusive). - * The returned snapshot can be retrieved from either Lucene index or translog files. - */ - public abstract Translog.Snapshot readHistoryOperations( - String reason, - HistorySource historySource, - MapperService mapperService, - long startingSeqNo - ) throws IOException; - - /** - * Returns the estimated number of history operations whose seq# at least {@code startingSeqNo}(inclusive) in this engine. - */ - public abstract int estimateNumberOfHistoryOperations( - String reason, - HistorySource historySource, - MapperService mapperService, - long startingSeqNo - ) throws IOException; - - /** - * Checks if this engine has every operations since {@code startingSeqNo}(inclusive) in its history (either Lucene or translog) - */ - public abstract boolean hasCompleteOperationHistory( - String reason, - HistorySource historySource, - MapperService mapperService, - long startingSeqNo - ) throws IOException; + public abstract boolean hasCompleteOperationHistory(String reason, long startingSeqNo); /** * Gets the minimum retained sequence number for this engine. 
@@ -903,14 +830,7 @@ public SegmentsStats segmentsStats(boolean includeSegmentFileSizes, boolean incl } protected void fillSegmentStats(SegmentReader segmentReader, boolean includeSegmentFileSizes, SegmentsStats stats) { - stats.add(1, segmentReader.ramBytesUsed()); - stats.addTermsMemoryInBytes(guardedRamBytesUsed(segmentReader.getPostingsReader())); - stats.addStoredFieldsMemoryInBytes(guardedRamBytesUsed(segmentReader.getFieldsReader())); - stats.addTermVectorsMemoryInBytes(guardedRamBytesUsed(segmentReader.getTermVectorsReader())); - stats.addNormsMemoryInBytes(guardedRamBytesUsed(segmentReader.getNormsReader())); - stats.addPointsMemoryInBytes(guardedRamBytesUsed(segmentReader.getPointsReader())); - stats.addDocValuesMemoryInBytes(guardedRamBytesUsed(segmentReader.getDocValuesReader())); - + stats.add(1); if (includeSegmentFileSizes) { // TODO: consider moving this to StoreStats stats.addFileSizes(getSegmentFileSizes(segmentReader)); @@ -1076,7 +996,6 @@ private void fillSegmentInfo(SegmentReader segmentReader, boolean verbose, boole } catch (IOException e) { logger.trace(() -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e); } - segment.memoryInBytes = segmentReader.ramBytesUsed(); segment.segmentSort = info.info.getIndexSort(); if (verbose) { segment.ramTree = Accountables.namedAccountable("root", segmentReader); @@ -1147,20 +1066,17 @@ public boolean refreshNeeded() { * @param force if true a lucene commit is executed even if no changes need to be committed. * @param waitIfOngoing if true this call will block until all currently running flushes have finished. * Otherwise this call will return without blocking. - * @return the commit Id for the resulting commit */ - public abstract CommitId flush(boolean force, boolean waitIfOngoing) throws EngineException; + public abstract void flush(boolean force, boolean waitIfOngoing) throws EngineException; /** * Flushes the state of the engine including the transaction log, clearing memory and persisting * documents in the lucene index to disk including a potentially heavy and durable fsync operation. * This operation is not going to block if another flush operation is currently running and won't write * a lucene commit if nothing needs to be committed. - * - * @return the commit Id for the resulting commit */ - public final CommitId flush() throws EngineException { - return flush(false, false); + public final void flush() throws EngineException { + flush(false, false); } /** @@ -1532,11 +1448,8 @@ public Index( assert (origin == Origin.PRIMARY) == (versionType != null) : "invalid version_type=" + versionType + " for origin=" + origin; assert ifPrimaryTerm >= 0 : "ifPrimaryTerm [" + ifPrimaryTerm + "] must be non negative"; assert ifSeqNo == UNASSIGNED_SEQ_NO || ifSeqNo >= 0 : "ifSeqNo [" + ifSeqNo + "] must be non negative or unset"; - assert (origin == Origin.PRIMARY) - || (ifSeqNo == UNASSIGNED_SEQ_NO - && ifPrimaryTerm == UNASSIGNED_PRIMARY_TERM) : "cas operations are only allowed if origin is primary. get [" - + origin - + "]"; + assert (origin == Origin.PRIMARY) || (ifSeqNo == UNASSIGNED_SEQ_NO && ifPrimaryTerm == UNASSIGNED_PRIMARY_TERM) + : "cas operations are only allowed if origin is primary. 
get [" + origin + "]"; this.doc = doc; this.isRetry = isRetry; this.autoGeneratedIdTimestamp = autoGeneratedIdTimestamp; @@ -1651,11 +1564,8 @@ public Delete( assert (origin == Origin.PRIMARY) == (versionType != null) : "invalid version_type=" + versionType + " for origin=" + origin; assert ifPrimaryTerm >= 0 : "ifPrimaryTerm [" + ifPrimaryTerm + "] must be non negative"; assert ifSeqNo == UNASSIGNED_SEQ_NO || ifSeqNo >= 0 : "ifSeqNo [" + ifSeqNo + "] must be non negative or unset"; - assert (origin == Origin.PRIMARY) - || (ifSeqNo == UNASSIGNED_SEQ_NO - && ifPrimaryTerm == UNASSIGNED_PRIMARY_TERM) : "cas operations are only allowed if origin is primary. get [" - + origin - + "]"; + assert (origin == Origin.PRIMARY) || (ifSeqNo == UNASSIGNED_SEQ_NO && ifPrimaryTerm == UNASSIGNED_PRIMARY_TERM) + : "cas operations are only allowed if origin is primary. get [" + origin + "]"; this.type = Objects.requireNonNull(type); this.id = Objects.requireNonNull(id); this.ifSeqNo = ifSeqNo; @@ -1776,16 +1686,15 @@ public int estimatedSizeInBytes() { public static class Get { private final boolean realtime; private final Term uid; - private final String type, id; + private final String id; private final boolean readFromTranslog; private long version = Versions.MATCH_ANY; private VersionType versionType = VersionType.INTERNAL; private long ifSeqNo = UNASSIGNED_SEQ_NO; private long ifPrimaryTerm = UNASSIGNED_PRIMARY_TERM; - public Get(boolean realtime, boolean readFromTranslog, String type, String id, Term uid) { + public Get(boolean realtime, boolean readFromTranslog, String id, Term uid) { this.realtime = realtime; - this.type = type; this.id = id; this.uid = uid; this.readFromTranslog = readFromTranslog; @@ -1795,10 +1704,6 @@ public boolean realtime() { return this.realtime; } - public String type() { - return type; - } - public String id() { return id; } @@ -1949,58 +1854,6 @@ private void awaitPendingClose() { } } - public static class CommitId implements Writeable { - - private final byte[] id; - - public CommitId(byte[] id) { - assert id != null; - this.id = Arrays.copyOf(id, id.length); - } - - /** - * Read from a stream. - */ - public CommitId(StreamInput in) throws IOException { - assert in != null; - this.id = in.readByteArray(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeByteArray(id); - } - - @Override - public String toString() { - return Base64.getEncoder().encodeToString(id); - } - - public boolean idsEqual(byte[] id) { - return Arrays.equals(id, this.id); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - - CommitId commitId = (CommitId) o; - - return Arrays.equals(id, commitId.id); - - } - - @Override - public int hashCode() { - return Arrays.hashCode(id); - } - } - public static class IndexCommitRef implements Closeable { private final AtomicBoolean closed = new AtomicBoolean(); private final CheckedRunnable onClose; @@ -2180,12 +2033,4 @@ public interface TranslogRecoveryRunner { * to advance this marker to at least the given sequence number. 
*/ public abstract void advanceMaxSeqNoOfUpdatesOrDeletes(long maxSeqNoOfUpdatesOnPrimary); - - /** - * Whether we should read history operations from translog or Lucene index - */ - public enum HistorySource { - TRANSLOG, - INDEX - } } diff --git a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java index 8604aca8f6308..3b69a8a25753e 100644 --- a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java @@ -41,6 +41,7 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.LiveIndexWriterConfig; import org.apache.lucene.index.MergePolicy; @@ -190,7 +191,6 @@ public class InternalEngine extends Engine { private final CounterMetric numDocAppends = new CounterMetric(); private final CounterMetric numDocUpdates = new CounterMetric(); private final NumericDocValuesField softDeletesField = Lucene.newSoftDeletesField(); - private final boolean softDeleteEnabled; private final SoftDeletesPolicy softDeletesPolicy; private final LastRefreshedCheckpointListener lastRefreshedCheckpointListener; @@ -268,7 +268,6 @@ public InternalEngine(EngineConfig engineConfig) { }); assert translog.getGeneration() != null; this.translog = translog; - this.softDeleteEnabled = engineConfig.getIndexSettings().isSoftDeleteEnabled(); this.softDeletesPolicy = newSoftDeletesPolicy(); this.combinedDeletionPolicy = new CombinedDeletionPolicy( logger, @@ -319,7 +318,7 @@ public InternalEngine(EngineConfig engineConfig) { this.lastRefreshedCheckpointListener = new LastRefreshedCheckpointListener(localCheckpointTracker.getProcessedCheckpoint()); this.internalReaderManager.addListener(lastRefreshedCheckpointListener); maxSeqNoOfUpdatesOrDeletes = new AtomicLong(SequenceNumbers.max(localCheckpointTracker.getMaxSeqNo(), translog.getMaxSeqNo())); - if (softDeleteEnabled && localCheckpointTracker.getPersistedCheckpoint() < localCheckpointTracker.getMaxSeqNo()) { + if (localCheckpointTracker.getPersistedCheckpoint() < localCheckpointTracker.getMaxSeqNo()) { try (Searcher searcher = acquireSearcher("restore_version_map_and_checkpoint_tracker", SearcherScope.INTERNAL)) { restoreVersionMapAndCheckpointTracker(Lucene.wrapAllDocsLive(searcher.getDirectoryReader())); } catch (IOException e) { @@ -511,8 +510,8 @@ public int fillSeqNoGaps(long primaryTerm) throws IOException { } syncTranslog(); // to persist noops associated with the advancement of the local checkpoint - assert localCheckpointTracker - .getPersistedCheckpoint() == maxSeqNo : "persisted local checkpoint did not advance to max seq no; is [" + assert localCheckpointTracker.getPersistedCheckpoint() == maxSeqNo + : "persisted local checkpoint did not advance to max seq no; is [" + localCheckpointTracker.getPersistedCheckpoint() + "], max seq no [" + maxSeqNo @@ -641,47 +640,6 @@ public void syncTranslog() throws IOException { revisitIndexDeletionPolicyOnTranslogSynced(); } - /** - * Creates a new history snapshot for reading operations since the provided seqno. - * The returned snapshot can be retrieved from either Lucene index or translog files. 
- */ - @Override - public Translog.Snapshot readHistoryOperations( - String reason, - HistorySource historySource, - MapperService mapperService, - long startingSeqNo - ) throws IOException { - if (historySource == HistorySource.INDEX) { - ensureSoftDeletesEnabled(); - return newChangesSnapshot(reason, mapperService, Math.max(0, startingSeqNo), Long.MAX_VALUE, false); - } else { - return getTranslog().newSnapshot(startingSeqNo, Long.MAX_VALUE); - } - } - - /** - * Returns the estimated number of history operations whose seq# at least the provided seq# in this engine. - */ - @Override - public int estimateNumberOfHistoryOperations( - String reason, - HistorySource historySource, - MapperService mapperService, - long startingSeqNo - ) throws IOException { - if (historySource == HistorySource.INDEX) { - ensureSoftDeletesEnabled(); - try ( - Translog.Snapshot snapshot = newChangesSnapshot(reason, mapperService, Math.max(0, startingSeqNo), Long.MAX_VALUE, false) - ) { - return snapshot.totalOperations(); - } - } else { - return getTranslog().estimateTotalOperationsFromMinSeq(startingSeqNo); - } - } - @Override public TranslogStats getTranslogStats() { return getTranslog().stats(); @@ -735,10 +693,7 @@ private ExternalReaderManager createReaderManager(RefreshWarmerListener external try { try { final OpenSearchDirectoryReader directoryReader = OpenSearchDirectoryReader.wrap(getDirectoryReader(), shardId); - internalReaderManager = new OpenSearchReaderManager( - directoryReader, - new RamAccountingRefreshListener(engineConfig.getCircuitBreakerService()) - ); + internalReaderManager = new OpenSearchReaderManager(directoryReader); lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo(); ExternalReaderManager externalReaderManager = new ExternalReaderManager(internalReaderManager, externalRefreshListener); success = true; @@ -902,8 +857,10 @@ private OpVsLuceneDocStatus compareOpToLuceneDocBasedOnSeqNo(final Operation op) } else if (op.seqNo() > docAndSeqNo.seqNo) { status = OpVsLuceneDocStatus.OP_NEWER; } else if (op.seqNo() == docAndSeqNo.seqNo) { - assert localCheckpointTracker.hasProcessed(op.seqNo()) - || softDeleteEnabled == false : "local checkpoint tracker is not updated seq_no=" + op.seqNo() + " id=" + op.id(); + assert localCheckpointTracker.hasProcessed(op.seqNo()) : "local checkpoint tracker is not updated seq_no=" + + op.seqNo() + + " id=" + + op.id(); status = OpVsLuceneDocStatus.OP_STALE_OR_EQUAL; } else { status = OpVsLuceneDocStatus.OP_STALE_OR_EQUAL; @@ -1208,7 +1165,7 @@ protected final IndexingStrategy planIndexingAsNonPrimary(Index index) throws IO versionMap.enforceSafeAccess(); final OpVsLuceneDocStatus opVsLucene = compareOpToLuceneDocBasedOnSeqNo(index); if (opVsLucene == OpVsLuceneDocStatus.OP_STALE_OR_EQUAL) { - plan = IndexingStrategy.processAsStaleOp(softDeleteEnabled, index.version()); + plan = IndexingStrategy.processAsStaleOp(index.version()); } else { plan = IndexingStrategy.processNormally(opVsLucene == OpVsLuceneDocStatus.LUCENE_DOC_NOT_FOUND, index.version(), 0); } @@ -1385,7 +1342,6 @@ private void addDocs(final List docs, final IndexWriter i } private void addStaleDocs(final List docs, final IndexWriter indexWriter) throws IOException { - assert softDeleteEnabled : "Add history documents but soft-deletes is disabled"; for (ParseContext.Document doc : docs) { doc.add(softDeletesField); // soft-deleted every document before adding to Lucene } @@ -1414,10 +1370,10 @@ private IndexingStrategy( int reservedDocs, IndexResult earlyResultOnPreFlightError ) { - 
assert useLuceneUpdateDocument == false - || indexIntoLucene : "use lucene update is set to true, but we're not indexing into lucene"; - assert (indexIntoLucene - && earlyResultOnPreFlightError != null) == false : "can only index into lucene or have a preflight result but not both." + assert useLuceneUpdateDocument == false || indexIntoLucene + : "use lucene update is set to true, but we're not indexing into lucene"; + assert (indexIntoLucene && earlyResultOnPreFlightError != null) == false + : "can only index into lucene or have a preflight result but not both." + "indexIntoLucene: " + indexIntoLucene + " earlyResultOnPreFlightError:" @@ -1463,8 +1419,8 @@ public static IndexingStrategy processButSkipLucene(boolean currentNotFoundOrDel return new IndexingStrategy(currentNotFoundOrDeleted, false, false, false, versionForIndexing, 0, null); } - static IndexingStrategy processAsStaleOp(boolean addStaleOpToLucene, long versionForIndexing) { - return new IndexingStrategy(false, false, false, addStaleOpToLucene, versionForIndexing, 0, null); + static IndexingStrategy processAsStaleOp(long versionForIndexing) { + return new IndexingStrategy(false, false, false, true, versionForIndexing, 0, null); } static IndexingStrategy failAsTooManyDocs(Exception e) { @@ -1498,18 +1454,10 @@ private boolean assertDocDoesNotExist(final Index index, final boolean allowDele } private void updateDocs(final Term uid, final List docs, final IndexWriter indexWriter) throws IOException { - if (softDeleteEnabled) { - if (docs.size() > 1) { - indexWriter.softUpdateDocuments(uid, docs, softDeletesField); - } else { - indexWriter.softUpdateDocument(uid, docs.get(0), softDeletesField); - } + if (docs.size() > 1) { + indexWriter.softUpdateDocuments(uid, docs, softDeletesField); } else { - if (docs.size() > 1) { - indexWriter.updateDocuments(uid, docs); - } else { - indexWriter.updateDocument(uid, docs.get(0)); - } + indexWriter.softUpdateDocument(uid, docs.get(0), softDeletesField); } numDocUpdates.inc(docs.size()); } @@ -1556,6 +1504,18 @@ public DeleteResult delete(Delete delete) throws IOException { if (plan.deleteFromLucene || plan.addStaleOpToLucene) { deleteResult = deleteInLucene(delete, plan); + if (plan.deleteFromLucene) { + numDocDeletes.inc(); + versionMap.putDeleteUnderLock( + delete.uid().bytes(), + new DeleteVersionValue( + plan.versionOfDeletion, + delete.seqNo(), + delete.primaryTerm(), + engineConfig.getThreadPool().relativeTimeInMillis() + ) + ); + } } else { deleteResult = new DeleteResult( plan.versionOfDeletion, @@ -1638,7 +1598,7 @@ protected final DeletionStrategy planDeletionAsNonPrimary(Delete delete) throws } else { final OpVsLuceneDocStatus opVsLucene = compareOpToLuceneDocBasedOnSeqNo(delete); if (opVsLucene == OpVsLuceneDocStatus.OP_STALE_OR_EQUAL) { - plan = DeletionStrategy.processAsStaleOp(softDeleteEnabled, delete.version()); + plan = DeletionStrategy.processAsStaleOp(delete.version()); } else { plan = DeletionStrategy.processNormally(opVsLucene == OpVsLuceneDocStatus.LUCENE_DOC_NOT_FOUND, delete.version(), 0); } @@ -1710,37 +1670,19 @@ private DeletionStrategy planDeletionAsPrimary(Delete delete) throws IOException private DeleteResult deleteInLucene(Delete delete, DeletionStrategy plan) throws IOException { assert assertMaxSeqNoOfUpdatesIsAdvanced(delete.uid(), delete.seqNo(), false, false); try { - if (softDeleteEnabled) { - final ParsedDocument tombstone = engineConfig.getTombstoneDocSupplier().newDeleteTombstoneDoc(delete.type(), delete.id()); - assert tombstone.docs().size() == 1 : 
"Tombstone doc should have single doc [" + tombstone + "]"; - tombstone.updateSeqID(delete.seqNo(), delete.primaryTerm()); - tombstone.version().setLongValue(plan.versionOfDeletion); - final ParseContext.Document doc = tombstone.docs().get(0); - assert doc.getField(SeqNoFieldMapper.TOMBSTONE_NAME) != null : "Delete tombstone document but _tombstone field is not set [" - + doc - + " ]"; - doc.add(softDeletesField); - if (plan.addStaleOpToLucene || plan.currentlyDeleted) { - indexWriter.addDocument(doc); - } else { - indexWriter.softUpdateDocument(delete.uid(), doc, softDeletesField); - } - } else if (plan.currentlyDeleted == false) { - // any exception that comes from this is a either an ACE or a fatal exception there - // can't be any document failures coming from this - indexWriter.deleteDocuments(delete.uid()); - } - if (plan.deleteFromLucene) { - numDocDeletes.inc(); - versionMap.putDeleteUnderLock( - delete.uid().bytes(), - new DeleteVersionValue( - plan.versionOfDeletion, - delete.seqNo(), - delete.primaryTerm(), - engineConfig.getThreadPool().relativeTimeInMillis() - ) - ); + final ParsedDocument tombstone = engineConfig.getTombstoneDocSupplier().newDeleteTombstoneDoc(delete.type(), delete.id()); + assert tombstone.docs().size() == 1 : "Tombstone doc should have single doc [" + tombstone + "]"; + tombstone.updateSeqID(delete.seqNo(), delete.primaryTerm()); + tombstone.version().setLongValue(plan.versionOfDeletion); + final ParseContext.Document doc = tombstone.docs().get(0); + assert doc.getField(SeqNoFieldMapper.TOMBSTONE_NAME) != null : "Delete tombstone document but _tombstone field is not set [" + + doc + + " ]"; + doc.add(softDeletesField); + if (plan.addStaleOpToLucene || plan.currentlyDeleted) { + indexWriter.addDocument(doc); + } else { + indexWriter.softUpdateDocument(delete.uid(), doc, softDeletesField); } return new DeleteResult(plan.versionOfDeletion, delete.primaryTerm(), delete.seqNo(), plan.currentlyDeleted == false); } catch (final Exception ex) { @@ -1779,8 +1721,8 @@ private DeletionStrategy( int reservedDocs, DeleteResult earlyResultOnPreflightError ) { - assert (deleteFromLucene - && earlyResultOnPreflightError != null) == false : "can only delete from lucene or have a preflight result but not both." + assert (deleteFromLucene && earlyResultOnPreflightError != null) == false + : "can only delete from lucene or have a preflight result but not both." 
+ "deleteFromLucene: " + deleteFromLucene + " earlyResultOnPreFlightError:" @@ -1820,8 +1762,8 @@ public static DeletionStrategy processButSkipLucene(boolean currentlyDeleted, lo return new DeletionStrategy(false, false, currentlyDeleted, versionOfDeletion, 0, null); } - static DeletionStrategy processAsStaleOp(boolean addStaleOpToLucene, long versionOfDeletion) { - return new DeletionStrategy(false, addStaleOpToLucene, false, versionOfDeletion, 0, null); + static DeletionStrategy processAsStaleOp(long versionOfDeletion) { + return new DeletionStrategy(false, true, false, versionOfDeletion, 0, null); } static DeletionStrategy failAsTooManyDocs(Exception e) { @@ -1878,7 +1820,7 @@ private NoOpResult innerNoOp(final NoOp noOp) throws IOException { ); } else { markSeqNoAsSeen(noOp.seqNo()); - if (softDeleteEnabled && hasBeenProcessedBefore(noOp) == false) { + if (hasBeenProcessedBefore(noOp) == false) { try { final ParsedDocument tombstone = engineConfig.getTombstoneDocSupplier().newNoopTombstoneDoc(noOp.reason()); tombstone.updateSeqID(noOp.seqNo(), noOp.primaryTerm()); @@ -1888,9 +1830,8 @@ private NoOpResult innerNoOp(final NoOp noOp) throws IOException { tombstone.version().setLongValue(1L); assert tombstone.docs().size() == 1 : "Tombstone should have a single doc [" + tombstone + "]"; final ParseContext.Document doc = tombstone.docs().get(0); - assert doc.getField( - SeqNoFieldMapper.TOMBSTONE_NAME - ) != null : "Noop tombstone document but _tombstone field is not set [" + doc + " ]"; + assert doc.getField(SeqNoFieldMapper.TOMBSTONE_NAME) != null + : "Noop tombstone document but _tombstone field is not set [" + doc + " ]"; doc.add(softDeletesField); indexWriter.addDocument(doc); } catch (final Exception ex) { @@ -1999,71 +1940,6 @@ public void writeIndexingBuffer() throws EngineException { refresh("write indexing buffer", SearcherScope.INTERNAL, false); } - @Override - public SyncedFlushResult syncFlush(String syncId, CommitId expectedCommitId) throws EngineException { - // best effort attempt before we acquire locks - ensureOpen(); - if (indexWriter.hasUncommittedChanges()) { - logger.trace("can't sync commit [{}]. have pending changes", syncId); - return SyncedFlushResult.PENDING_OPERATIONS; - } - if (expectedCommitId.idsEqual(lastCommittedSegmentInfos.getId()) == false) { - logger.trace("can't sync commit [{}]. current commit id is not equal to expected.", syncId); - return SyncedFlushResult.COMMIT_MISMATCH; - } - try (ReleasableLock lock = writeLock.acquire()) { - ensureOpen(); - ensureCanFlush(); - // lets do a refresh to make sure we shrink the version map. This refresh will be either a no-op (just shrink the version map) - // or we also have uncommitted changes and that causes this syncFlush to fail. - refresh("sync_flush", SearcherScope.INTERNAL, true); - if (indexWriter.hasUncommittedChanges()) { - logger.trace("can't sync commit [{}]. have pending changes", syncId); - return SyncedFlushResult.PENDING_OPERATIONS; - } - if (expectedCommitId.idsEqual(lastCommittedSegmentInfos.getId()) == false) { - logger.trace("can't sync commit [{}]. current commit id is not equal to expected.", syncId); - return SyncedFlushResult.COMMIT_MISMATCH; - } - logger.trace("starting sync commit [{}]", syncId); - commitIndexWriter(indexWriter, translog, syncId); - logger.debug("successfully sync committed. 
sync id [{}].", syncId); - lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo(); - return SyncedFlushResult.SUCCESS; - } catch (IOException ex) { - maybeFailEngine("sync commit", ex); - throw new EngineException(shardId, "failed to sync commit", ex); - } - } - - final boolean tryRenewSyncCommit() { - boolean renewed = false; - try (ReleasableLock lock = writeLock.acquire()) { - ensureOpen(); - ensureCanFlush(); - String syncId = lastCommittedSegmentInfos.getUserData().get(SYNC_COMMIT_ID); - long localCheckpointOfLastCommit = Long.parseLong(lastCommittedSegmentInfos.userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)); - if (syncId != null - && indexWriter.hasUncommittedChanges() - && translog.estimateTotalOperationsFromMinSeq(localCheckpointOfLastCommit + 1) == 0) { - logger.trace("start renewing sync commit [{}]", syncId); - commitIndexWriter(indexWriter, translog, syncId); - logger.debug("successfully sync committed. sync id [{}].", syncId); - lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo(); - renewed = true; - } - } catch (IOException ex) { - maybeFailEngine("renew sync commit", ex); - throw new EngineException(shardId, "failed to renew sync commit", ex); - } - if (renewed) { - // refresh outside of the write lock - // we have to refresh internal reader here to ensure we release unreferenced segments. - refresh("renew sync commit", SearcherScope.INTERNAL, true); - } - return renewed; - } - @Override public boolean shouldPeriodicallyFlush() { ensureOpen(); @@ -2103,9 +1979,9 @@ public boolean shouldPeriodicallyFlush() { } @Override - public CommitId flush(boolean force, boolean waitIfOngoing) throws EngineException { + public void flush(boolean force, boolean waitIfOngoing) throws EngineException { if (engineConfig.isPrimary() == false) { - return new CommitId(lastCommittedSegmentInfos.getId()); + return; } ensureOpen(); if (force && waitIfOngoing == false) { @@ -2114,18 +1990,16 @@ public CommitId flush(boolean force, boolean waitIfOngoing) throws EngineExcepti "wait_if_ongoing must be true for a force flush: force=" + force + " wait_if_ongoing=" + waitIfOngoing ); } - final byte[] newCommitId; try (ReleasableLock lock = readLock.acquire()) { ensureOpen(); if (flushLock.tryLock() == false) { // if we can't get the lock right away we block if needed otherwise barf - if (waitIfOngoing) { - logger.trace("waiting for in-flight flush to finish"); - flushLock.lock(); - logger.trace("acquired flush lock after blocking"); - } else { - return new CommitId(lastCommittedSegmentInfos.getId()); + if (waitIfOngoing == false) { + return; } + logger.trace("waiting for in-flight flush to finish"); + flushLock.lock(); + logger.trace("acquired flush lock after blocking"); } else { logger.trace("acquired flush lock immediately"); } @@ -2145,7 +2019,7 @@ public CommitId flush(boolean force, boolean waitIfOngoing) throws EngineExcepti try { translog.rollGeneration(); logger.trace("starting commit for flush; commitTranslog=true"); - commitIndexWriter(indexWriter, translog, null); + commitIndexWriter(indexWriter, translog); logger.trace("finished commit for flush"); // a temporary debugging to investigate test failure - issue#32827. 
Remove when the issue is resolved @@ -2168,7 +2042,6 @@ public CommitId flush(boolean force, boolean waitIfOngoing) throws EngineExcepti refreshLastCommittedSegmentInfos(); } - newCommitId = lastCommittedSegmentInfos.getId(); } catch (FlushFailedEngineException ex) { maybeFailEngine("flush", ex); throw ex; @@ -2181,7 +2054,6 @@ public CommitId flush(boolean force, boolean waitIfOngoing) throws EngineExcepti if (engineConfig.isEnableGcDeletes()) { pruneDeletedTombstones(); } - return new CommitId(newCommitId); } private void refreshLastCommittedSegmentInfos() { @@ -2353,9 +2225,7 @@ public void forceMerge( this.forceMergeUUID = forceMergeUUID; } if (flush) { - if (tryRenewSyncCommit() == false) { - flush(false, true); - } + flush(false, true); } if (upgrade) { logger.info("finished segment upgrade"); @@ -2550,9 +2420,8 @@ public List segments(boolean verbose) { @Override protected final void closeNoLock(String reason, CountDownLatch closedLatch) { if (isClosed.compareAndSet(false, true)) { - assert rwl.isWriteLockedByCurrentThread() - || failEngineLock - .isHeldByCurrentThread() : "Either the write lock must be held or the engine must be currently be failing itself"; + assert rwl.isWriteLockedByCurrentThread() || failEngineLock.isHeldByCurrentThread() + : "Either the write lock must be held or the engine must be currently be failing itself"; try { this.versionMap.clear(); if (internalReaderManager != null) { @@ -2640,17 +2509,15 @@ private IndexWriterConfig getIndexWriterConfig() { MergePolicy mergePolicy = config().getMergePolicy(); // always configure soft-deletes field so an engine with soft-deletes disabled can open a Lucene index with soft-deletes. iwc.setSoftDeletesField(Lucene.SOFT_DELETES_FIELD); - if (softDeleteEnabled) { - mergePolicy = new RecoverySourcePruneMergePolicy( - SourceFieldMapper.RECOVERY_SOURCE_NAME, + mergePolicy = new RecoverySourcePruneMergePolicy( + SourceFieldMapper.RECOVERY_SOURCE_NAME, + softDeletesPolicy::getRetentionQuery, + new SoftDeletesRetentionMergePolicy( + Lucene.SOFT_DELETES_FIELD, softDeletesPolicy::getRetentionQuery, - new SoftDeletesRetentionMergePolicy( - Lucene.SOFT_DELETES_FIELD, - softDeletesPolicy::getRetentionQuery, - new PrunePostingsMergePolicy(mergePolicy, IdFieldMapper.NAME) - ) - ); - } + new PrunePostingsMergePolicy(mergePolicy, IdFieldMapper.NAME) + ) + ); boolean shuffleForcedMerge = Booleans.parseBoolean(System.getProperty("opensearch.shuffle_forced_merge", Boolean.TRUE.toString())); if (shuffleForcedMerge) { // We wrap the merge policy for all indices even though it is mostly useful for time-based indices @@ -2777,15 +2644,9 @@ public void onFailure(Exception e) { @Override protected void doRun() { - // if we have no pending merges and we are supposed to flush once merges have finished - // we try to renew a sync commit which is the case when we are having a big merge after we - // are inactive. If that didn't work we go and do a real flush which is ok since it only doesn't work - // if we either have records in the translog or if we don't have a sync ID at all... 
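A note on the net effect of the flush changes in this file before the deleted comment block continues: flush(boolean, boolean) now returns void, flush() delegates to flush(false, false), and the synced-flush machinery (syncFlush, tryRenewSyncCommit, CommitId) is removed. A hedged sketch of the caller-side contract after this patch; the wrapper class is illustrative:

import org.opensearch.index.engine.Engine;

class FlushSketch {
    // Callers that previously captured the CommitId returned by flush (e.g. for
    // synced-flush bookkeeping) now invoke it purely for its side effect.
    static void fullFlush(Engine engine) {
        engine.flush(true, true); // force a commit, waiting on any in-flight flush
        engine.flush();           // best-effort variant, equivalent to flush(false, false)
    }
}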
- // maybe even more important, we flush after all merges finish and we are inactive indexing-wise to + // if we have no pending merges and we are supposed to flush once merges have finished to // free up transient disk usage of the (presumably biggish) segments that were just merged - if (tryRenewSyncCommit() == false) { - flush(); - } + flush(); } }); } else if (merge.getTotalBytesSize() >= engineConfig.getIndexSettings().getFlushAfterMergeThresholdSize().getBytes()) { @@ -2822,10 +2683,8 @@ protected void doRun() throws Exception { * * @param writer the index writer to commit * @param translog the translog - * @param syncId the sync flush ID ({@code null} if not committing a synced flush) - * @throws IOException if an I/O exception occurs committing the specified writer */ - protected void commitIndexWriter(final IndexWriter writer, final Translog translog, @Nullable final String syncId) throws IOException { + protected void commitIndexWriter(final IndexWriter writer, final Translog translog) throws IOException { ensureCanFlush(); try { final long localCheckpoint = localCheckpointTracker.getProcessedCheckpoint(); @@ -2842,15 +2701,10 @@ protected void commitIndexWriter(final IndexWriter writer, final Translog transl final Map<String, String> commitData = new HashMap<>(7); commitData.put(Translog.TRANSLOG_UUID_KEY, translog.getTranslogUUID()); commitData.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(localCheckpoint)); - if (syncId != null) { - commitData.put(Engine.SYNC_COMMIT_ID, syncId); - } commitData.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(localCheckpointTracker.getMaxSeqNo())); commitData.put(MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID, Long.toString(maxUnsafeAutoIdTimestamp.get())); commitData.put(HISTORY_UUID_KEY, historyUUID); - if (softDeleteEnabled) { - commitData.put(Engine.MIN_RETAINED_SEQNO, Long.toString(softDeletesPolicy.getMinRetainedSeqNo())); - } + commitData.put(Engine.MIN_RETAINED_SEQNO, Long.toString(softDeletesPolicy.getMinRetainedSeqNo())); final String currentForceMergeUUID = forceMergeUUID; if (currentForceMergeUUID != null) { commitData.put(FORCE_MERGE_UUID_KEY, currentForceMergeUUID); @@ -3018,38 +2872,14 @@ long getNumDocUpdates() { return numDocUpdates.count(); } - private void ensureSoftDeletesEnabled() { - if (softDeleteEnabled == false) { - assert false : "index " + shardId.getIndex() + " does not have soft-deletes enabled"; - throw new IllegalStateException("index " + shardId.getIndex() + " does not have soft-deletes enabled"); - } - } - @Override public Translog.Snapshot newChangesSnapshot( String source, - HistorySource historySource, MapperService mapperService, long fromSeqNo, long toSeqNo, boolean requiredFullRange ) throws IOException { - if (historySource == HistorySource.INDEX) { - return newChangesSnapshot(source, mapperService, fromSeqNo, toSeqNo, requiredFullRange); - } else { - return getTranslog().newSnapshot(fromSeqNo, toSeqNo, requiredFullRange); - } - } - - @Override - public Translog.Snapshot newChangesSnapshot( - String source, - MapperService mapperService, - long fromSeqNo, - long toSeqNo, - boolean requiredFullRange - ) throws IOException { - ensureSoftDeletesEnabled(); ensureOpen(); refreshIfNeeded(source, toSeqNo); Searcher searcher = acquireSearcher(source, SearcherScope.INTERNAL); @@ -3076,29 +2906,8 @@ public Translog.Snapshot newChangesSnapshot( } } - @Override - public boolean hasCompleteOperationHistory(String reason, HistorySource historySource, MapperService mapperService, long startingSeqNo) - throws IOException { - if
(historySource == HistorySource.INDEX) { - ensureSoftDeletesEnabled(); - return getMinRetainedSeqNo() <= startingSeqNo; - } else { - final long currentLocalCheckpoint = localCheckpointTracker.getProcessedCheckpoint(); - // avoid scanning translog if not necessary - if (startingSeqNo > currentLocalCheckpoint) { - return true; - } - final LocalCheckpointTracker tracker = new LocalCheckpointTracker(startingSeqNo, startingSeqNo - 1); - try (Translog.Snapshot snapshot = getTranslog().newSnapshot(startingSeqNo, Long.MAX_VALUE)) { - Translog.Operation operation; - while ((operation = snapshot.next()) != null) { - if (operation.seqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) { - tracker.markSeqNoAsProcessed(operation.seqNo()); - } - } - } - return tracker.getProcessedCheckpoint() >= currentLocalCheckpoint; - } + public boolean hasCompleteOperationHistory(String reason, long startingSeqNo) { + return getMinRetainedSeqNo() <= startingSeqNo; } /** @@ -3106,18 +2915,11 @@ public boolean hasCompleteOperationHistory(String reason, HistorySource historyS * Operations whose seq# are at least this value should exist in the Lucene index. */ public final long getMinRetainedSeqNo() { - ensureSoftDeletesEnabled(); return softDeletesPolicy.getMinRetainedSeqNo(); } - @Override - public Closeable acquireHistoryRetentionLock(HistorySource historySource) { - if (historySource == HistorySource.INDEX) { - ensureSoftDeletesEnabled(); - return softDeletesPolicy.acquireRetentionLock(); - } else { - return translog.acquireRetentionLock(); - } + public Closeable acquireHistoryRetentionLock() { + return softDeletesPolicy.acquireRetentionLock(); } /** @@ -3131,15 +2933,14 @@ private static Map commitDataAsMap(final IndexWriter indexWriter return commitData; } - private final class AssertingIndexWriter extends IndexWriter { + private static class AssertingIndexWriter extends IndexWriter { AssertingIndexWriter(Directory d, IndexWriterConfig conf) throws IOException { super(d, conf); } @Override - public long deleteDocuments(Term... 
terms) throws IOException { - assert softDeleteEnabled == false : "Call #deleteDocuments but soft-deletes is enabled"; - return super.deleteDocuments(terms); + public long updateDocuments(Term delTerm, Iterable> docs) { + throw new AssertionError("must not hard update documents"); } @Override diff --git a/server/src/main/java/org/opensearch/index/engine/OpenSearchReaderManager.java b/server/src/main/java/org/opensearch/index/engine/OpenSearchReaderManager.java index c1c3f37e1bbe9..da2365a94aa8a 100644 --- a/server/src/main/java/org/opensearch/index/engine/OpenSearchReaderManager.java +++ b/server/src/main/java/org/opensearch/index/engine/OpenSearchReaderManager.java @@ -35,7 +35,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; -import java.util.function.BiConsumer; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -62,8 +61,6 @@ class OpenSearchReaderManager extends ReferenceManager { protected static Logger logger = LogManager.getLogger(OpenSearchReaderManager.class); - private final BiConsumer refreshListener; - private volatile SegmentInfos currentInfos; /** @@ -72,15 +69,9 @@ class OpenSearchReaderManager extends ReferenceManager refreshListener - ) { + OpenSearchReaderManager(OpenSearchDirectoryReader reader) { this.current = reader; - this.refreshListener = refreshListener; - refreshListener.accept(current, null); } @Override @@ -111,9 +102,6 @@ protected OpenSearchDirectoryReader refreshIfNeeded(OpenSearchDirectoryReader re reader = OpenSearchDirectoryReader.wrap(innerReader, referenceToRefresh.shardId()); logger.trace("updated to SegmentInfosVersion=" + currentInfos.getVersion() + " reader=" + innerReader); } - if (reader != null) { - refreshListener.accept(reader, referenceToRefresh); - } return reader; } diff --git a/server/src/main/java/org/opensearch/index/engine/RamAccountingRefreshListener.java b/server/src/main/java/org/opensearch/index/engine/RamAccountingRefreshListener.java deleted file mode 100644 index beba2ff25304e..0000000000000 --- a/server/src/main/java/org/opensearch/index/engine/RamAccountingRefreshListener.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.index.engine; - -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.SegmentReader; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.common.lucene.Lucene; -import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; -import org.opensearch.indices.breaker.CircuitBreakerService; - -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import java.util.function.BiConsumer; - -/** - * A refresh listener that tracks the amount of memory used by segments in the accounting circuit breaker. - */ -final class RamAccountingRefreshListener implements BiConsumer { - - private final CircuitBreakerService breakerService; - - RamAccountingRefreshListener(CircuitBreakerService breakerService) { - this.breakerService = breakerService; - } - - @Override - public void accept(OpenSearchDirectoryReader reader, OpenSearchDirectoryReader previousReader) { - final CircuitBreaker breaker = breakerService.getBreaker(CircuitBreaker.ACCOUNTING); - - // Construct a list of the previous segment readers, we only want to track memory used - // by new readers, so these will be exempted from the circuit breaking accounting. - // - // The Core CacheKey is used as the key for the set so that deletions still keep the correct - // accounting, as using the Reader or Reader's CacheKey causes incorrect accounting. - final Set prevReaders; - if (previousReader == null) { - prevReaders = Collections.emptySet(); - } else { - final List previousReaderLeaves = previousReader.leaves(); - prevReaders = new HashSet<>(previousReaderLeaves.size()); - for (LeafReaderContext lrc : previousReaderLeaves) { - prevReaders.add(Lucene.segmentReader(lrc.reader()).getCoreCacheHelper().getKey()); - } - } - - for (LeafReaderContext lrc : reader.leaves()) { - final SegmentReader segmentReader = Lucene.segmentReader(lrc.reader()); - // don't add the segment's memory unless it is not referenced by the previous reader - // (only new segments) - if (prevReaders.contains(segmentReader.getCoreCacheHelper().getKey()) == false) { - final long ramBytesUsed = segmentReader.ramBytesUsed(); - // add the segment memory to the breaker (non-breaking) - breaker.addWithoutBreaking(ramBytesUsed); - // and register a listener for when the segment is closed to decrement the - // breaker accounting - segmentReader.getCoreCacheHelper().addClosedListener(k -> breaker.addWithoutBreaking(-ramBytesUsed)); - } - } - } -} diff --git a/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java index 4f7775405cb50..e936ae10b04ea 100644 --- a/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java @@ -85,7 +85,6 @@ public class ReadOnlyEngine extends Engine { private final OpenSearchReaderManager readerManager; private final IndexCommit indexCommit; private final Lock indexWriterLock; - private final RamAccountingRefreshListener refreshListener; private final SafeCommitInfo safeCommitInfo; private final CompletionStatsCache completionStatsCache; private final boolean requireCompleteHistory; @@ -114,7 +113,6 @@ public ReadOnlyEngine( boolean requireCompleteHistory ) { super(config); - this.refreshListener = new RamAccountingRefreshListener(engineConfig.getCircuitBreakerService()); this.requireCompleteHistory = 
requireCompleteHistory; try { Store store = config.getStore(); @@ -135,14 +133,13 @@ public ReadOnlyEngine( this.seqNoStats = seqNoStats; this.indexCommit = Lucene.getIndexCommit(lastCommittedSegmentInfos, directory); reader = wrapReader(open(indexCommit), readerWrapperFunction); - readerManager = new OpenSearchReaderManager(reader, refreshListener); + readerManager = new OpenSearchReaderManager(reader); assert translogStats != null || obtainLock : "multiple translog instances should not be opened at the same time"; this.translogStats = translogStats != null ? translogStats : translogStats(config, lastCommittedSegmentInfos); this.indexWriterLock = indexWriterLock; this.safeCommitInfo = new SafeCommitInfo(seqNoStats.getLocalCheckpoint(), lastCommittedSegmentInfos.totalMaxDoc()); completionStatsCache = new CompletionStatsCache(() -> acquireSearcher("completion_stats")); - // no need to register a refresh listener to invalidate completionStatsCache since this engine is readonly success = true; } finally { @@ -200,16 +197,13 @@ protected final OpenSearchDirectoryReader wrapReader( DirectoryReader reader, Function<DirectoryReader, DirectoryReader> readerWrapperFunction ) throws IOException { - if (engineConfig.getIndexSettings().isSoftDeleteEnabled()) { - reader = new SoftDeletesDirectoryReaderWrapper(reader, Lucene.SOFT_DELETES_FIELD); - } reader = readerWrapperFunction.apply(reader); return OpenSearchDirectoryReader.wrap(reader, engineConfig.getShardId()); } protected DirectoryReader open(IndexCommit commit) throws IOException { assert Transports.assertNotTransportThread("opening index commit of a read-only engine"); - return DirectoryReader.open(commit); + return new SoftDeletesDirectoryReaderWrapper(DirectoryReader.open(commit), Lucene.SOFT_DELETES_FIELD); } @Override @@ -332,7 +326,7 @@ public boolean ensureTranslogSynced(Stream<Translog.Location> locations) { public void syncTranslog() {} @Override - public Closeable acquireHistoryRetentionLock(HistorySource historySource) { + public Closeable acquireHistoryRetentionLock() { return () -> {}; } @@ -343,40 +337,11 @@ public Translog.Snapshot newChangesSnapshot( long fromSeqNo, long toSeqNo, boolean requiredFullRange - ) throws IOException { - if (engineConfig.getIndexSettings().isSoftDeleteEnabled() == false) { - throw new IllegalStateException("accessing changes snapshot requires soft-deletes enabled"); - } - return newEmptySnapshot(); - } - - @Override - public Translog.Snapshot readHistoryOperations( - String reason, - HistorySource historySource, - MapperService mapperService, - long startingSeqNo ) { return newEmptySnapshot(); } - @Override - public int estimateNumberOfHistoryOperations( - String reason, - HistorySource historySource, - MapperService mapperService, - long startingSeqNo - ) { - return 0; - } - - @Override - public boolean hasCompleteOperationHistory( - String reason, - HistorySource historySource, - MapperService mapperService, - long startingSeqNo - ) { + public boolean hasCompleteOperationHistory(String reason, long startingSeqNo) { // we can do operation-based recovery if we don't have to replay any operation.
         return startingSeqNo > seqNoStats.getMaxSeqNo();
     }
@@ -441,15 +406,7 @@ public boolean shouldPeriodicallyFlush() {
     }
 
     @Override
-    public SyncedFlushResult syncFlush(String syncId, CommitId expectedCommitId) {
-        // we can't do synced flushes this would require an indexWriter which we don't have
-        throw new UnsupportedOperationException("syncedFlush is not supported on a read-only engine");
-    }
-
-    @Override
-    public CommitId flush(boolean force, boolean waitIfOngoing) throws EngineException {
-        return new CommitId(lastCommittedSegmentInfos.getId());
-    }
+    public void flush(boolean force, boolean waitIfOngoing) throws EngineException {}
 
     @Override
     public void forceMerge(
@@ -531,10 +488,6 @@ public void updateMaxUnsafeAutoIdTimestamp(long newTimestamp) {
     }
 
-    protected void processReader(OpenSearchDirectoryReader reader) {
-        refreshListener.accept(reader, null);
-    }
-
     @Override
     public boolean refreshNeeded() {
         return false;
diff --git a/server/src/main/java/org/opensearch/index/engine/Segment.java b/server/src/main/java/org/opensearch/index/engine/Segment.java
index 1ef3a2f94a8e1..2b824c847f75f 100644
--- a/server/src/main/java/org/opensearch/index/engine/Segment.java
+++ b/server/src/main/java/org/opensearch/index/engine/Segment.java
@@ -40,6 +40,7 @@
 import org.apache.lucene.search.SortedNumericSelector;
 import org.apache.lucene.util.Accountable;
 import org.apache.lucene.util.Accountables;
+import org.opensearch.Version;
 import org.opensearch.common.Nullable;
 import org.opensearch.common.io.stream.StreamInput;
 import org.opensearch.common.io.stream.StreamOutput;
@@ -66,11 +67,12 @@ public class Segment implements Writeable {
     public org.apache.lucene.util.Version version = null;
     public Boolean compound = null;
     public String mergeId;
-    public long memoryInBytes;
     public Sort segmentSort;
     public Accountable ramTree = null;
     public Map<String, String> attributes;
 
+    private static final ByteSizeValue ZERO_BYTE_SIZE_VALUE = new ByteSizeValue(0L);
+
     public Segment(StreamInput in) throws IOException {
         name = in.readString();
         generation = Long.parseLong(name.substring(1), Character.MAX_RADIX);
@@ -82,7 +84,11 @@ public Segment(StreamInput in) throws IOException {
         version = Lucene.parseVersionLenient(in.readOptionalString(), null);
         compound = in.readOptionalBoolean();
         mergeId = in.readOptionalString();
-        memoryInBytes = in.readLong();
+        // the following was removed in Lucene 9 (https://issues.apache.org/jira/browse/LUCENE-9387)
+        // retain for bwc only (todo: remove in OpenSearch 3)
+        if (in.getVersion().before(Version.V_2_0_0)) {
+            in.readLong(); // estimated memory
+        }
         if (in.readBoolean()) {
             // verbose mode
             ramTree = readRamTree(in);
@@ -145,10 +151,13 @@ public String getMergeId() {
     }
 
     /**
-     * Estimation of the memory usage used by a segment.
+     * Estimation of the memory usage by a segment was removed in Lucene 9 (https://issues.apache.org/jira/browse/LUCENE-9387);
+     * retained for bwc only (todo: remove in OpenSearch 3).
+     * @deprecated
      */
-    public long getMemoryInBytes() {
-        return this.memoryInBytes;
+    @Deprecated
+    public ByteSizeValue getZeroMemory() {
+        return ZERO_BYTE_SIZE_VALUE;
     }
 
     /**
@@ -193,7 +202,11 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeOptionalString(version.toString());
         out.writeOptionalBoolean(compound);
         out.writeOptionalString(mergeId);
-        out.writeLong(memoryInBytes);
+        // the following was removed in Lucene 9 (https://issues.apache.org/jira/browse/LUCENE-9387)
+        // retain for bwc only (todo: remove in OpenSearch 3)
+        if (out.getVersion().before(Version.V_2_0_0)) {
+            out.writeLong(0L);
+        }
 
         boolean verbose = ramTree != null;
         out.writeBoolean(verbose);
@@ -350,8 +363,6 @@ public String toString() {
             + ", mergeId='"
             + mergeId
             + '\''
-            + ", memoryInBytes="
-            + memoryInBytes
             + (segmentSort != null ? ", sort=" + segmentSort : "")
             + ", attributes="
             + attributes
diff --git a/server/src/main/java/org/opensearch/index/engine/SegmentsStats.java b/server/src/main/java/org/opensearch/index/engine/SegmentsStats.java
index 0db8ca7b94425..880091c192034 100644
--- a/server/src/main/java/org/opensearch/index/engine/SegmentsStats.java
+++ b/server/src/main/java/org/opensearch/index/engine/SegmentsStats.java
@@ -33,6 +33,7 @@
 package org.opensearch.index.engine;
 
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import org.opensearch.Version;
 import org.opensearch.common.collect.ImmutableOpenMap;
 import org.opensearch.common.io.stream.StreamInput;
 import org.opensearch.common.io.stream.StreamOutput;
@@ -46,19 +47,14 @@ public class SegmentsStats implements Writeable, ToXContentFragment {
 
     private long count;
-    private long memoryInBytes;
-    private long termsMemoryInBytes;
-    private long storedFieldsMemoryInBytes;
-    private long termVectorsMemoryInBytes;
-    private long normsMemoryInBytes;
-    private long pointsMemoryInBytes;
-    private long docValuesMemoryInBytes;
     private long indexWriterMemoryInBytes;
     private long versionMapMemoryInBytes;
     private long maxUnsafeAutoIdTimestamp = Long.MIN_VALUE;
     private long bitsetMemoryInBytes;
     private ImmutableOpenMap<String, Long> fileSizes = ImmutableOpenMap.of();
 
+    private static final ByteSizeValue ZERO_BYTE_SIZE_VALUE = new ByteSizeValue(0L);
+
    /*
     * A map to provide a best-effort approach describing Lucene index files.
* @@ -91,13 +87,17 @@ public SegmentsStats() {} public SegmentsStats(StreamInput in) throws IOException { count = in.readVLong(); - memoryInBytes = in.readLong(); - termsMemoryInBytes = in.readLong(); - storedFieldsMemoryInBytes = in.readLong(); - termVectorsMemoryInBytes = in.readLong(); - normsMemoryInBytes = in.readLong(); - pointsMemoryInBytes = in.readLong(); - docValuesMemoryInBytes = in.readLong(); + // the following was removed in Lucene 9 (https://issues.apache.org/jira/browse/LUCENE-9387) + // retain for bwc only (todo: remove in OpenSearch 3) + if (in.getVersion().before(Version.V_2_0_0)) { + in.readLong(); // estimated segment memory + in.readLong(); // estimated terms memory + in.readLong(); // estimated stored fields memory + in.readLong(); // estimated term vector memory + in.readLong(); // estimated norms memory + in.readLong(); // estimated points memory + in.readLong(); // estimated doc values memory + } indexWriterMemoryInBytes = in.readLong(); versionMapMemoryInBytes = in.readLong(); bitsetMemoryInBytes = in.readLong(); @@ -113,33 +113,8 @@ public SegmentsStats(StreamInput in) throws IOException { fileSizes = map.build(); } - public void add(long count, long memoryInBytes) { + public void add(long count) { this.count += count; - this.memoryInBytes += memoryInBytes; - } - - public void addTermsMemoryInBytes(long termsMemoryInBytes) { - this.termsMemoryInBytes += termsMemoryInBytes; - } - - public void addStoredFieldsMemoryInBytes(long storedFieldsMemoryInBytes) { - this.storedFieldsMemoryInBytes += storedFieldsMemoryInBytes; - } - - public void addTermVectorsMemoryInBytes(long termVectorsMemoryInBytes) { - this.termVectorsMemoryInBytes += termVectorsMemoryInBytes; - } - - public void addNormsMemoryInBytes(long normsMemoryInBytes) { - this.normsMemoryInBytes += normsMemoryInBytes; - } - - public void addPointsMemoryInBytes(long pointsMemoryInBytes) { - this.pointsMemoryInBytes += pointsMemoryInBytes; - } - - public void addDocValuesMemoryInBytes(long docValuesMemoryInBytes) { - this.docValuesMemoryInBytes += docValuesMemoryInBytes; } public void addIndexWriterMemoryInBytes(long indexWriterMemoryInBytes) { @@ -178,13 +153,7 @@ public void add(SegmentsStats mergeStats) { return; } updateMaxUnsafeAutoIdTimestamp(mergeStats.maxUnsafeAutoIdTimestamp); - add(mergeStats.count, mergeStats.memoryInBytes); - addTermsMemoryInBytes(mergeStats.termsMemoryInBytes); - addStoredFieldsMemoryInBytes(mergeStats.storedFieldsMemoryInBytes); - addTermVectorsMemoryInBytes(mergeStats.termVectorsMemoryInBytes); - addNormsMemoryInBytes(mergeStats.normsMemoryInBytes); - addPointsMemoryInBytes(mergeStats.pointsMemoryInBytes); - addDocValuesMemoryInBytes(mergeStats.docValuesMemoryInBytes); + add(mergeStats.count); addIndexWriterMemoryInBytes(mergeStats.indexWriterMemoryInBytes); addVersionMapMemoryInBytes(mergeStats.versionMapMemoryInBytes); addBitsetMemoryInBytes(mergeStats.bitsetMemoryInBytes); @@ -198,83 +167,6 @@ public long getCount() { return this.count; } - /** - * Estimation of the memory usage used by a segment. - */ - public long getMemoryInBytes() { - return this.memoryInBytes; - } - - public ByteSizeValue getMemory() { - return new ByteSizeValue(memoryInBytes); - } - - /** - * Estimation of the terms dictionary memory usage by a segment. - */ - public long getTermsMemoryInBytes() { - return this.termsMemoryInBytes; - } - - private ByteSizeValue getTermsMemory() { - return new ByteSizeValue(termsMemoryInBytes); - } - - /** - * Estimation of the stored fields memory usage by a segment. 
- */ - public long getStoredFieldsMemoryInBytes() { - return this.storedFieldsMemoryInBytes; - } - - private ByteSizeValue getStoredFieldsMemory() { - return new ByteSizeValue(storedFieldsMemoryInBytes); - } - - /** - * Estimation of the term vectors memory usage by a segment. - */ - public long getTermVectorsMemoryInBytes() { - return this.termVectorsMemoryInBytes; - } - - private ByteSizeValue getTermVectorsMemory() { - return new ByteSizeValue(termVectorsMemoryInBytes); - } - - /** - * Estimation of the norms memory usage by a segment. - */ - public long getNormsMemoryInBytes() { - return this.normsMemoryInBytes; - } - - private ByteSizeValue getNormsMemory() { - return new ByteSizeValue(normsMemoryInBytes); - } - - /** - * Estimation of the points memory usage by a segment. - */ - public long getPointsMemoryInBytes() { - return this.pointsMemoryInBytes; - } - - private ByteSizeValue getPointsMemory() { - return new ByteSizeValue(pointsMemoryInBytes); - } - - /** - * Estimation of the doc values memory usage by a segment. - */ - public long getDocValuesMemoryInBytes() { - return this.docValuesMemoryInBytes; - } - - private ByteSizeValue getDocValuesMemory() { - return new ByteSizeValue(docValuesMemoryInBytes); - } - /** * Estimation of the memory usage by index writer */ @@ -324,13 +216,13 @@ public long getMaxUnsafeAutoIdTimestamp() { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(Fields.SEGMENTS); builder.field(Fields.COUNT, count); - builder.humanReadableField(Fields.MEMORY_IN_BYTES, Fields.MEMORY, getMemory()); - builder.humanReadableField(Fields.TERMS_MEMORY_IN_BYTES, Fields.TERMS_MEMORY, getTermsMemory()); - builder.humanReadableField(Fields.STORED_FIELDS_MEMORY_IN_BYTES, Fields.STORED_FIELDS_MEMORY, getStoredFieldsMemory()); - builder.humanReadableField(Fields.TERM_VECTORS_MEMORY_IN_BYTES, Fields.TERM_VECTORS_MEMORY, getTermVectorsMemory()); - builder.humanReadableField(Fields.NORMS_MEMORY_IN_BYTES, Fields.NORMS_MEMORY, getNormsMemory()); - builder.humanReadableField(Fields.POINTS_MEMORY_IN_BYTES, Fields.POINTS_MEMORY, getPointsMemory()); - builder.humanReadableField(Fields.DOC_VALUES_MEMORY_IN_BYTES, Fields.DOC_VALUES_MEMORY, getDocValuesMemory()); + builder.humanReadableField(Fields.MEMORY_IN_BYTES, Fields.MEMORY, ZERO_BYTE_SIZE_VALUE); + builder.humanReadableField(Fields.TERMS_MEMORY_IN_BYTES, Fields.TERMS_MEMORY, ZERO_BYTE_SIZE_VALUE); + builder.humanReadableField(Fields.STORED_FIELDS_MEMORY_IN_BYTES, Fields.STORED_FIELDS_MEMORY, ZERO_BYTE_SIZE_VALUE); + builder.humanReadableField(Fields.TERM_VECTORS_MEMORY_IN_BYTES, Fields.TERM_VECTORS_MEMORY, ZERO_BYTE_SIZE_VALUE); + builder.humanReadableField(Fields.NORMS_MEMORY_IN_BYTES, Fields.NORMS_MEMORY, ZERO_BYTE_SIZE_VALUE); + builder.humanReadableField(Fields.POINTS_MEMORY_IN_BYTES, Fields.POINTS_MEMORY, ZERO_BYTE_SIZE_VALUE); + builder.humanReadableField(Fields.DOC_VALUES_MEMORY_IN_BYTES, Fields.DOC_VALUES_MEMORY, ZERO_BYTE_SIZE_VALUE); builder.humanReadableField(Fields.INDEX_WRITER_MEMORY_IN_BYTES, Fields.INDEX_WRITER_MEMORY, getIndexWriterMemory()); builder.humanReadableField(Fields.VERSION_MAP_MEMORY_IN_BYTES, Fields.VERSION_MAP_MEMORY, getVersionMapMemory()); builder.humanReadableField(Fields.FIXED_BIT_SET_MEMORY_IN_BYTES, Fields.FIXED_BIT_SET, getBitsetMemory()); @@ -380,13 +272,17 @@ static final class Fields { @Override public void writeTo(StreamOutput out) throws IOException { out.writeVLong(count); - out.writeLong(memoryInBytes); - 
out.writeLong(termsMemoryInBytes); - out.writeLong(storedFieldsMemoryInBytes); - out.writeLong(termVectorsMemoryInBytes); - out.writeLong(normsMemoryInBytes); - out.writeLong(pointsMemoryInBytes); - out.writeLong(docValuesMemoryInBytes); + if (out.getVersion().before(Version.V_2_0_0)) { + // the following was removed in Lucene 9 (https://issues.apache.org/jira/browse/LUCENE-9387) + // retain the following for bwc only (todo: remove in OpenSearch 3) + out.writeLong(0L); // estimated memory + out.writeLong(0L); // estimated terms memory + out.writeLong(0L); // estimated stored fields memory + out.writeLong(0L); // estimated term vector memory + out.writeLong(0L); // estimated norms memory + out.writeLong(0L); // estimated points memory + out.writeLong(0L); // estimated doc values memory + } out.writeLong(indexWriterMemoryInBytes); out.writeLong(versionMapMemoryInBytes); out.writeLong(bitsetMemoryInBytes); @@ -402,4 +298,14 @@ public void writeTo(StreamOutput out) throws IOException { public void clearFileSizes() { fileSizes = ImmutableOpenMap.of(); } + + /** + * Used only for deprecating memory tracking in REST interface + * todo remove in OpenSearch 3.0 + * @deprecated + */ + @Deprecated + public ByteSizeValue getZeroMemory() { + return ZERO_BYTE_SIZE_VALUE; + } } diff --git a/server/src/main/java/org/opensearch/index/fielddata/plain/AbstractBinaryDVLeafFieldData.java b/server/src/main/java/org/opensearch/index/fielddata/plain/AbstractBinaryDVLeafFieldData.java index bf641a91f377b..787401386c120 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/plain/AbstractBinaryDVLeafFieldData.java +++ b/server/src/main/java/org/opensearch/index/fielddata/plain/AbstractBinaryDVLeafFieldData.java @@ -33,9 +33,9 @@ package org.opensearch.index.fielddata.plain; import org.apache.lucene.index.BinaryDocValues; -import org.apache.lucene.store.ByteArrayDataInput; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.BytesRef; +import org.opensearch.common.io.stream.BytesStreamInput; import org.opensearch.index.fielddata.LeafFieldData; import org.opensearch.index.fielddata.SortedBinaryDocValues; @@ -66,7 +66,7 @@ public SortedBinaryDocValues getBytesValues() { return new SortedBinaryDocValues() { int count; - final ByteArrayDataInput in = new ByteArrayDataInput(); + final BytesStreamInput in = new BytesStreamInput(); final BytesRef scratch = new BytesRef(); @Override diff --git a/server/src/main/java/org/opensearch/index/get/GetResult.java b/server/src/main/java/org/opensearch/index/get/GetResult.java index 84417f876d2f9..aa2bf43c5b290 100644 --- a/server/src/main/java/org/opensearch/index/get/GetResult.java +++ b/server/src/main/java/org/opensearch/index/get/GetResult.java @@ -34,6 +34,7 @@ import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; +import org.opensearch.Version; import org.opensearch.common.Strings; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.compress.CompressorFactory; @@ -65,7 +66,6 @@ public class GetResult implements Writeable, Iterable, ToXContentObject { public static final String _INDEX = "_index"; - public static final String _TYPE = "_type"; public static final String _ID = "_id"; private static final String _VERSION = "_version"; private static final String _SEQ_NO = "_seq_no"; @@ -74,7 +74,6 @@ public class GetResult implements Writeable, Iterable, ToXContent private static final String FIELDS = "fields"; private String index; - private String type; private String id; private long 
version; private long seqNo; @@ -88,7 +87,9 @@ public class GetResult implements Writeable, Iterable, ToXContent public GetResult(StreamInput in) throws IOException { index = in.readString(); - type = in.readOptionalString(); + if (in.getVersion().before(Version.V_2_0_0)) { + in.readOptionalString(); + } id = in.readString(); seqNo = in.readZLong(); primaryTerm = in.readVLong(); @@ -121,7 +122,6 @@ public GetResult(StreamInput in) throws IOException { public GetResult( String index, - String type, String id, long seqNo, long primaryTerm, @@ -132,7 +132,6 @@ public GetResult( Map metaFields ) { this.index = index; - this.type = type; this.id = id; this.seqNo = seqNo; this.primaryTerm = primaryTerm; @@ -140,8 +139,8 @@ public GetResult( + seqNo + " primaryTerm: " + primaryTerm; - assert exists - || (seqNo == UNASSIGNED_SEQ_NO && primaryTerm == UNASSIGNED_PRIMARY_TERM) : "doc not found but seqNo/primaryTerm are set"; + assert exists || (seqNo == UNASSIGNED_SEQ_NO && primaryTerm == UNASSIGNED_PRIMARY_TERM) + : "doc not found but seqNo/primaryTerm are set"; this.version = version; this.exists = exists; this.source = source; @@ -163,13 +162,6 @@ public String getIndex() { return index; } - /** - * The type of the document. - */ - public String getType() { - return type; - } - /** * The id of the document. */ @@ -337,7 +329,6 @@ public XContentBuilder toXContentEmbedded(XContentBuilder builder, Params params public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field(_INDEX, index); - builder.field(_TYPE, type); builder.field(_ID, id); if (isExists()) { if (version != -1) { @@ -354,10 +345,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public static GetResult fromXContentEmbedded(XContentParser parser) throws IOException { XContentParser.Token token = parser.nextToken(); ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser); - return fromXContentEmbedded(parser, null, null, null); + return fromXContentEmbedded(parser, null, null); } - public static GetResult fromXContentEmbedded(XContentParser parser, String index, String type, String id) throws IOException { + public static GetResult fromXContentEmbedded(XContentParser parser, String index, String id) throws IOException { XContentParser.Token token = parser.currentToken(); ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser); @@ -375,8 +366,6 @@ public static GetResult fromXContentEmbedded(XContentParser parser, String index } else if (token.isValue()) { if (_INDEX.equals(currentFieldName)) { index = parser.text(); - } else if (_TYPE.equals(currentFieldName)) { - type = parser.text(); } else if (_ID.equals(currentFieldName)) { id = parser.text(); } else if (_VERSION.equals(currentFieldName)) { @@ -414,7 +403,7 @@ public static GetResult fromXContentEmbedded(XContentParser parser, String index } } } - return new GetResult(index, type, id, seqNo, primaryTerm, version, found, source, documentFields, metaFields); + return new GetResult(index, id, seqNo, primaryTerm, version, found, source, documentFields, metaFields); } public static GetResult fromXContent(XContentParser parser) throws IOException { @@ -442,7 +431,9 @@ private Map readFields(StreamInput in) throws IOException @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(index); - out.writeOptionalString(type); + if (out.getVersion().before(Version.V_2_0_0)) { + out.writeOptionalString(MapperService.SINGLE_MAPPING_NAME); + 
} out.writeString(id); out.writeZLong(seqNo); out.writeVLong(primaryTerm); @@ -484,7 +475,6 @@ public boolean equals(Object o) { && primaryTerm == getResult.primaryTerm && exists == getResult.exists && Objects.equals(index, getResult.index) - && Objects.equals(type, getResult.type) && Objects.equals(id, getResult.id) && Objects.equals(documentFields, getResult.documentFields) && Objects.equals(metaFields, getResult.metaFields) @@ -493,7 +483,7 @@ public boolean equals(Object o) { @Override public int hashCode() { - return Objects.hash(version, seqNo, primaryTerm, exists, index, type, id, documentFields, metaFields, sourceAsMap()); + return Objects.hash(version, seqNo, primaryTerm, exists, index, id, documentFields, metaFields, sourceAsMap()); } @Override diff --git a/server/src/main/java/org/opensearch/index/get/ShardGetService.java b/server/src/main/java/org/opensearch/index/get/ShardGetService.java index 992a53e9ae0da..8cf315e2fffa8 100644 --- a/server/src/main/java/org/opensearch/index/get/ShardGetService.java +++ b/server/src/main/java/org/opensearch/index/get/ShardGetService.java @@ -107,7 +107,6 @@ public GetStats stats() { } public GetResult get( - String type, String id, String[] gFields, boolean realtime, @@ -115,11 +114,10 @@ public GetResult get( VersionType versionType, FetchSourceContext fetchSourceContext ) { - return get(type, id, gFields, realtime, version, versionType, UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM, fetchSourceContext); + return get(id, gFields, realtime, version, versionType, UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM, fetchSourceContext); } private GetResult get( - String type, String id, String[] gFields, boolean realtime, @@ -132,7 +130,7 @@ private GetResult get( currentMetric.inc(); try { long now = System.nanoTime(); - GetResult getResult = innerGet(type, id, gFields, realtime, version, versionType, ifSeqNo, ifPrimaryTerm, fetchSourceContext); + GetResult getResult = innerGet(id, gFields, realtime, version, versionType, ifSeqNo, ifPrimaryTerm, fetchSourceContext); if (getResult.isExists()) { existsMetric.inc(System.nanoTime() - now); @@ -145,9 +143,8 @@ private GetResult get( } } - public GetResult getForUpdate(String type, String id, long ifSeqNo, long ifPrimaryTerm) { + public GetResult getForUpdate(String id, long ifSeqNo, long ifPrimaryTerm) { return get( - type, id, new String[] { RoutingFieldMapper.NAME }, true, @@ -166,16 +163,16 @@ public GetResult getForUpdate(String type, String id, long ifSeqNo, long ifPrima *

    * Note: Call must release engine searcher associated with engineGetResult! */ - public GetResult get(Engine.GetResult engineGetResult, String id, String type, String[] fields, FetchSourceContext fetchSourceContext) { + public GetResult get(Engine.GetResult engineGetResult, String id, String[] fields, FetchSourceContext fetchSourceContext) { if (!engineGetResult.exists()) { - return new GetResult(shardId.getIndexName(), type, id, UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM, -1, false, null, null, null); + return new GetResult(shardId.getIndexName(), id, UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM, -1, false, null, null, null); } currentMetric.inc(); try { long now = System.nanoTime(); fetchSourceContext = normalizeFetchSourceContent(fetchSourceContext, fields); - GetResult getResult = innerGetLoadFromStoredFields(type, id, fields, fetchSourceContext, engineGetResult, mapperService); + GetResult getResult = innerGetLoadFromStoredFields(id, fields, fetchSourceContext, engineGetResult, mapperService); if (getResult.isExists()) { existsMetric.inc(System.nanoTime() - now); } else { @@ -206,7 +203,6 @@ private FetchSourceContext normalizeFetchSourceContent(@Nullable FetchSourceCont } private GetResult innerGet( - String type, String id, String[] gFields, boolean realtime, @@ -217,40 +213,31 @@ private GetResult innerGet( FetchSourceContext fetchSourceContext ) { fetchSourceContext = normalizeFetchSourceContent(fetchSourceContext, gFields); - if (type == null || type.equals("_all")) { - DocumentMapper mapper = mapperService.documentMapper(); - type = mapper == null ? null : mapper.type(); - } - Engine.GetResult get = null; - if (type != null) { - Term uidTerm = new Term(IdFieldMapper.NAME, Uid.encodeId(id)); - get = indexShard.get( - new Engine.Get(realtime, realtime, type, id, uidTerm).version(version) - .versionType(versionType) - .setIfSeqNo(ifSeqNo) - .setIfPrimaryTerm(ifPrimaryTerm) - ); - assert get.isFromTranslog() == false || realtime : "should only read from translog if realtime enabled"; - if (get.exists() == false) { - get.close(); - } + Term uidTerm = new Term(IdFieldMapper.NAME, Uid.encodeId(id)); + Engine.GetResult get = indexShard.get( + new Engine.Get(realtime, true, id, uidTerm).version(version) + .versionType(versionType) + .setIfSeqNo(ifSeqNo) + .setIfPrimaryTerm(ifPrimaryTerm) + ); + if (get.exists() == false) { + get.close(); } if (get == null || get.exists() == false) { - return new GetResult(shardId.getIndexName(), type, id, UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM, -1, false, null, null, null); + return new GetResult(shardId.getIndexName(), id, UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM, -1, false, null, null, null); } try { // break between having loaded it from translog (so we only have _source), and having a document to load - return innerGetLoadFromStoredFields(type, id, gFields, fetchSourceContext, get, mapperService); + return innerGetLoadFromStoredFields(id, gFields, fetchSourceContext, get, mapperService); } finally { get.close(); } } private GetResult innerGetLoadFromStoredFields( - String type, String id, String[] storedFields, FetchSourceContext fetchSourceContext, @@ -289,7 +276,7 @@ private GetResult innerGetLoadFromStoredFields( try { docIdAndVersion.reader.document(docIdAndVersion.docId, fieldVisitor); } catch (IOException e) { - throw new OpenSearchException("Failed to get type [" + type + "] and id [" + id + "]", e); + throw new OpenSearchException("Failed to get id [" + id + "]", e); } source = fieldVisitor.source(); @@ -308,7 +295,7 @@ private 
GetResult innerGetLoadFromStoredFields( assert source != null : "original source in translog must exist"; SourceToParse sourceToParse = new SourceToParse( shardId.getIndexName(), - type, + MapperService.SINGLE_MAPPING_NAME, id, source, XContentHelper.xContentType(source), @@ -417,13 +404,12 @@ private GetResult innerGetLoadFromStoredFields( try { source = BytesReference.bytes(XContentFactory.contentBuilder(sourceContentType).map(sourceAsMap)); } catch (IOException e) { - throw new OpenSearchException("Failed to get type [" + type + "] and id [" + id + "] with includes/excludes set", e); + throw new OpenSearchException("Failed to get id [" + id + "] with includes/excludes set", e); } } return new GetResult( shardId.getIndexName(), - type, id, get.docIdAndVersion().seqNo, get.docIdAndVersion().primaryTerm, diff --git a/server/src/main/java/org/opensearch/index/mapper/AbstractGeometryFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/AbstractGeometryFieldMapper.java index df81984794cec..0d90e694ef481 100644 --- a/server/src/main/java/org/opensearch/index/mapper/AbstractGeometryFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/AbstractGeometryFieldMapper.java @@ -284,19 +284,19 @@ public Query termQuery(Object value, QueryShardContext context) { } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup searchLookup, String format) { + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { String geoFormat = format != null ? format : GeoJsonGeometryFormat.NAME; Function valueParser = value -> geometryParser.parseAndFormatObject(value, geoFormat); if (parsesArrayValue) { - return new ArraySourceValueFetcher(name(), mapperService) { + return new ArraySourceValueFetcher(name(), context) { @Override protected Object parseSourceValue(Object value) { return valueParser.apply(value); } }; } else { - return new SourceValueFetcher(name(), mapperService) { + return new SourceValueFetcher(name(), context) { @Override protected Object parseSourceValue(Object value) { return valueParser.apply(value); diff --git a/server/src/main/java/org/opensearch/index/mapper/AllFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/AllFieldMapper.java index 6059a65c2b36e..634424d6f45a4 100644 --- a/server/src/main/java/org/opensearch/index/mapper/AllFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/AllFieldMapper.java @@ -100,7 +100,7 @@ static final class AllFieldType extends StringFieldType { } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup searchLookup, String format) { + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { throw new UnsupportedOperationException(); } diff --git a/server/src/main/java/org/opensearch/index/mapper/ArraySourceValueFetcher.java b/server/src/main/java/org/opensearch/index/mapper/ArraySourceValueFetcher.java index e74e019e3d6c2..f28e4a64eec6e 100644 --- a/server/src/main/java/org/opensearch/index/mapper/ArraySourceValueFetcher.java +++ b/server/src/main/java/org/opensearch/index/mapper/ArraySourceValueFetcher.java @@ -33,6 +33,7 @@ package org.opensearch.index.mapper; import org.opensearch.common.Nullable; +import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.lookup.SourceLookup; import java.util.ArrayList; @@ -51,17 +52,17 @@ public abstract class ArraySourceValueFetcher implements ValueFetcher { private final Set 
sourcePaths; private final @Nullable Object nullValue; - public ArraySourceValueFetcher(String fieldName, MapperService mapperService) { - this(fieldName, mapperService, null); + public ArraySourceValueFetcher(String fieldName, QueryShardContext context) { + this(fieldName, context, null); } /** * @param fieldName The name of the field. - * @param mapperService A mapper service. + * @param context A query shard context. * @param nullValue A optional substitute value if the _source value is 'null'. */ - public ArraySourceValueFetcher(String fieldName, MapperService mapperService, Object nullValue) { - this.sourcePaths = mapperService.sourcePath(fieldName); + public ArraySourceValueFetcher(String fieldName, QueryShardContext context, Object nullValue) { + this.sourcePaths = context.sourcePath(fieldName); this.nullValue = nullValue; } diff --git a/server/src/main/java/org/opensearch/index/mapper/BinaryFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/BinaryFieldMapper.java index d2a34064987ca..68b0df63f2bef 100644 --- a/server/src/main/java/org/opensearch/index/mapper/BinaryFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/BinaryFieldMapper.java @@ -35,11 +35,11 @@ import com.carrotsearch.hppc.ObjectArrayList; import org.apache.lucene.document.StoredField; import org.apache.lucene.search.Query; -import org.apache.lucene.store.ByteArrayDataOutput; import org.apache.lucene.util.BytesRef; import org.opensearch.OpenSearchException; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.util.CollectionUtils; import org.opensearch.common.xcontent.XContentParser; import org.opensearch.index.fielddata.IndexFieldData; @@ -117,8 +117,8 @@ public String typeName() { } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup searchLookup, String format) { - return SourceValueFetcher.identity(name(), mapperService, format); + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { + return SourceValueFetcher.identity(name(), context, format); } @Override @@ -240,8 +240,7 @@ public BytesRef binaryValue() { try { CollectionUtils.sortAndDedup(bytesList); int size = bytesList.size(); - final byte[] bytes = new byte[totalSize + (size + 1) * 5]; - ByteArrayDataOutput out = new ByteArrayDataOutput(bytes); + BytesStreamOutput out = new BytesStreamOutput(totalSize + (size + 1) * 5); out.writeVInt(size); // write total number of values for (int i = 0; i < size; i++) { final byte[] value = bytesList.get(i); @@ -249,7 +248,7 @@ public BytesRef binaryValue() { out.writeVInt(valueLength); out.writeBytes(value, 0, valueLength); } - return new BytesRef(bytes, 0, out.getPosition()); + return out.bytes().toBytesRef(); } catch (IOException e) { throw new OpenSearchException("Failed to get binary value", e); } diff --git a/server/src/main/java/org/opensearch/index/mapper/BinaryRangeUtil.java b/server/src/main/java/org/opensearch/index/mapper/BinaryRangeUtil.java index ad54840888a46..96744ba2d01d8 100644 --- a/server/src/main/java/org/opensearch/index/mapper/BinaryRangeUtil.java +++ b/server/src/main/java/org/opensearch/index/mapper/BinaryRangeUtil.java @@ -33,11 +33,11 @@ package org.opensearch.index.mapper; import org.apache.lucene.document.InetAddressPoint; -import org.apache.lucene.store.ByteArrayDataInput; -import org.apache.lucene.store.ByteArrayDataOutput; import 
org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.NumericUtils;
 import org.opensearch.common.TriFunction;
+import org.opensearch.common.io.stream.BytesStreamInput;
+import org.opensearch.common.io.stream.BytesStreamOutput;
 
 import java.io.IOException;
 import java.net.InetAddress;
@@ -47,13 +47,12 @@
 import java.util.List;
 import java.util.Set;
 
-enum BinaryRangeUtil {
+final class BinaryRangeUtil {
 
-    ;
+    private BinaryRangeUtil() {}
 
     static BytesRef encodeIPRanges(Set<RangeFieldMapper.Range> ranges) throws IOException {
-        final byte[] encoded = new byte[5 + (16 * 2) * ranges.size()];
-        ByteArrayDataOutput out = new ByteArrayDataOutput(encoded);
+        final BytesStreamOutput out = new BytesStreamOutput(5 + (16 * 2) * ranges.size());
         out.writeVInt(ranges.size());
         for (RangeFieldMapper.Range range : ranges) {
             InetAddress fromValue = (InetAddress) range.from;
@@ -64,10 +63,10 @@ static BytesRef encodeIPRanges(Set<RangeFieldMapper.Range> ranges) throws IOExce
             byte[] encodedToValue = InetAddressPoint.encode(toValue);
             out.writeBytes(encodedToValue, 0, encodedToValue.length);
         }
-        return new BytesRef(encoded, 0, out.getPosition());
+        return out.bytes().toBytesRef();
     }
 
-    static List<RangeFieldMapper.Range> decodeIPRanges(BytesRef encodedRanges) {
+    static List<RangeFieldMapper.Range> decodeIPRanges(BytesRef encodedRanges) throws IOException {
         return decodeRanges(encodedRanges, RangeType.IP, BinaryRangeUtil::decodeIP);
     }
 
@@ -83,8 +82,7 @@ static BytesRef encodeLongRanges(Set<RangeFieldMapper.Range> ranges) throws IOEx
         Comparator<RangeFieldMapper.Range> toComparator = Comparator.comparingLong(range -> ((Number) range.to).longValue());
         sortedRanges.sort(fromComparator.thenComparing(toComparator));
 
-        final byte[] encoded = new byte[5 + (9 * 2) * sortedRanges.size()];
-        ByteArrayDataOutput out = new ByteArrayDataOutput(encoded);
+        final BytesStreamOutput out = new BytesStreamOutput(5 + (9 * 2) * sortedRanges.size());
         out.writeVInt(sortedRanges.size());
         for (RangeFieldMapper.Range range : sortedRanges) {
             byte[] encodedFrom = encodeLong(((Number) range.from).longValue());
@@ -92,10 +90,10 @@ static BytesRef encodeLongRanges(Set<RangeFieldMapper.Range> ranges) throws IOEx
             byte[] encodedTo = encodeLong(((Number) range.to).longValue());
             out.writeBytes(encodedTo, encodedTo.length);
         }
-        return new BytesRef(encoded, 0, out.getPosition());
+        return out.bytes().toBytesRef();
     }
 
-    static List<RangeFieldMapper.Range> decodeLongRanges(BytesRef encodedRanges) {
+    static List<RangeFieldMapper.Range> decodeLongRanges(BytesRef encodedRanges) throws IOException {
         return decodeRanges(encodedRanges, RangeType.LONG, BinaryRangeUtil::decodeLong);
     }
 
@@ -105,8 +103,7 @@ static BytesRef encodeDoubleRanges(Set<RangeFieldMapper.Range> ranges) throws IO
         Comparator<RangeFieldMapper.Range> toComparator = Comparator.comparingDouble(range -> ((Number) range.to).doubleValue());
         sortedRanges.sort(fromComparator.thenComparing(toComparator));
 
-        final byte[] encoded = new byte[5 + (8 * 2) * sortedRanges.size()];
-        ByteArrayDataOutput out = new ByteArrayDataOutput(encoded);
+        final BytesStreamOutput out = new BytesStreamOutput(5 + (8 * 2) * sortedRanges.size());
         out.writeVInt(sortedRanges.size());
         for (RangeFieldMapper.Range range : sortedRanges) {
             byte[] encodedFrom = encodeDouble(((Number) range.from).doubleValue());
@@ -114,14 +111,14 @@ static BytesRef encodeDoubleRanges(Set<RangeFieldMapper.Range> ranges) throws IO
             byte[] encodedTo = encodeDouble(((Number) range.to).doubleValue());
             out.writeBytes(encodedTo, encodedTo.length);
         }
-        return new BytesRef(encoded, 0, out.getPosition());
+        return out.bytes().toBytesRef();
     }
 
-    static List<RangeFieldMapper.Range> decodeDoubleRanges(BytesRef encodedRanges) {
+    static List<RangeFieldMapper.Range> decodeDoubleRanges(BytesRef encodedRanges) throws IOException {
         return decodeRanges(encodedRanges, RangeType.DOUBLE, BinaryRangeUtil::decodeDouble);
     }
 
-    static List<RangeFieldMapper.Range> decodeFloatRanges(BytesRef encodedRanges) {
+    static List<RangeFieldMapper.Range> decodeFloatRanges(BytesRef encodedRanges) throws IOException {
         return decodeRanges(encodedRanges, RangeType.FLOAT, BinaryRangeUtil::decodeFloat);
     }
 
@@ -129,11 +126,10 @@ static List<RangeFieldMapper.Range> decodeRanges(
         BytesRef encodedRanges,
         RangeType rangeType,
         TriFunction<byte[], Integer, Integer, Object> decodeBytes
-    ) {
+    ) throws IOException {
         RangeType.LengthType lengthType = rangeType.lengthType;
 
-        ByteArrayDataInput in = new ByteArrayDataInput();
-        in.reset(encodedRanges.bytes, encodedRanges.offset, encodedRanges.length);
+        BytesStreamInput in = new BytesStreamInput(encodedRanges.bytes, encodedRanges.offset, encodedRanges.length);
         int numRanges = in.readVInt();
         List<RangeFieldMapper.Range> ranges = new ArrayList<>(numRanges);
@@ -161,8 +157,7 @@ static BytesRef encodeFloatRanges(Set<RangeFieldMapper.Range> ranges) throws IOE
         Comparator<RangeFieldMapper.Range> toComparator = Comparator.comparingDouble(range -> ((Number) range.to).floatValue());
         sortedRanges.sort(fromComparator.thenComparing(toComparator));
 
-        final byte[] encoded = new byte[5 + (4 * 2) * sortedRanges.size()];
-        ByteArrayDataOutput out = new ByteArrayDataOutput(encoded);
+        final BytesStreamOutput out = new BytesStreamOutput(5 + (4 * 2) * sortedRanges.size());
         out.writeVInt(sortedRanges.size());
         for (RangeFieldMapper.Range range : sortedRanges) {
             byte[] encodedFrom = encodeFloat(((Number) range.from).floatValue());
@@ -170,7 +165,7 @@ static BytesRef encodeFloatRanges(Set<RangeFieldMapper.Range> ranges) throws IOE
             byte[] encodedTo = encodeFloat(((Number) range.to).floatValue());
             out.writeBytes(encodedTo, encodedTo.length);
         }
-        return new BytesRef(encoded, 0, out.getPosition());
+        return out.bytes().toBytesRef();
     }
 
     static byte[] encodeDouble(double number) {
diff --git a/server/src/main/java/org/opensearch/index/mapper/BooleanFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/BooleanFieldMapper.java
index 6933175d4ef37..a14364640d6ee 100644
--- a/server/src/main/java/org/opensearch/index/mapper/BooleanFieldMapper.java
+++ b/server/src/main/java/org/opensearch/index/mapper/BooleanFieldMapper.java
@@ -159,12 +159,12 @@ public String typeName() {
     }
 
     @Override
-    public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup searchLookup, String format) {
+    public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) {
         if (format != null) {
             throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] doesn't support formats.");
         }
 
-        return new SourceValueFetcher(name(), mapperService, nullValue) {
+        return new SourceValueFetcher(name(), context, nullValue) {
             @Override
             protected Boolean parseSourceValue(Object value) {
                 if (value instanceof Boolean) {
diff --git a/server/src/main/java/org/opensearch/index/mapper/CompletionFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/CompletionFieldMapper.java
index cd93a7a18b07e..5d0e2349aca6a 100644
--- a/server/src/main/java/org/opensearch/index/mapper/CompletionFieldMapper.java
+++ b/server/src/main/java/org/opensearch/index/mapper/CompletionFieldMapper.java
@@ -54,6 +54,7 @@
 import org.opensearch.common.xcontent.XContentParser.Token;
 import org.opensearch.index.analysis.AnalyzerScope;
 import org.opensearch.index.analysis.NamedAnalyzer;
+import org.opensearch.index.query.QueryShardContext;
 import org.opensearch.search.lookup.SearchLookup;
 import org.opensearch.search.suggest.completion.CompletionSuggester;
 import org.opensearch.search.suggest.completion.context.ContextMapping;
@@ -363,12 +364,12 @@ public String
typeName() { } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup searchLookup, String format) { + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { if (format != null) { throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] doesn't support formats."); } - return new ArraySourceValueFetcher(name(), mapperService) { + return new ArraySourceValueFetcher(name(), context) { @Override protected List parseSourceValue(Object value) { if (value instanceof List) { diff --git a/server/src/main/java/org/opensearch/index/mapper/DataStreamFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/DataStreamFieldMapper.java index 8961987bca811..03e792c4893d4 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DataStreamFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/DataStreamFieldMapper.java @@ -66,7 +66,7 @@ private DataStreamFieldType() { } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup searchLookup, String format) { + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { throw new UnsupportedOperationException("Cannot fetch values for internal field [" + typeName() + "]"); } diff --git a/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java index ffd7f90821767..9aa7b019a8e61 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java @@ -370,13 +370,13 @@ public long parse(String value) { } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup searchLookup, String format) { + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { DateFormatter defaultFormatter = dateTimeFormatter(); DateFormatter formatter = format != null ? 
DateFormatter.forPattern(format).withLocale(defaultFormatter.locale()) : defaultFormatter; - return new SourceValueFetcher(name(), mapperService, nullValue) { + return new SourceValueFetcher(name(), context, nullValue) { @Override public String parseSourceValue(Object value) { String date = value.toString(); diff --git a/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java b/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java index 8b406c4691018..2d6880c6b4186 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java +++ b/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java @@ -114,10 +114,6 @@ public Mapper.TypeParser.ParserContext parserContext(DateFormatter dateFormatter } public DocumentMapper parse(@Nullable String type, CompressedXContent source) throws MapperParsingException { - return parse(type, source, null); - } - - public DocumentMapper parse(@Nullable String type, CompressedXContent source, String defaultSource) throws MapperParsingException { Map mapping = null; if (source != null) { Map root = XContentHelper.convertToMap(source.compressedReference(), true, XContentType.JSON).v2(); @@ -128,22 +124,14 @@ public DocumentMapper parse(@Nullable String type, CompressedXContent source, St if (mapping == null) { mapping = new HashMap<>(); } - return parse(type, mapping, defaultSource); + return parse(type, mapping); } @SuppressWarnings({ "unchecked" }) - private DocumentMapper parse(String type, Map mapping, String defaultSource) throws MapperParsingException { + private DocumentMapper parse(String type, Map mapping) throws MapperParsingException { if (type == null) { throw new MapperParsingException("Failed to derive type"); } - - if (defaultSource != null) { - Tuple> t = extractMapping(MapperService.DEFAULT_MAPPING, defaultSource); - if (t.v2() != null) { - XContentHelper.mergeDefaults(mapping, t.v2()); - } - } - Mapper.TypeParser.ParserContext parserContext = parserContext(); // parse RootObjectMapper DocumentMapper.Builder docBuilder = new DocumentMapper.Builder( diff --git a/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java b/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java index c7317ef639eff..30579f501a50c 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java @@ -141,10 +141,6 @@ private static void internalParseDocument( } private void validateType(SourceToParse source) { - if (docMapper.type().equals(MapperService.DEFAULT_MAPPING)) { - throw new IllegalArgumentException("It is forbidden to index into the default mapping [" + MapperService.DEFAULT_MAPPING + "]"); - } - if (Objects.equals(source.type(), docMapper.type()) == false && MapperService.SINGLE_MAPPING_NAME.equals(source.type()) == false) { // used // by // typeless diff --git a/server/src/main/java/org/opensearch/index/mapper/FieldNamesFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/FieldNamesFieldMapper.java index b038de69b2e23..500df197f4f04 100644 --- a/server/src/main/java/org/opensearch/index/mapper/FieldNamesFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/FieldNamesFieldMapper.java @@ -139,7 +139,7 @@ public boolean isEnabled() { } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup lookup, String format) { + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup lookup, String 
format) { throw new UnsupportedOperationException("Cannot fetch values for internal field [" + name() + "]."); } diff --git a/server/src/main/java/org/opensearch/index/mapper/IdFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/IdFieldMapper.java index 6057c41801672..7ca7b9e81be96 100644 --- a/server/src/main/java/org/opensearch/index/mapper/IdFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/IdFieldMapper.java @@ -132,7 +132,7 @@ public boolean isSearchable() { } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup lookup, String format) { + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup lookup, String format) { throw new UnsupportedOperationException("Cannot fetch values for internal field [" + name() + "]."); } diff --git a/server/src/main/java/org/opensearch/index/mapper/IgnoredFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/IgnoredFieldMapper.java index ff26f18b1f67d..0d82b20cfaea6 100644 --- a/server/src/main/java/org/opensearch/index/mapper/IgnoredFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/IgnoredFieldMapper.java @@ -90,7 +90,7 @@ public Query existsQuery(QueryShardContext context) { } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup lookup, String format) { + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup lookup, String format) { throw new UnsupportedOperationException("Cannot fetch values for internal field [" + name() + "]."); } } diff --git a/server/src/main/java/org/opensearch/index/mapper/IndexFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/IndexFieldMapper.java index d7ebdc4c1c2c7..ae32aa52162fe 100644 --- a/server/src/main/java/org/opensearch/index/mapper/IndexFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/IndexFieldMapper.java @@ -86,7 +86,7 @@ public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, S } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup searchLookup, String format) { + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { throw new UnsupportedOperationException("Cannot fetch values for internal field [" + name() + "]."); } } diff --git a/server/src/main/java/org/opensearch/index/mapper/IpFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/IpFieldMapper.java index dcd68d778138e..3ba5e1e0dfe35 100644 --- a/server/src/main/java/org/opensearch/index/mapper/IpFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/IpFieldMapper.java @@ -188,11 +188,11 @@ private static InetAddress parse(Object value) { } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup searchLookup, String format) { + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { if (format != null) { throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] doesn't support formats."); } - return new SourceValueFetcher(name(), mapperService, nullValue) { + return new SourceValueFetcher(name(), context, nullValue) { @Override protected Object parseSourceValue(Object value) { InetAddress address; diff --git a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java index 3192a8d709f10..4206b29e23456 100644 --- 
a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java @@ -45,6 +45,7 @@ import org.opensearch.index.analysis.NamedAnalyzer; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData; +import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.similarity.SimilarityProvider; import org.opensearch.search.aggregations.support.CoreValuesSourceType; import org.opensearch.search.lookup.SearchLookup; @@ -289,12 +290,12 @@ public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, S } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup searchLookup, String format) { + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { if (format != null) { throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] doesn't support formats."); } - return new SourceValueFetcher(name(), mapperService, nullValue) { + return new SourceValueFetcher(name(), context, nullValue) { @Override protected String parseSourceValue(Object value) { String keywordValue = value.toString(); diff --git a/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java b/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java index 426e74551d365..575cfc8ca424b 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java +++ b/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java @@ -59,6 +59,7 @@ import org.opensearch.index.analysis.NamedAnalyzer; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.query.DistanceFeatureQueryBuilder; +import org.opensearch.index.query.IntervalMode; import org.opensearch.index.query.QueryRewriteContext; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.query.QueryShardException; @@ -126,7 +127,7 @@ public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, S * for metadata fields, field types should not throw {@link UnsupportedOperationException} since this * could cause a search retrieving multiple fields (like "fields": ["*"]) to fail. 
     */
-    public abstract ValueFetcher valueFetcher(MapperService mapperService, SearchLookup searchLookup, @Nullable String format);
+    public abstract ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, @Nullable String format);
 
     /** Returns the name of this type, as would be specified in mapping properties */
     public abstract String typeName();
 
@@ -365,7 +366,7 @@ public Query distanceFeatureQuery(Object origin, String pivot, float boost, Quer
     /**
      * Create an {@link IntervalsSource} to be used for proximity queries
      */
-    public IntervalsSource intervals(String query, int max_gaps, boolean ordered, NamedAnalyzer analyzer, boolean prefix)
+    public IntervalsSource intervals(String query, int max_gaps, IntervalMode mode, NamedAnalyzer analyzer, boolean prefix)
         throws IOException {
         throw new IllegalArgumentException(
             "Can only use interval queries on text fields - not on [" + name + "] which is of type [" + typeName() + "]"
diff --git a/server/src/main/java/org/opensearch/index/mapper/Mapper.java b/server/src/main/java/org/opensearch/index/mapper/Mapper.java
index 27507ff78c742..f9f16a33a0c52 100644
--- a/server/src/main/java/org/opensearch/index/mapper/Mapper.java
+++ b/server/src/main/java/org/opensearch/index/mapper/Mapper.java
@@ -33,6 +33,8 @@
 package org.opensearch.index.mapper;
 
 import org.opensearch.Version;
+import org.opensearch.cluster.metadata.IndexMetadata;
+import org.opensearch.common.Nullable;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.time.DateFormatter;
 import org.opensearch.common.xcontent.ToXContentFragment;
@@ -69,6 +71,14 @@ public Settings indexSettings() {
         public Version indexCreatedVersion() {
             return Version.indexCreated(indexSettings);
         }
+
+        public Version indexCreatedVersionOrDefault(@Nullable Version defaultValue) {
+            if (defaultValue == null || hasIndexCreated(indexSettings)) {
+                return indexCreatedVersion();
+            } else {
+                return defaultValue;
+            }
+        }
     }
 
     public abstract static class Builder {
@@ -240,4 +250,12 @@ public final String simpleName() {
      */
     public abstract void validate(MappingLookup mappers);
 
+    /**
+     * Check whether the settings contain the IndexMetadata.SETTING_INDEX_VERSION_CREATED setting.
+     * @param settings the index settings to check
+     * @return true if the settings contain the IndexMetadata.SETTING_INDEX_VERSION_CREATED setting, false otherwise
+     */
+    protected static boolean hasIndexCreated(Settings settings) {
+        return settings.hasValue(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey());
+    }
 }
diff --git a/server/src/main/java/org/opensearch/index/mapper/MapperService.java b/server/src/main/java/org/opensearch/index/mapper/MapperService.java
index 765f5dc2d2f24..e2a197aa2b4dd 100644
--- a/server/src/main/java/org/opensearch/index/mapper/MapperService.java
+++ b/server/src/main/java/org/opensearch/index/mapper/MapperService.java
@@ -41,7 +41,6 @@
 import org.opensearch.Version;
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.cluster.metadata.MappingMetadata;
-import org.opensearch.common.Nullable;
 import org.opensearch.common.Strings;
 import org.opensearch.common.compress.CompressedXContent;
 import org.opensearch.common.logging.DeprecationLogger;
@@ -115,7 +114,6 @@ public enum MergeReason {
         MAPPING_RECOVERY;
     }
 
-    public static final String DEFAULT_MAPPING = "_default_";
     public static final String SINGLE_MAPPING_NAME = "_doc";
     public static final Setting<Long> INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING = Setting.longSetting(
         "index.mapping.nested_fields.limit",
@@ -175,10 +173,7 @@ public enum MergeReason {
 
     private final IndexAnalyzers indexAnalyzers;
 
-    private volatile String defaultMappingSource;
-
     private volatile DocumentMapper mapper;
-    private volatile DocumentMapper defaultMapper;
 
     private final DocumentMapperParser documentParser;
     private final Version indexVersionCreated;
@@ -231,12 +226,6 @@ public MapperService(
             && indexSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_0_0)) {
             throw new IllegalArgumentException("Setting " + INDEX_MAPPER_DYNAMIC_SETTING.getKey() + " was removed after version 6.0.0");
         }
-
-        defaultMappingSource = "{\"_default_\":{}}";
-
-        if (logger.isTraceEnabled()) {
-            logger.trace("default mapping source[{}]", defaultMappingSource);
-        }
     }
 
     public boolean hasNested() {
@@ -286,9 +275,6 @@ public boolean updateMapping(final IndexMetadata currentIndexMetadata, final Ind
         if (mapper != null) {
             existingMappers.add(mapper.type());
         }
-        if (defaultMapper != null) {
-            existingMappers.add(DEFAULT_MAPPING);
-        }
         final Map<String, DocumentMapper> updatedEntries;
         try {
             // only update entries if needed
@@ -304,13 +290,8 @@ public boolean updateMapping(final IndexMetadata currentIndexMetadata, final Ind
         for (DocumentMapper documentMapper : updatedEntries.values()) {
             String mappingType = documentMapper.type();
-            MappingMetadata mappingMetadata;
-            if (mappingType.equals(MapperService.DEFAULT_MAPPING)) {
-                mappingMetadata = newIndexMetadata.defaultMapping();
-            } else {
-                mappingMetadata = newIndexMetadata.mapping();
-                assert mappingType.equals(mappingMetadata.type());
-            }
+            MappingMetadata mappingMetadata = newIndexMetadata.mapping();
+            assert mappingType.equals(mappingMetadata.type());
             CompressedXContent incomingMappingSource = mappingMetadata.source();
 
             String op = existingMappers.contains(mappingType) ?
"updated" : "added"; @@ -351,20 +332,6 @@ private void assertMappingVersion( // if the mapping version is unchanged, then there should not be any updates and all mappings should be the same assert updatedEntries.isEmpty() : updatedEntries; - MappingMetadata defaultMapping = newIndexMetadata.defaultMapping(); - if (defaultMapping != null) { - final CompressedXContent currentSource = currentIndexMetadata.defaultMapping().source(); - final CompressedXContent newSource = defaultMapping.source(); - assert currentSource.equals(newSource) : "expected current mapping [" - + currentSource - + "] for type [" - + defaultMapping.type() - + "] " - + "to be the same as new mapping [" - + newSource - + "]"; - } - MappingMetadata mapping = newIndexMetadata.mapping(); if (mapping != null) { final CompressedXContent currentSource = currentIndexMetadata.mapping().source(); @@ -400,13 +367,8 @@ private void assertMappingVersion( + "]"; assert updatedEntries.isEmpty() == false; for (final DocumentMapper documentMapper : updatedEntries.values()) { - final MappingMetadata currentMapping; - if (documentMapper.type().equals(MapperService.DEFAULT_MAPPING)) { - currentMapping = currentIndexMetadata.defaultMapping(); - } else { - currentMapping = currentIndexMetadata.mapping(); - assert currentMapping == null || documentMapper.type().equals(currentMapping.type()); - } + final MappingMetadata currentMapping = currentIndexMetadata.mapping(); + assert currentMapping == null || documentMapper.type().equals(currentMapping.type()); if (currentMapping != null) { final CompressedXContent currentSource = currentMapping.source(); final CompressedXContent newSource = documentMapper.mappingSource(); @@ -448,7 +410,7 @@ public void merge(IndexMetadata indexMetadata, MergeReason reason) { } public DocumentMapper merge(String type, CompressedXContent mappingSource, MergeReason reason) { - return internalMerge(Collections.singletonMap(type, mappingSource), reason).get(type); + return internalMerge(Collections.singletonMap(type, mappingSource), reason).values().iterator().next(); } private synchronized Map internalMerge(IndexMetadata indexMetadata, MergeReason reason) { @@ -462,52 +424,21 @@ private synchronized Map internalMerge(IndexMetadata ind } private synchronized Map internalMerge(Map mappings, MergeReason reason) { - DocumentMapper defaultMapper = null; - String defaultMappingSource = null; - - if (mappings.containsKey(DEFAULT_MAPPING)) { - // verify we can parse it - // NOTE: never apply the default here - try { - defaultMapper = documentParser.parse(DEFAULT_MAPPING, mappings.get(DEFAULT_MAPPING)); - } catch (Exception e) { - throw new MapperParsingException("Failed to parse mapping [{}]: {}", e, DEFAULT_MAPPING, e.getMessage()); - } - defaultMappingSource = mappings.get(DEFAULT_MAPPING).string(); - } - - final String defaultMappingSourceOrLastStored; - if (defaultMappingSource != null) { - defaultMappingSourceOrLastStored = defaultMappingSource; - } else { - defaultMappingSourceOrLastStored = this.defaultMappingSource; - } - DocumentMapper documentMapper = null; for (Map.Entry entry : mappings.entrySet()) { String type = entry.getKey(); - if (type.equals(DEFAULT_MAPPING)) { - continue; - } - if (documentMapper != null) { throw new IllegalArgumentException("Cannot put multiple mappings: " + mappings.keySet()); } - final boolean applyDefault = - // the default was already applied if we are recovering - reason != MergeReason.MAPPING_RECOVERY - // only apply the default mapping if we don't have the type yet - && this.mapper == 
null; - try { - documentMapper = documentParser.parse(type, entry.getValue(), applyDefault ? defaultMappingSourceOrLastStored : null); + documentMapper = documentParser.parse(type, entry.getValue()); } catch (Exception e) { throw new MapperParsingException("Failed to parse mapping [{}]: {}", e, entry.getKey(), e.getMessage()); } } - return internalMerge(defaultMapper, defaultMappingSource, documentMapper, reason); + return internalMerge(documentMapper, reason); } static void validateTypeName(String type) { @@ -535,25 +466,8 @@ static void validateTypeName(String type) { } } - private synchronized Map internalMerge( - @Nullable DocumentMapper defaultMapper, - @Nullable String defaultMappingSource, - DocumentMapper mapper, - MergeReason reason - ) { - + private synchronized Map internalMerge(DocumentMapper mapper, MergeReason reason) { Map results = new LinkedHashMap<>(2); - - if (defaultMapper != null) { - if (indexSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException(DEFAULT_MAPPING_ERROR_MESSAGE); - } else if (reason == MergeReason.MAPPING_UPDATE) { // only log in case of explicit mapping updates - deprecationLogger.deprecate("default_mapping_not_allowed", DEFAULT_MAPPING_ERROR_MESSAGE); - } - assert defaultMapper.type().equals(DEFAULT_MAPPING); - results.put(DEFAULT_MAPPING, defaultMapper); - } - DocumentMapper newMapper = null; if (mapper != null) { // check naming @@ -580,10 +494,6 @@ private synchronized Map internalMerge( } // commit the change - if (defaultMappingSource != null) { - this.defaultMappingSource = defaultMappingSource; - this.defaultMapper = defaultMapper; - } if (newMapper != null) { this.mapper = newMapper; } @@ -595,7 +505,7 @@ private synchronized Map internalMerge( private boolean assertSerialization(DocumentMapper mapper) { // capture the source now, it may change due to concurrent parsing final CompressedXContent mappingSource = mapper.mappingSource(); - DocumentMapper newMapper = parse(mapper.type(), mappingSource, false); + DocumentMapper newMapper = parse(mapper.type(), mappingSource); if (newMapper.mappingSource().equals(mappingSource) == false) { throw new IllegalStateException( @@ -609,8 +519,8 @@ private boolean assertSerialization(DocumentMapper mapper) { return true; } - public DocumentMapper parse(String mappingType, CompressedXContent mappingSource, boolean applyDefault) throws MapperParsingException { - return documentParser.parse(mappingType, mappingSource, applyDefault ? defaultMappingSource : null); + public DocumentMapper parse(String mappingType, CompressedXContent mappingSource) throws MapperParsingException { + return documentParser.parse(mappingType, mappingSource); } /** @@ -621,17 +531,12 @@ public DocumentMapper documentMapper() { } /** - * Return the {@link DocumentMapper} for the given type. By using the special - * {@value #DEFAULT_MAPPING} type, you can get a {@link DocumentMapper} for - * the default mapping. + * Return the {@link DocumentMapper} for the given type. */ public DocumentMapper documentMapper(String type) { if (mapper != null && type.equals(mapper.type())) { return mapper; } - if (DEFAULT_MAPPING.equals(type)) { - return defaultMapper; - } return null; } @@ -648,15 +553,6 @@ public static boolean isMappingSourceTyped(String type, CompressedXContent mappi return isMappingSourceTyped(type, root); } - /** - * If the _type name is _doc and there is no _doc top-level key then this means that we - * are handling a typeless call. 
In such a case, we override _doc with the actual type - * name in the mappings. This allows to use typeless APIs on typed indices. - */ - public String getTypeForUpdate(String type, CompressedXContent mappingSource) { - return isMappingSourceTyped(type, mappingSource) == false ? resolveDocumentType(type) : type; - } - /** * Resolves a type from a mapping-related request into the type that should be used when * merging and updating mappings. @@ -683,7 +579,7 @@ public DocumentMapperForType documentMapperWithAutoCreate(String type) { if (mapper != null) { return new DocumentMapperForType(mapper, null); } - mapper = parse(type, null, true); + mapper = parse(type, null); return new DocumentMapperForType(mapper, mapper.mapping()); } diff --git a/server/src/main/java/org/opensearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/NumberFieldMapper.java index 5ea56412399b5..368f4ae4adea3 100644 --- a/server/src/main/java/org/opensearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/NumberFieldMapper.java @@ -1062,12 +1062,12 @@ public Object valueForDisplay(Object value) { } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup searchLookup, String format) { + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { if (format != null) { throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] doesn't support formats."); } - return new SourceValueFetcher(name(), mapperService, nullValue) { + return new SourceValueFetcher(name(), context, nullValue) { @Override protected Object parseSourceValue(Object value) { if (value.equals("")) { diff --git a/server/src/main/java/org/opensearch/index/mapper/RangeFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/RangeFieldMapper.java index 40a9fb895508b..6ddb6d21ed5c1 100644 --- a/server/src/main/java/org/opensearch/index/mapper/RangeFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/RangeFieldMapper.java @@ -36,6 +36,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.opensearch.OpenSearchException; +import org.opensearch.Version; import org.opensearch.common.Explicit; import org.opensearch.common.collect.Tuple; import org.opensearch.common.geo.ShapeRelation; @@ -116,15 +117,17 @@ public static class Builder extends ParametrizedFieldMapper.Builder { private final Parameter> meta = Parameter.metaParam(); private final RangeType type; + private final Version indexCreatedVersion; public Builder(String name, RangeType type, Settings settings) { - this(name, type, COERCE_SETTING.get(settings)); + this(name, type, COERCE_SETTING.get(settings), hasIndexCreated(settings) ? Version.indexCreated(settings) : null); } - public Builder(String name, RangeType type, boolean coerceByDefault) { + public Builder(String name, RangeType type, boolean coerceByDefault, Version indexCreatedVersion) { super(name); this.type = type; this.coerce = Parameter.explicitBoolParam("coerce", true, m -> toType(m).coerce, coerceByDefault); + this.indexCreatedVersion = indexCreatedVersion; if (this.type != RangeType.DATE) { format.neverSerialize(); locale.neverSerialize(); @@ -157,8 +160,11 @@ protected RangeFieldType setupFieldType(BuilderContext context) { + " type" ); } + + // The builder context may not have the index created version; fall back to the indexCreatedVersion + // property of this mapper builder.
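// Illustrative reading of that fallback (hedged; only indexCreatedVersionOrDefault is
// taken from this change): the accessor is expected to behave like
//     context has a created version ? context.indexCreatedVersion() : indexCreatedVersion
// so a mapper rebuilt via getMergeBuilder(), where no BuilderContext settings are at hand,
// still resolves the Joda-vs-java-time formatter choice against the version the index was
// actually created with.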
DateFormatter dateTimeFormatter; - if (Joda.isJodaPattern(context.indexCreatedVersion(), format.getValue())) { + if (Joda.isJodaPattern(context.indexCreatedVersionOrDefault(indexCreatedVersion), format.getValue())) { dateTimeFormatter = Joda.forPattern(format.getValue()).withLocale(locale.getValue()); } else { dateTimeFormatter = DateFormatter.forPattern(format.getValue()).withLocale(locale.getValue()); @@ -263,13 +269,13 @@ public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, S } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup searchLookup, String format) { + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { DateFormatter defaultFormatter = dateTimeFormatter(); DateFormatter formatter = format != null ? DateFormatter.forPattern(format).withLocale(defaultFormatter.locale()) : defaultFormatter; - return new SourceValueFetcher(name(), mapperService) { + return new SourceValueFetcher(name(), context) { @Override @SuppressWarnings("unchecked") @@ -371,6 +377,7 @@ public Query rangeQuery( private final Locale locale; private final boolean coerceByDefault; + private final Version indexCreatedVersion; private RangeFieldMapper( String simpleName, @@ -389,6 +396,7 @@ private RangeFieldMapper( this.format = builder.format.getValue(); this.locale = builder.locale.getValue(); this.coerceByDefault = builder.coerce.getDefaultValue().value(); + this.indexCreatedVersion = builder.indexCreatedVersion; } boolean coerce() { @@ -397,7 +405,7 @@ boolean coerce() { @Override public ParametrizedFieldMapper.Builder getMergeBuilder() { - return new Builder(simpleName(), type, coerceByDefault).init(this); + return new Builder(simpleName(), type, coerceByDefault, indexCreatedVersion).init(this); } @Override diff --git a/server/src/main/java/org/opensearch/index/mapper/RangeType.java b/server/src/main/java/org/opensearch/index/mapper/RangeType.java index c23aab9791c07..9b0c374f8b54e 100644 --- a/server/src/main/java/org/opensearch/index/mapper/RangeType.java +++ b/server/src/main/java/org/opensearch/index/mapper/RangeType.java @@ -273,7 +273,7 @@ public BytesRef encodeRanges(Set ranges) throws IOExcept } @Override - public List decodeRanges(BytesRef bytes) { + public List decodeRanges(BytesRef bytes) throws IOException { return LONG.decodeRanges(bytes); } @@ -375,7 +375,7 @@ public BytesRef encodeRanges(Set ranges) throws IOExcept } @Override - public List decodeRanges(BytesRef bytes) { + public List decodeRanges(BytesRef bytes) throws IOException { return BinaryRangeUtil.decodeFloatRanges(bytes); } @@ -486,7 +486,7 @@ public BytesRef encodeRanges(Set ranges) throws IOExcept } @Override - public List decodeRanges(BytesRef bytes) { + public List decodeRanges(BytesRef bytes) throws IOException { return BinaryRangeUtil.decodeDoubleRanges(bytes); } @@ -600,7 +600,7 @@ public BytesRef encodeRanges(Set ranges) throws IOExcept } @Override - public List decodeRanges(BytesRef bytes) { + public List decodeRanges(BytesRef bytes) throws IOException { return LONG.decodeRanges(bytes); } @@ -692,7 +692,7 @@ public BytesRef encodeRanges(Set ranges) throws IOExcept } @Override - public List decodeRanges(BytesRef bytes) { + public List decodeRanges(BytesRef bytes) throws IOException { return BinaryRangeUtil.decodeLongRanges(bytes); } @@ -946,7 +946,7 @@ protected final Query createRangeQuery( // rounded up via parseFrom and parseTo methods. 
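// Hedged caller sketch (hypothetical helper, not part of this change): decodeRanges now
// declares the checked IOException thrown by the BinaryRangeUtil decoders, so a caller
// that cannot propagate it can wrap the call. Assumes java.io.IOException and
// java.io.UncheckedIOException are imported, and that the element type is
// RangeFieldMapper.Range, as elsewhere in this file.
static List<RangeFieldMapper.Range> decodeRangesUnchecked(RangeType type, BytesRef bytes) {
    try {
        return type.decodeRanges(bytes); // may now throw the checked IOException
    } catch (IOException e) {
        throw new UncheckedIOException(e); // surface as unchecked, preserving the cause
    }
}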
public abstract BytesRef encodeRanges(Set ranges) throws IOException; - public abstract List decodeRanges(BytesRef bytes); + public abstract List decodeRanges(BytesRef bytes) throws IOException; /** * Given the Range.to or Range.from Object value from a Range instance, converts that value into a Double. Before converting, it diff --git a/server/src/main/java/org/opensearch/index/mapper/RoutingFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/RoutingFieldMapper.java index c31f02f1917d3..e487673b5c9da 100644 --- a/server/src/main/java/org/opensearch/index/mapper/RoutingFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/RoutingFieldMapper.java @@ -36,6 +36,7 @@ import org.apache.lucene.document.FieldType; import org.apache.lucene.index.IndexOptions; import org.opensearch.common.lucene.Lucene; +import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.lookup.SearchLookup; import java.util.Collections; @@ -105,7 +106,7 @@ public String typeName() { } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup lookup, String format) { + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup lookup, String format) { throw new UnsupportedOperationException("Cannot fetch values for internal field [" + name() + "]."); } } diff --git a/server/src/main/java/org/opensearch/index/mapper/SeqNoFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/SeqNoFieldMapper.java index 9c3f56cfc2b0c..490e1a31ae72b 100644 --- a/server/src/main/java/org/opensearch/index/mapper/SeqNoFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/SeqNoFieldMapper.java @@ -138,7 +138,7 @@ private long parse(Object value) { } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup lookup, String format) { + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup lookup, String format) { throw new UnsupportedOperationException("Cannot fetch values for internal field [" + name() + "]."); } diff --git a/server/src/main/java/org/opensearch/index/mapper/SourceFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/SourceFieldMapper.java index 3e2ec7bab0965..8791c23e49051 100644 --- a/server/src/main/java/org/opensearch/index/mapper/SourceFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/SourceFieldMapper.java @@ -135,7 +135,7 @@ public String typeName() { } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup lookup, String format) { + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup lookup, String format) { throw new UnsupportedOperationException("Cannot fetch values for internal field [" + name() + "]."); } @@ -190,7 +190,7 @@ public void preParse(ParseContext context) throws IOException { context.doc().add(new StoredField(fieldType().name(), ref.bytes, ref.offset, ref.length)); } - if (originalSource != null && adaptedSource != originalSource && context.indexSettings().isSoftDeleteEnabled()) { + if (originalSource != null && adaptedSource != originalSource) { // if we omitted source or modified it we add the _recovery_source to ensure we have it for ops based recovery BytesRef ref = originalSource.toBytesRef(); context.doc().add(new StoredField(RECOVERY_SOURCE_NAME, ref.bytes, ref.offset, ref.length)); diff --git a/server/src/main/java/org/opensearch/index/mapper/SourceValueFetcher.java b/server/src/main/java/org/opensearch/index/mapper/SourceValueFetcher.java 
index edcd48a5e12a9..9467f22afa8e8 100644 --- a/server/src/main/java/org/opensearch/index/mapper/SourceValueFetcher.java +++ b/server/src/main/java/org/opensearch/index/mapper/SourceValueFetcher.java @@ -33,6 +33,7 @@ package org.opensearch.index.mapper; import org.opensearch.common.Nullable; +import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.lookup.SourceLookup; import java.util.ArrayDeque; @@ -52,17 +53,17 @@ public abstract class SourceValueFetcher implements ValueFetcher { private final Set sourcePaths; private final @Nullable Object nullValue; - public SourceValueFetcher(String fieldName, MapperService mapperService) { - this(fieldName, mapperService, null); + public SourceValueFetcher(String fieldName, QueryShardContext context) { + this(fieldName, context, null); } /** * @param fieldName The name of the field. - * @param mapperService A mapper service. + * @param context A query shard context. * @param nullValue An optional substitute value if the _source value is 'null'. */ - public SourceValueFetcher(String fieldName, MapperService mapperService, Object nullValue) { - this.sourcePaths = mapperService.sourcePath(fieldName); + public SourceValueFetcher(String fieldName, QueryShardContext context, Object nullValue) { + this.sourcePaths = context.sourcePath(fieldName); this.nullValue = nullValue; } @@ -104,11 +105,11 @@ public List fetchValues(SourceLookup lookup) { /** * Creates a {@link SourceValueFetcher} that passes through source values unmodified. */ - public static SourceValueFetcher identity(String fieldName, MapperService mapperService, String format) { + public static SourceValueFetcher identity(String fieldName, QueryShardContext context, String format) { if (format != null) { throw new IllegalArgumentException("Field [" + fieldName + "] doesn't support formats."); } - return new SourceValueFetcher(fieldName, mapperService) { + return new SourceValueFetcher(fieldName, context) { @Override protected Object parseSourceValue(Object value) { return value; @@ -119,11 +120,11 @@ protected Object parseSourceValue(Object value) { /** * Creates a {@link SourceValueFetcher} that converts source values to strings.
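* A hedged usage sketch: the QueryShardContext parameter replaces the previous
* MapperService, and {@code shardContext} / {@code lookup} are assumed to be an
* available QueryShardContext and a SourceLookup positioned on a document:
* {@code ValueFetcher fetcher = SourceValueFetcher.toString("title", shardContext, null);}
* {@code List<Object> values = fetcher.fetchValues(lookup);}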
*/ - public static SourceValueFetcher toString(String fieldName, MapperService mapperService, String format) { + public static SourceValueFetcher toString(String fieldName, QueryShardContext context, String format) { if (format != null) { throw new IllegalArgumentException("Field [" + fieldName + "] doesn't support formats."); } - return new SourceValueFetcher(fieldName, mapperService) { + return new SourceValueFetcher(fieldName, context) { @Override protected Object parseSourceValue(Object value) { return value.toString(); diff --git a/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java index 7de2ec16eb6c5..bcb3134e532d7 100644 --- a/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java @@ -85,6 +85,7 @@ import org.opensearch.index.fielddata.plain.PagedBytesIndexFieldData; import org.opensearch.index.mapper.Mapper.TypeParser.ParserContext; import org.opensearch.index.query.IntervalBuilder; +import org.opensearch.index.query.IntervalMode; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.similarity.SimilarityProvider; import org.opensearch.search.aggregations.support.CoreValuesSourceType; @@ -516,10 +517,10 @@ public String typeName() { } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup searchLookup, String format) { + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { // Because this internal field is modelled as a multi-field, SourceValueFetcher will look up its // parent field in _source. So we don't need to use the parent field name here. - return SourceValueFetcher.toString(name(), mapperService, format); + return SourceValueFetcher.toString(name(), context, format); } @Override @@ -546,10 +547,10 @@ static final class PrefixFieldType extends StringFieldType { } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup searchLookup, String format) { + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { // Because this internal field is modelled as a multi-field, SourceValueFetcher will look up its // parent field in _source. So we don't need to use the parent field name here. 
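// Concretely (illustrative): for a text field "title", this sub-field is registered as
// "title._index_prefix", and context.sourcePath(...) resolves that multi-field name to
// its parent, so the fetcher reads the "title" entry of _source rather than looking for
// a prefix-specific entry that never exists there.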
- return SourceValueFetcher.toString(name(), mapperService, format); + return SourceValueFetcher.toString(name(), context, format); } void setAnalyzer(NamedAnalyzer delegate) { @@ -752,8 +753,8 @@ public String typeName() { } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup searchLookup, String format) { - return SourceValueFetcher.toString(name(), mapperService, format); + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { + return SourceValueFetcher.toString(name(), context, format); } @Override @@ -789,7 +790,7 @@ public SpanQuery spanPrefixQuery(String value, SpanMultiTermQueryWrapper.SpanRew } @Override - public IntervalsSource intervals(String text, int maxGaps, boolean ordered, NamedAnalyzer analyzer, boolean prefix) + public IntervalsSource intervals(String text, int maxGaps, IntervalMode mode, NamedAnalyzer analyzer, boolean prefix) throws IOException { if (getTextSearchInfo().hasPositions() == false) { throw new IllegalArgumentException("Cannot create intervals over field [" + name() + "] with no positions indexed"); @@ -805,7 +806,7 @@ public IntervalsSource intervals(String text, int maxGaps, boolean ordered, Name return Intervals.prefix(normalizedTerm); } IntervalBuilder builder = new IntervalBuilder(name(), analyzer == null ? getTextSearchInfo().getSearchAnalyzer() : analyzer); - return builder.analyzeText(text, maxGaps, ordered); + return builder.analyzeText(text, maxGaps, mode); } @Override diff --git a/server/src/main/java/org/opensearch/index/mapper/TypeFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/TypeFieldMapper.java index eb84c472ab73c..ce7bdd3682d83 100644 --- a/server/src/main/java/org/opensearch/index/mapper/TypeFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/TypeFieldMapper.java @@ -106,7 +106,7 @@ public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, S } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup lookup, String format) { + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup lookup, String format) { throw new UnsupportedOperationException("Cannot fetch values for internal field [" + name() + "]."); } diff --git a/server/src/main/java/org/opensearch/index/mapper/VersionFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/VersionFieldMapper.java index 966abadbbcb9c..e601894105b59 100644 --- a/server/src/main/java/org/opensearch/index/mapper/VersionFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/VersionFieldMapper.java @@ -69,7 +69,7 @@ public Query termQuery(Object value, QueryShardContext context) { } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup lookup, String format) { + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup lookup, String format) { throw new UnsupportedOperationException("Cannot fetch values for internal field [" + name() + "]."); } } diff --git a/server/src/main/java/org/opensearch/index/query/AbstractGeometryQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/AbstractGeometryQueryBuilder.java index ca7e97cfb8bac..b5b4abdbaf118 100644 --- a/server/src/main/java/org/opensearch/index/query/AbstractGeometryQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/AbstractGeometryQueryBuilder.java @@ -423,14 +423,10 @@ private void fetch(Client client, GetRequest getRequest, String path, ActionList 
public void onResponse(GetResponse response) { try { if (!response.isExists()) { - throw new IllegalArgumentException( - "Shape with ID [" + getRequest.id() + "] in type [" + getRequest.type() + "] not found" - ); + throw new IllegalArgumentException("Shape with ID [" + getRequest.id() + "] not found"); } if (response.isSourceEmpty()) { - throw new IllegalArgumentException( - "Shape with ID [" + getRequest.id() + "] in type [" + getRequest.type() + "] source disabled" - ); + throw new IllegalArgumentException("Shape with ID [" + getRequest.id() + "] source disabled"); } String[] pathElements = path.split("\\."); @@ -549,12 +545,7 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws } else if (this.shape == null) { SetOnce supplier = new SetOnce<>(); queryRewriteContext.registerAsyncAction((client, listener) -> { - GetRequest getRequest; - if (indexedShapeType == null) { - getRequest = new GetRequest(indexedShapeIndex, indexedShapeId); - } else { - getRequest = new GetRequest(indexedShapeIndex, indexedShapeType, indexedShapeId); - } + GetRequest getRequest = new GetRequest(indexedShapeIndex, indexedShapeId); getRequest.routing(indexedShapeRouting); fetch(client, getRequest, indexedShapePath, ActionListener.wrap(builder -> { supplier.set(builder); diff --git a/server/src/main/java/org/opensearch/index/query/IdsQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/IdsQueryBuilder.java index 2b63e424b4a92..724425957f75a 100644 --- a/server/src/main/java/org/opensearch/index/query/IdsQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/IdsQueryBuilder.java @@ -32,9 +32,8 @@ package org.opensearch.index.query; -import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; -import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.Version; import org.opensearch.common.ParseField; import org.opensearch.common.ParsingException; import org.opensearch.common.Strings; @@ -44,14 +43,12 @@ import org.opensearch.common.xcontent.ObjectParser; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentParser; -import org.opensearch.index.mapper.DocumentMapper; import org.opensearch.index.mapper.IdFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; import java.util.Collections; import java.util.HashSet; import java.util.Objects; @@ -72,8 +69,6 @@ public class IdsQueryBuilder extends AbstractQueryBuilder { private final Set ids = new HashSet<>(); - private String[] types = Strings.EMPTY_ARRAY; - /** * Creates a new IdsQueryBuilder with no types specified upfront */ @@ -86,38 +81,23 @@ public IdsQueryBuilder() { */ public IdsQueryBuilder(StreamInput in) throws IOException { super(in); - types = in.readStringArray(); + if (in.getVersion().before(Version.V_2_0_0)) { + // types no longer relevant so ignore + String[] types = in.readStringArray(); + if (types.length > 0) { + throw new IllegalStateException("types are no longer supported in ids query but found [" + Arrays.toString(types) + "]"); + } + } Collections.addAll(ids, in.readStringArray()); } @Override protected void doWriteTo(StreamOutput out) throws IOException { - out.writeStringArray(types); - out.writeStringArray(ids.toArray(new String[ids.size()])); - } - - /** - * Add types to query - * - * @deprecated Types are in the process of being removed, prefer to filter on a field instead. 
- */ - @Deprecated - public IdsQueryBuilder types(String... types) { - if (types == null) { - throw new IllegalArgumentException("[" + NAME + "] types cannot be null"); + if (out.getVersion().before(Version.V_2_0_0)) { + // types not supported so send an empty array to previous versions + out.writeStringArray(Strings.EMPTY_ARRAY); } - this.types = types; - return this; - } - - /** - * Returns the types used in this query - * - * @deprecated Types are in the process of being removed, prefer to filter on a field instead. - */ - @Deprecated - public String[] types() { - return this.types; + out.writeStringArray(ids.toArray(new String[ids.size()])); } /** @@ -141,9 +121,6 @@ public Set ids() { @Override protected void doXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(NAME); - if (types.length > 0) { - builder.array(TYPE_FIELD.getPreferredName(), types); - } builder.startArray(VALUES_FIELD.getPreferredName()); for (String value : ids) { builder.value(value); @@ -156,18 +133,13 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep private static final ObjectParser PARSER = new ObjectParser<>(NAME, IdsQueryBuilder::new); static { - PARSER.declareStringArray(fromList(String.class, IdsQueryBuilder::types), IdsQueryBuilder.TYPE_FIELD); PARSER.declareStringArray(fromList(String.class, IdsQueryBuilder::addIds), IdsQueryBuilder.VALUES_FIELD); declareStandardFields(PARSER); } public static IdsQueryBuilder fromXContent(XContentParser parser) { try { - IdsQueryBuilder builder = PARSER.apply(parser, null); - if (builder.types().length > 0) { - deprecationLogger.deprecate("ids_query_with_types", TYPES_DEPRECATION_MESSAGE); - } - return builder; + return PARSER.apply(parser, null); } catch (IllegalArgumentException e) { throw new ParsingException(parser.getTokenLocation(), e.getMessage(), e); } @@ -193,34 +165,20 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws @Override protected Query doToQuery(QueryShardContext context) throws IOException { - MappedFieldType idField = context.fieldMapper(IdFieldMapper.NAME); + MappedFieldType idField = context.getFieldType(IdFieldMapper.NAME); if (idField == null || ids.isEmpty()) { throw new IllegalStateException("Rewrite first"); } - final DocumentMapper mapper = context.getMapperService().documentMapper(); - Collection typesForQuery; - if (types.length == 0) { - typesForQuery = context.queryTypes(); - } else if (types.length == 1 && Metadata.ALL.equals(types[0])) { - typesForQuery = Collections.singleton(mapper.type()); - } else { - typesForQuery = new HashSet<>(Arrays.asList(types)); - } - - if (typesForQuery.contains(mapper.type())) { - return idField.termsQuery(new ArrayList<>(ids), context); - } else { - return new MatchNoDocsQuery("Type mismatch"); - } + return idField.termsQuery(new ArrayList<>(ids), context); } @Override protected int doHashCode() { - return Objects.hash(ids, Arrays.hashCode(types)); + return Objects.hash(ids); } @Override protected boolean doEquals(IdsQueryBuilder other) { - return Objects.equals(ids, other.ids) && Arrays.equals(types, other.types); + return Objects.equals(ids, other.ids); } } diff --git a/server/src/main/java/org/opensearch/index/query/IntervalBuilder.java b/server/src/main/java/org/opensearch/index/query/IntervalBuilder.java index b3166c39e68c8..e91adc5abe27a 100644 --- a/server/src/main/java/org/opensearch/index/query/IntervalBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/IntervalBuilder.java @@ 
-69,13 +69,20 @@ public IntervalBuilder(String field, Analyzer analyzer) { } public IntervalsSource analyzeText(String query, int maxGaps, boolean ordered) throws IOException { + return analyzeText(query, maxGaps, ordered ? IntervalMode.ORDERED : IntervalMode.UNORDERED); + } + + public IntervalsSource analyzeText(String query, int maxGaps, IntervalMode mode) throws IOException { try (TokenStream ts = analyzer.tokenStream(field, query); CachingTokenFilter stream = new CachingTokenFilter(ts)) { - return analyzeText(stream, maxGaps, ordered); + return analyzeText(stream, maxGaps, mode); } } protected IntervalsSource analyzeText(CachingTokenFilter stream, int maxGaps, boolean ordered) throws IOException { + return analyzeText(stream, maxGaps, ordered ? IntervalMode.ORDERED : IntervalMode.UNORDERED); + } + protected IntervalsSource analyzeText(CachingTokenFilter stream, int maxGaps, IntervalMode mode) throws IOException { TermToBytesRefAttribute termAtt = stream.getAttribute(TermToBytesRefAttribute.class); PositionIncrementAttribute posIncAtt = stream.addAttribute(PositionIncrementAttribute.class); PositionLengthAttribute posLenAtt = stream.addAttribute(PositionLengthAttribute.class); @@ -114,15 +121,15 @@ protected IntervalsSource analyzeText(CachingTokenFilter stream, int maxGaps, bo return analyzeTerm(stream); } else if (isGraph) { // graph - return combineSources(analyzeGraph(stream), maxGaps, ordered); + return combineSources(analyzeGraph(stream), maxGaps, mode); } else { // phrase if (hasSynonyms) { // phrase with single-term synonyms - return analyzeSynonyms(stream, maxGaps, ordered); + return analyzeSynonyms(stream, maxGaps, mode); } else { // simple phrase - return combineSources(analyzeTerms(stream), maxGaps, ordered); + return combineSources(analyzeTerms(stream), maxGaps, mode); } } @@ -135,7 +142,7 @@ protected IntervalsSource analyzeTerm(TokenStream ts) throws IOException { return Intervals.term(BytesRef.deepCopyOf(bytesAtt.getBytesRef())); } - protected static IntervalsSource combineSources(List sources, int maxGaps, boolean ordered) { + protected static IntervalsSource combineSources(List sources, int maxGaps, IntervalMode mode) { if (sources.size() == 0) { return NO_INTERVALS; } @@ -143,10 +150,21 @@ protected static IntervalsSource combineSources(List sources, i return sources.get(0); } IntervalsSource[] sourcesArray = sources.toArray(new IntervalsSource[0]); - if (maxGaps == 0 && ordered) { + if (maxGaps == 0 && mode == IntervalMode.ORDERED) { return Intervals.phrase(sourcesArray); } - IntervalsSource inner = ordered ? Intervals.ordered(sourcesArray) : Intervals.unordered(sourcesArray); + IntervalsSource inner; + if (mode == IntervalMode.ORDERED) { + inner = Intervals.ordered(sourcesArray); + } else if (mode == IntervalMode.UNORDERED) { + inner = Intervals.unordered(sourcesArray); + } else { + inner = Intervals.unorderedNoOverlaps(sourcesArray[0], sourcesArray[1]); + for (int sourceIdx = 2; sourceIdx < sourcesArray.length; sourceIdx++) { + inner = Intervals.unorderedNoOverlaps(maxGaps == -1 ? 
inner : Intervals.maxgaps(maxGaps, inner), sourcesArray[sourceIdx]); + } + } + if (maxGaps == -1) { return inner; } @@ -174,7 +192,7 @@ public static IntervalsSource extend(IntervalsSource source, int precedingSpaces return Intervals.extend(source, precedingSpaces, 0); } - protected IntervalsSource analyzeSynonyms(TokenStream ts, int maxGaps, boolean ordered) throws IOException { + protected IntervalsSource analyzeSynonyms(TokenStream ts, int maxGaps, IntervalMode mode) throws IOException { List terms = new ArrayList<>(); List synonyms = new ArrayList<>(); TermToBytesRefAttribute bytesAtt = ts.addAttribute(TermToBytesRefAttribute.class); @@ -199,7 +217,7 @@ protected IntervalsSource analyzeSynonyms(TokenStream ts, int maxGaps, boolean o } else { terms.add(extend(Intervals.or(synonyms.toArray(new IntervalsSource[0])), spaces)); } - return combineSources(terms, maxGaps, ordered); + return combineSources(terms, maxGaps, mode); } protected List analyzeGraph(TokenStream source) throws IOException { @@ -222,7 +240,7 @@ protected List analyzeGraph(TokenStream source) throws IOExcept Iterator it = graph.getFiniteStrings(start, end); while (it.hasNext()) { TokenStream ts = it.next(); - IntervalsSource phrase = combineSources(analyzeTerms(ts), 0, true); + IntervalsSource phrase = combineSources(analyzeTerms(ts), 0, IntervalMode.ORDERED); if (paths.size() >= maxClauseCount) { throw new BooleanQuery.TooManyClauses(); } diff --git a/server/src/main/java/org/opensearch/index/query/IntervalMode.java b/server/src/main/java/org/opensearch/index/query/IntervalMode.java new file mode 100644 index 0000000000000..f0489bf452678 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/query/IntervalMode.java @@ -0,0 +1,59 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.query; + +import org.opensearch.OpenSearchException; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.io.stream.Writeable; + +import java.io.IOException; + +public enum IntervalMode implements Writeable { + ORDERED(0), + UNORDERED(1), + UNORDERED_NO_OVERLAP(2); + + private final int ordinal; + + IntervalMode(int ordinal) { + this.ordinal = ordinal; + } + + public static IntervalMode readFromStream(StreamInput in) throws IOException { + int ord = in.readVInt(); + switch (ord) { + case 0: + return ORDERED; + case 1: + return UNORDERED; + case 2: + return UNORDERED_NO_OVERLAP; + } + throw new OpenSearchException("unknown serialized type [" + ord + "]"); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(this.ordinal); + } + + public static IntervalMode fromString(String intervalMode) { + if (intervalMode == null) { + throw new IllegalArgumentException("cannot parse mode from null string"); + } + + for (IntervalMode mode : IntervalMode.values()) { + if (mode.name().equalsIgnoreCase(intervalMode)) { + return mode; + } + } + throw new IllegalArgumentException("no mode can be parsed from string [" + intervalMode + "]"); + } +} diff --git a/server/src/main/java/org/opensearch/index/query/IntervalsSourceProvider.java b/server/src/main/java/org/opensearch/index/query/IntervalsSourceProvider.java index 166ac3608ca55..b53dfc32d7006 100644 --- a/server/src/main/java/org/opensearch/index/query/IntervalsSourceProvider.java +++ b/server/src/main/java/org/opensearch/index/query/IntervalsSourceProvider.java @@ -39,7 +39,10 @@ import org.apache.lucene.queries.intervals.IntervalsSource; import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.automaton.CompiledAutomaton; +import org.apache.lucene.util.automaton.RegExp; import org.opensearch.LegacyESVersion; +import org.opensearch.Version; import org.opensearch.common.ParseField; import org.opensearch.common.ParsingException; import org.opensearch.common.io.stream.NamedWriteable; @@ -71,7 +74,7 @@ * Factory class for {@link IntervalsSource} * * Built-in sources include {@link Match}, which analyzes a text string and converts it - * to a proximity source (phrase, ordered or unordered depending on how + * to a proximity source (phrase, ordered, unordered, or unordered without overlaps, depending on how * strict the matching should be); {@link Combine}, which allows proximity queries * between different sub-sources; and {@link Disjunction}.
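*
* A hedged request-level sketch of the widened "mode" option parsed below (field names
* as declared in the parsers; values mirror IntervalMode and are matched
* case-insensitively, while the old boolean "ordered" flag is still accepted but
* deprecated):
*
*   "match": { "query": "quick fox", "max_gaps": 2, "mode": "unordered_no_overlap" }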
*/ @@ -100,12 +103,14 @@ public static IntervalsSourceProvider fromXContent(XContentParser parser) throws return Prefix.fromXContent(parser); case "wildcard": return Wildcard.fromXContent(parser); + case "regexp": + return Regexp.fromXContent(parser); case "fuzzy": return Fuzzy.fromXContent(parser); } throw new ParsingException( parser.getTokenLocation(), - "Unknown interval type [" + parser.currentName() + "], expecting one of [match, any_of, all_of, prefix, wildcard]" + "Unknown interval type [" + parser.currentName() + "], expecting one of [match, any_of, all_of, prefix, wildcard, regexp]" ); } @@ -126,15 +131,15 @@ public static class Match extends IntervalsSourceProvider { private final String query; private final int maxGaps; - private final boolean ordered; + private final IntervalMode mode; private final String analyzer; private final IntervalFilter filter; private final String useField; - public Match(String query, int maxGaps, boolean ordered, String analyzer, IntervalFilter filter, String useField) { + public Match(String query, int maxGaps, IntervalMode mode, String analyzer, IntervalFilter filter, String useField) { this.query = query; this.maxGaps = maxGaps; - this.ordered = ordered; + this.mode = mode; this.analyzer = analyzer; this.filter = filter; this.useField = useField; @@ -143,7 +148,15 @@ public Match(String query, int maxGaps, boolean ordered, String analyzer, Interv public Match(StreamInput in) throws IOException { this.query = in.readString(); this.maxGaps = in.readVInt(); - this.ordered = in.readBoolean(); + if (in.getVersion().onOrAfter(Version.V_1_3_0)) { + this.mode = IntervalMode.readFromStream(in); + } else { + if (in.readBoolean()) { + this.mode = IntervalMode.ORDERED; + } else { + this.mode = IntervalMode.UNORDERED; + } + } this.analyzer = in.readOptionalString(); this.filter = in.readOptionalWriteable(IntervalFilter::new); if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { @@ -163,9 +176,9 @@ public IntervalsSource getSource(QueryShardContext context, MappedFieldType fiel if (useField != null) { fieldType = context.fieldMapper(useField); assert fieldType != null; - source = Intervals.fixField(useField, fieldType.intervals(query, maxGaps, ordered, analyzer, false)); + source = Intervals.fixField(useField, fieldType.intervals(query, maxGaps, mode, analyzer, false)); } else { - source = fieldType.intervals(query, maxGaps, ordered, analyzer, false); + source = fieldType.intervals(query, maxGaps, mode, analyzer, false); } if (filter != null) { return filter.filter(source, context, fieldType); @@ -186,7 +199,7 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) return false; Match match = (Match) o; return maxGaps == match.maxGaps - && ordered == match.ordered + && mode == match.mode && Objects.equals(query, match.query) && Objects.equals(filter, match.filter) && Objects.equals(useField, match.useField) @@ -195,7 +208,7 @@ public boolean equals(Object o) { @Override public int hashCode() { - return Objects.hash(query, maxGaps, ordered, analyzer, filter, useField); + return Objects.hash(query, maxGaps, mode, analyzer, filter, useField); } @Override @@ -207,7 +220,11 @@ public String getWriteableName() { public void writeTo(StreamOutput out) throws IOException { out.writeString(query); out.writeVInt(maxGaps); - out.writeBoolean(ordered); + if (out.getVersion().onOrAfter(Version.V_1_3_0)) { + mode.writeTo(out); + } else { + out.writeBoolean(mode == IntervalMode.ORDERED); + } out.writeOptionalString(analyzer); 
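// Hedged note on the version gate above: peers on 1.3.0 or later receive the full
// tri-state IntervalMode, while older peers still get the legacy boolean, so
// UNORDERED_NO_OVERLAP intentionally degrades to plain "unordered"
// (writeBoolean(false)) on the old wire format.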
out.writeOptionalWriteable(filter); if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { @@ -221,7 +238,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); builder.field("query", query); builder.field("max_gaps", maxGaps); - builder.field("ordered", ordered); + builder.field("mode", mode); if (analyzer != null) { builder.field("analyzer", analyzer); } @@ -237,16 +254,28 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, args -> { String query = (String) args[0]; int max_gaps = (args[1] == null ? -1 : (Integer) args[1]); - boolean ordered = (args[2] != null && (boolean) args[2]); - String analyzer = (String) args[3]; - IntervalFilter filter = (IntervalFilter) args[4]; - String useField = (String) args[5]; - return new Match(query, max_gaps, ordered, analyzer, filter, useField); + Boolean ordered = (Boolean) args[2]; + String mode = (String) args[3]; + String analyzer = (String) args[4]; + IntervalFilter filter = (IntervalFilter) args[5]; + String useField = (String) args[6]; + + IntervalMode intervalMode; + if (ordered != null) { + intervalMode = ordered ? IntervalMode.ORDERED : IntervalMode.UNORDERED; + } else if (mode != null) { + intervalMode = IntervalMode.fromString(mode); + } else { + intervalMode = IntervalMode.UNORDERED; + } + + return new Match(query, max_gaps, intervalMode, analyzer, filter, useField); }); static { PARSER.declareString(constructorArg(), new ParseField("query")); PARSER.declareInt(optionalConstructorArg(), new ParseField("max_gaps")); - PARSER.declareBoolean(optionalConstructorArg(), new ParseField("ordered")); + PARSER.declareBoolean(optionalConstructorArg(), new ParseField("ordered").withAllDeprecated()); + PARSER.declareString(optionalConstructorArg(), new ParseField("mode")); PARSER.declareString(optionalConstructorArg(), new ParseField("analyzer")); PARSER.declareObject(optionalConstructorArg(), (p, c) -> IntervalFilter.fromXContent(p), new ParseField("filter")); PARSER.declareString(optionalConstructorArg(), new ParseField("use_field")); @@ -264,8 +293,8 @@ int getMaxGaps() { return maxGaps; } - boolean isOrdered() { - return ordered; + IntervalMode getMode() { + return mode; } String getAnalyzer() { @@ -391,19 +420,23 @@ public static class Combine extends IntervalsSourceProvider { public static final String NAME = "all_of"; private final List subSources; - private final boolean ordered; + private final IntervalMode mode; private final int maxGaps; private final IntervalFilter filter; - public Combine(List subSources, boolean ordered, int maxGaps, IntervalFilter filter) { + public Combine(List subSources, IntervalMode mode, int maxGaps, IntervalFilter filter) { this.subSources = subSources; - this.ordered = ordered; + this.mode = mode; this.maxGaps = maxGaps; this.filter = filter; } public Combine(StreamInput in) throws IOException { - this.ordered = in.readBoolean(); + if (in.getVersion().onOrAfter(Version.V_1_3_0)) { + this.mode = IntervalMode.readFromStream(in); + } else { + this.mode = in.readBoolean() ? 
IntervalMode.ORDERED : IntervalMode.UNORDERED; + } this.subSources = in.readNamedWriteableList(IntervalsSourceProvider.class); this.maxGaps = in.readInt(); this.filter = in.readOptionalWriteable(IntervalFilter::new); @@ -415,7 +448,7 @@ public IntervalsSource getSource(QueryShardContext ctx, MappedFieldType fieldTyp for (IntervalsSourceProvider provider : subSources) { ss.add(provider.getSource(ctx, fieldType)); } - IntervalsSource source = IntervalBuilder.combineSources(ss, maxGaps, ordered); + IntervalsSource source = IntervalBuilder.combineSources(ss, maxGaps, mode); if (filter != null) { return filter.filter(source, ctx, fieldType); } @@ -435,14 +468,14 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) return false; Combine combine = (Combine) o; return Objects.equals(subSources, combine.subSources) - && ordered == combine.ordered + && mode == combine.mode && maxGaps == combine.maxGaps && Objects.equals(filter, combine.filter); } @Override public int hashCode() { - return Objects.hash(subSources, ordered, maxGaps, filter); + return Objects.hash(subSources, mode, maxGaps, filter); } @Override @@ -452,7 +485,11 @@ public String getWriteableName() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeBoolean(ordered); + if (out.getVersion().onOrAfter(Version.V_1_3_0)) { + mode.writeTo(out); + } else { + out.writeBoolean(mode == IntervalMode.ORDERED); + } out.writeNamedWriteableList(subSources); out.writeInt(maxGaps); out.writeOptionalWriteable(filter); @@ -461,7 +498,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(NAME); - builder.field("ordered", ordered); + builder.field("mode", mode); builder.field("max_gaps", maxGaps); builder.startArray("intervals"); for (IntervalsSourceProvider provider : subSources) { @@ -478,14 +515,26 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @SuppressWarnings("unchecked") static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, args -> { - boolean ordered = (args[0] != null && (boolean) args[0]); - List subSources = (List) args[1]; - Integer maxGaps = (args[2] == null ? -1 : (Integer) args[2]); - IntervalFilter filter = (IntervalFilter) args[3]; - return new Combine(subSources, ordered, maxGaps, filter); + Boolean ordered = (Boolean) args[0]; + String mode = (String) args[1]; + List subSources = (List) args[2]; + Integer maxGaps = (args[3] == null ? -1 : (Integer) args[3]); + IntervalFilter filter = (IntervalFilter) args[4]; + + IntervalMode intervalMode; + if (ordered != null) { + intervalMode = ordered ? 
IntervalMode.ORDERED : IntervalMode.UNORDERED; + } else if (mode != null) { + intervalMode = IntervalMode.fromString(mode); + } else { + intervalMode = IntervalMode.UNORDERED; + } + + return new Combine(subSources, intervalMode, maxGaps, filter); }); static { - PARSER.declareBoolean(optionalConstructorArg(), new ParseField("ordered")); + PARSER.declareBoolean(optionalConstructorArg(), new ParseField("ordered").withAllDeprecated()); + PARSER.declareString(optionalConstructorArg(), new ParseField("mode")); PARSER.declareObjectArray( constructorArg(), (p, c) -> IntervalsSourceProvider.parseInnerIntervals(p), @@ -503,8 +552,8 @@ List getSubSources() { return subSources; } - boolean isOrdered() { - return ordered; + IntervalMode getMode() { + return mode; } int getMaxGaps() { @@ -546,9 +595,9 @@ public IntervalsSource getSource(QueryShardContext context, MappedFieldType fiel if (useField != null) { fieldType = context.fieldMapper(useField); assert fieldType != null; - source = Intervals.fixField(useField, fieldType.intervals(prefix, 0, false, analyzer, true)); + source = Intervals.fixField(useField, fieldType.intervals(prefix, 0, IntervalMode.UNORDERED, analyzer, true)); } else { - source = fieldType.intervals(prefix, 0, false, analyzer, true); + source = fieldType.intervals(prefix, 0, IntervalMode.UNORDERED, analyzer, true); } return source; } @@ -630,6 +679,185 @@ String getUseField() { } } + public static class Regexp extends IntervalsSourceProvider { + + public static final String NAME = "regexp"; + public static final int DEFAULT_FLAGS_VALUE = RegexpFlag.ALL.value(); + + private final String pattern; + private final int flags; + private final String useField; + private final Integer maxExpansions; + private final boolean caseInsensitive; + + /** + * Constructor + * + * {@code flags} is Lucene's syntax flags + * and {@code caseInsensitive} enables Lucene's only matching flag. + */ + public Regexp(String pattern, int flags, String useField, Integer maxExpansions, boolean caseInsensitive) { + this.pattern = pattern; + this.flags = flags; + this.useField = useField; + this.maxExpansions = (maxExpansions != null && maxExpansions > 0) ? maxExpansions : null; + this.caseInsensitive = caseInsensitive; + } + + public Regexp(StreamInput in) throws IOException { + this.pattern = in.readString(); + this.flags = in.readVInt(); + this.useField = in.readOptionalString(); + this.maxExpansions = in.readOptionalVInt(); + if (in.getVersion().onOrAfter(Version.V_1_3_0)) { + this.caseInsensitive = in.readBoolean(); + } else { + this.caseInsensitive = false; + } + } + + @Override + public IntervalsSource getSource(QueryShardContext context, MappedFieldType fieldType) { + final org.apache.lucene.util.automaton.RegExp regexp = new org.apache.lucene.util.automaton.RegExp( + pattern, + flags, + caseInsensitive ? RegExp.ASCII_CASE_INSENSITIVE : 0 + ); + final CompiledAutomaton automaton = new CompiledAutomaton(regexp.toAutomaton()); + + if (useField != null) { + fieldType = context.fieldMapper(useField); + assert fieldType != null; + checkPositions(fieldType); + + IntervalsSource regexpSource = maxExpansions == null + ? Intervals.multiterm(automaton, regexp.toString()) + : Intervals.multiterm(automaton, maxExpansions, regexp.toString()); + return Intervals.fixField(useField, regexpSource); + } else { + checkPositions(fieldType); + return maxExpansions == null + ? 
Intervals.multiterm(automaton, regexp.toString()) + : Intervals.multiterm(automaton, maxExpansions, regexp.toString()); + } + } + + private void checkPositions(MappedFieldType type) { + if (type.getTextSearchInfo().hasPositions() == false) { + throw new IllegalArgumentException("Cannot create intervals over field [" + type.name() + "] with no positions indexed"); + } + } + + @Override + public void extractFields(Set fields) { + if (useField != null) { + fields.add(useField); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Regexp regexp = (Regexp) o; + return Objects.equals(pattern, regexp.pattern) + && Objects.equals(flags, regexp.flags) + && Objects.equals(useField, regexp.useField) + && Objects.equals(maxExpansions, regexp.maxExpansions) + && Objects.equals(caseInsensitive, regexp.caseInsensitive); + } + + @Override + public int hashCode() { + return Objects.hash(pattern, flags, useField, maxExpansions, caseInsensitive); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(pattern); + out.writeVInt(flags); + out.writeOptionalString(useField); + out.writeOptionalVInt(maxExpansions); + if (out.getVersion().onOrAfter(Version.V_1_3_0)) { + out.writeBoolean(caseInsensitive); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(NAME); + builder.field("pattern", pattern); + if (flags != DEFAULT_FLAGS_VALUE) { + builder.field("flags_value", flags); + } + if (useField != null) { + builder.field("use_field", useField); + } + if (maxExpansions != null) { + builder.field("max_expansions", maxExpansions); + } + if (caseInsensitive) { + builder.field("case_insensitive", caseInsensitive); + } + builder.endObject(); + return builder; + } + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, args -> { + String pattern = (String) args[0]; + String flags = (String) args[1]; + Integer flagsValue = (Integer) args[2]; + String useField = (String) args[3]; + Integer maxExpansions = (Integer) args[4]; + boolean caseInsensitive = args[5] != null && (boolean) args[5]; + + if (flagsValue != null) { + return new Regexp(pattern, flagsValue, useField, maxExpansions, caseInsensitive); + } else if (flags != null) { + return new Regexp(pattern, RegexpFlag.resolveValue(flags), useField, maxExpansions, caseInsensitive); + } else { + return new Regexp(pattern, DEFAULT_FLAGS_VALUE, useField, maxExpansions, caseInsensitive); + } + }); + static { + PARSER.declareString(constructorArg(), new ParseField("pattern")); + PARSER.declareString(optionalConstructorArg(), new ParseField("flags")); + PARSER.declareInt(optionalConstructorArg(), new ParseField("flags_value")); + PARSER.declareString(optionalConstructorArg(), new ParseField("use_field")); + PARSER.declareInt(optionalConstructorArg(), new ParseField("max_expansions")); + PARSER.declareBoolean(optionalConstructorArg(), new ParseField("case_insensitive")); + } + + public static Regexp fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + String getPattern() { + return pattern; + } + + int getFlags() { + return flags; + } + + String getUseField() { + return useField; + } + + Integer getMaxExpansions() { + return maxExpansions; + } + + boolean isCaseInsensitive() { + return caseInsensitive; 
+ } + } + public static class Wildcard extends IntervalsSourceProvider { public static final String NAME = "wildcard"; @@ -637,17 +865,24 @@ public static class Wildcard extends IntervalsSourceProvider { private final String pattern; private final String analyzer; private final String useField; + private final Integer maxExpansions; - public Wildcard(String pattern, String analyzer, String useField) { + public Wildcard(String pattern, String analyzer, String useField, Integer maxExpansions) { this.pattern = pattern; this.analyzer = analyzer; this.useField = useField; + this.maxExpansions = (maxExpansions != null && maxExpansions > 0) ? maxExpansions : null; } public Wildcard(StreamInput in) throws IOException { this.pattern = in.readString(); this.analyzer = in.readOptionalString(); this.useField = in.readOptionalString(); + if (in.getVersion().onOrAfter(Version.V_1_3_0)) { + this.maxExpansions = in.readOptionalVInt(); + } else { + this.maxExpansions = null; + } } @Override @@ -665,11 +900,14 @@ public IntervalsSource getSource(QueryShardContext context, MappedFieldType fiel analyzer = fieldType.getTextSearchInfo().getSearchAnalyzer(); } BytesRef normalizedTerm = analyzer.normalize(useField, pattern); - source = Intervals.fixField(useField, Intervals.wildcard(normalizedTerm)); + IntervalsSource wildcardSource = maxExpansions == null + ? Intervals.wildcard(normalizedTerm) + : Intervals.wildcard(normalizedTerm, maxExpansions); + source = Intervals.fixField(useField, wildcardSource); } else { checkPositions(fieldType); BytesRef normalizedTerm = analyzer.normalize(fieldType.name(), pattern); - source = Intervals.wildcard(normalizedTerm); + source = maxExpansions == null ? Intervals.wildcard(normalizedTerm) : Intervals.wildcard(normalizedTerm, maxExpansions); } return source; } @@ -694,12 +932,13 @@ public boolean equals(Object o) { Wildcard wildcard = (Wildcard) o; return Objects.equals(pattern, wildcard.pattern) && Objects.equals(analyzer, wildcard.analyzer) - && Objects.equals(useField, wildcard.useField); + && Objects.equals(useField, wildcard.useField) + && Objects.equals(maxExpansions, wildcard.maxExpansions); } @Override public int hashCode() { - return Objects.hash(pattern, analyzer, useField); + return Objects.hash(pattern, analyzer, useField, maxExpansions); } @Override @@ -712,6 +951,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(pattern); out.writeOptionalString(analyzer); out.writeOptionalString(useField); + if (out.getVersion().onOrAfter(Version.V_1_3_0)) { + out.writeOptionalVInt(maxExpansions); + } } @Override @@ -724,6 +966,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (useField != null) { builder.field("use_field", useField); } + if (maxExpansions != null) { + builder.field("max_expansions", maxExpansions); + } builder.endObject(); return builder; } @@ -732,12 +977,14 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws String term = (String) args[0]; String analyzer = (String) args[1]; String useField = (String) args[2]; - return new Wildcard(term, analyzer, useField); + Integer maxExpansions = (Integer) args[3]; + return new Wildcard(term, analyzer, useField, maxExpansions); }); static { PARSER.declareString(constructorArg(), new ParseField("pattern")); PARSER.declareString(optionalConstructorArg(), new ParseField("analyzer")); PARSER.declareString(optionalConstructorArg(), new ParseField("use_field")); + PARSER.declareInt(optionalConstructorArg(), new 
ParseField("max_expansions")); } public static Wildcard fromXContent(XContentParser parser) throws IOException { @@ -755,6 +1002,10 @@ String getAnalyzer() { String getUseField() { return useField; } + + Integer getMaxExpansions() { + return maxExpansions; + } } public static class Fuzzy extends IntervalsSourceProvider { diff --git a/server/src/main/java/org/opensearch/index/query/MoreLikeThisQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/MoreLikeThisQueryBuilder.java index a72fc8efb2284..1ecc88387e549 100644 --- a/server/src/main/java/org/opensearch/index/query/MoreLikeThisQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/MoreLikeThisQueryBuilder.java @@ -39,6 +39,7 @@ import org.apache.lucene.search.Query; import org.opensearch.OpenSearchParseException; import org.opensearch.ExceptionsHelper; +import org.opensearch.Version; import org.opensearch.action.RoutingMissingException; import org.opensearch.action.termvectors.MultiTermVectorsItemResponse; import org.opensearch.action.termvectors.MultiTermVectorsRequest; @@ -54,7 +55,6 @@ import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.io.stream.Writeable; -import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.lucene.search.MoreLikeThisQuery; import org.opensearch.common.lucene.search.XMoreLikeThis; import org.opensearch.common.lucene.uid.Versions; @@ -67,7 +67,6 @@ import org.opensearch.index.mapper.IdFieldMapper; import org.opensearch.index.mapper.KeywordFieldMapper.KeywordFieldType; import org.opensearch.index.mapper.MappedFieldType; -import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.TextFieldMapper.TextFieldType; import java.io.IOException; @@ -81,7 +80,6 @@ import java.util.Objects; import java.util.Optional; import java.util.Set; -import java.util.stream.Stream; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; @@ -92,7 +90,6 @@ */ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder { public static final String NAME = "more_like_this"; - private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(MoreLikeThisQueryBuilder.class); static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Types are deprecated in [more_like_this] " + "queries. 
The type should no longer be specified in the [like] and [unlike] sections."; @@ -128,7 +125,6 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder item.type == null); - } - private static void parseLikeField(XContentParser parser, List texts, List items) throws IOException { if (parser.currentToken().isValue()) { texts.add(parser.text()); @@ -1150,9 +1081,6 @@ private static void setDefaultIndexTypeFields( if (item.index() == null) { item.index(context.index().getName()); } - if (item.type() == null) { - item.type(MapperService.SINGLE_MAPPING_NAME); - } // default fields if not present but don't override for artificial docs if ((item.fields() == null || item.fields().length == 0) && item.doc() == null) { if (useDefaultField) { diff --git a/server/src/main/java/org/opensearch/index/query/QueryBuilders.java b/server/src/main/java/org/opensearch/index/query/QueryBuilders.java index 90a3cbbb54a63..5b386564df1e8 100644 --- a/server/src/main/java/org/opensearch/index/query/QueryBuilders.java +++ b/server/src/main/java/org/opensearch/index/query/QueryBuilders.java @@ -153,18 +153,6 @@ public static IdsQueryBuilder idsQuery() { return new IdsQueryBuilder(); } - /** - * Constructs a query that will match only specific ids within types. - * - * @param types The mapping/doc type - * - * @deprecated Types are in the process of being removed, use {@link #idsQuery()} instead. - */ - @Deprecated - public static IdsQueryBuilder idsQuery(String... types) { - return new IdsQueryBuilder().types(types); - } - /** * A Query that matches documents containing a term. * @@ -622,15 +610,6 @@ public static WrapperQueryBuilder wrapperQuery(byte[] source) { return new WrapperQueryBuilder(source); } - /** - * A filter based on doc/mapping type. - * @deprecated Types are going away, prefer filtering on a field. - */ - @Deprecated - public static TypeQueryBuilder typeQuery(String type) { - return new TypeQueryBuilder(type); - } - /** * A terms query that can extract the terms from another doc in an index. */ diff --git a/server/src/main/java/org/opensearch/index/query/QueryShardContext.java b/server/src/main/java/org/opensearch/index/query/QueryShardContext.java index 47bade8b6aa51..4e6077889576d 100644 --- a/server/src/main/java/org/opensearch/index/query/QueryShardContext.java +++ b/server/src/main/java/org/opensearch/index/query/QueryShardContext.java @@ -44,7 +44,6 @@ import org.opensearch.client.Client; import org.opensearch.common.CheckedFunction; import org.opensearch.common.ParsingException; -import org.opensearch.common.Strings; import org.opensearch.common.TriFunction; import org.opensearch.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.lucene.search.Queries; @@ -76,9 +75,6 @@ import org.opensearch.transport.RemoteClusterAware; import java.io.IOException; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -105,7 +101,6 @@ public class QueryShardContext extends QueryRewriteContext { private final TriFunction, IndexFieldData> indexFieldDataService; private final int shardId; private final IndexSearcher searcher; - private String[] types = Strings.EMPTY_ARRAY; private boolean cacheable = true; private final SetOnce frozen = new SetOnce<>(); @@ -113,14 +108,6 @@ public class QueryShardContext extends QueryRewriteContext { private final Predicate indexNameMatcher; private final BooleanSupplier allowExpensiveQueries; - public void setTypes(String... 
types) { - this.types = types; - } - - public String[] getTypes() { - return types; - } - private final Map namedQueries = new HashMap<>(); private boolean allowUnmappedFields; private boolean mapUnmappedFieldAsString; @@ -295,6 +282,19 @@ public Set simpleMatchToIndexNames(String pattern) { return mapperService.simpleMatchToFullName(pattern); } + /** + * Returns the {@link MappedFieldType} for the provided field name. + * If the field is not mapped, the behaviour depends on the index.query.parse.allow_unmapped_fields setting, which defaults to true. + * In case unmapped fields are allowed, null is returned when the field is not mapped. + * In case unmapped fields are not allowed, either an exception is thrown or the field is automatically mapped as a text field. + * @throws QueryShardException if unmapped fields are not allowed and automatically mapping unmapped fields as text is disabled. + * @see QueryShardContext#setAllowUnmappedFields(boolean) + * @see QueryShardContext#setMapUnmappedFieldAsString(boolean) + */ + public MappedFieldType getFieldType(String name) { + return failIfFieldMappingNotFound(name, mapperService.fieldType(name)); + } + public MappedFieldType fieldMapper(String name) { return failIfFieldMappingNotFound(name, mapperService.fieldType(name)); } @@ -303,6 +303,14 @@ public ObjectMapper getObjectMapper(String name) { return mapperService.getObjectMapper(name); } + public boolean isMetadataField(String field) { + return mapperService.isMetadataField(field); + } + + public Set sourcePath(String fullName) { + return mapperService.sourcePath(fullName); + } + /** * Returns s {@link DocumentMapper} instance for the given type. * Delegates to {@link MapperService#documentMapper(String)} @@ -356,18 +364,6 @@ MappedFieldType failIfFieldMappingNotFound(String name, MappedFieldType fieldMap } } - /** - * Returns the narrowed down explicit types, or, if not set, all types. - */ - public Collection queryTypes() { - String[] types = getTypes(); - if (types == null || types.length == 0 || (types.length == 1 && types[0].equals("_all"))) { - DocumentMapper mapper = getMapperService().documentMapper(); - return mapper == null ? 
Collections.emptyList() : Collections.singleton(mapper.type()); - } - return Arrays.asList(types); - } - private SearchLookup lookup = null; /** @@ -377,8 +373,7 @@ public SearchLookup lookup() { if (this.lookup == null) { this.lookup = new SearchLookup( getMapperService(), - (fieldType, searchLookup) -> indexFieldDataService.apply(fieldType, fullyQualifiedIndex.getName(), searchLookup), - types + (fieldType, searchLookup) -> indexFieldDataService.apply(fieldType, fullyQualifiedIndex.getName(), searchLookup) ); } return this.lookup; @@ -394,8 +389,7 @@ public SearchLookup newFetchLookup() { */ return new SearchLookup( getMapperService(), - (fieldType, searchLookup) -> indexFieldDataService.apply(fieldType, fullyQualifiedIndex.getName(), searchLookup), - types + (fieldType, searchLookup) -> indexFieldDataService.apply(fieldType, fullyQualifiedIndex.getName(), searchLookup) ); } diff --git a/server/src/main/java/org/opensearch/index/query/TermsQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/TermsQueryBuilder.java index 9e0446ae4d802..e797730ac0dff 100644 --- a/server/src/main/java/org/opensearch/index/query/TermsQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/TermsQueryBuilder.java @@ -479,9 +479,7 @@ protected Query doToQuery(QueryShardContext context) throws IOException { } private void fetch(TermsLookup termsLookup, Client client, ActionListener> actionListener) { - GetRequest getRequest = termsLookup.type() == null - ? new GetRequest(termsLookup.index(), termsLookup.id()) - : new GetRequest(termsLookup.index(), termsLookup.type(), termsLookup.id()); + GetRequest getRequest = new GetRequest(termsLookup.index(), termsLookup.id()); getRequest.preference("_local").routing(termsLookup.routing()); client.get(getRequest, ActionListener.delegateFailure(actionListener, (delegatedListener, getResponse) -> { List terms = new ArrayList<>(); diff --git a/server/src/main/java/org/opensearch/index/reindex/AbstractBulkByScrollRequest.java b/server/src/main/java/org/opensearch/index/reindex/AbstractBulkByScrollRequest.java index b80dce43cbdcb..0de9a50dd1e8d 100644 --- a/server/src/main/java/org/opensearch/index/reindex/AbstractBulkByScrollRequest.java +++ b/server/src/main/java/org/opensearch/index/reindex/AbstractBulkByScrollRequest.java @@ -503,9 +503,6 @@ protected void searchToString(StringBuilder b) { } else { b.append("[all indices]"); } - if (searchRequest.types() != null && searchRequest.types().length != 0) { - b.append(Arrays.toString(searchRequest.types())); - } } @Override diff --git a/server/src/main/java/org/opensearch/index/reindex/BulkByScrollResponse.java b/server/src/main/java/org/opensearch/index/reindex/BulkByScrollResponse.java index 7d5c0e151ddbc..6a4b55f5877e7 100644 --- a/server/src/main/java/org/opensearch/index/reindex/BulkByScrollResponse.java +++ b/server/src/main/java/org/opensearch/index/reindex/BulkByScrollResponse.java @@ -240,7 +240,6 @@ private static Object parseFailure(XContentParser parser) throws IOException { ensureExpectedToken(Token.START_OBJECT, parser.currentToken(), parser); Token token; String index = null; - String type = null; String id = null; Integer status = null; Integer shardId = null; @@ -270,9 +269,6 @@ private static Object parseFailure(XContentParser parser) throws IOException { case Failure.INDEX_FIELD: index = parser.text(); break; - case Failure.TYPE_FIELD: - type = parser.text(); - break; case Failure.ID_FIELD: id = parser.text(); break; @@ -298,7 +294,7 @@ private static Object 
parseFailure(XContentParser parser) throws IOException { } } if (bulkExc != null) { - return new Failure(index, type, id, bulkExc, RestStatus.fromCode(status)); + return new Failure(index, id, bulkExc, RestStatus.fromCode(status)); } else if (searchExc != null) { if (status == null) { return new SearchFailure(searchExc, index, shardId, nodeId); diff --git a/server/src/main/java/org/opensearch/index/reindex/ClientScrollableHitSource.java b/server/src/main/java/org/opensearch/index/reindex/ClientScrollableHitSource.java index 6ad793c2fb911..6a78d16d3b385 100644 --- a/server/src/main/java/org/opensearch/index/reindex/ClientScrollableHitSource.java +++ b/server/src/main/java/org/opensearch/index/reindex/ClientScrollableHitSource.java @@ -91,9 +91,8 @@ public ClientScrollableHitSource( public void doStart(RejectAwareActionListener searchListener) { if (logger.isDebugEnabled()) { logger.debug( - "executing initial scroll against {}{}", - isEmpty(firstSearchRequest.indices()) ? "all indices" : firstSearchRequest.indices(), - isEmpty(firstSearchRequest.types()) ? "" : firstSearchRequest.types() + "executing initial scroll against {}", + isEmpty(firstSearchRequest.indices()) ? "all indices" : firstSearchRequest.indices() ); } client.search(firstSearchRequest, wrapListener(searchListener)); @@ -192,11 +191,6 @@ public String getIndex() { return delegate.getIndex(); } - @Override - public String getType() { - return delegate.getType(); - } - @Override public String getId() { return delegate.getId(); diff --git a/server/src/main/java/org/opensearch/index/reindex/DeleteByQueryRequest.java b/server/src/main/java/org/opensearch/index/reindex/DeleteByQueryRequest.java index 74a11e62ed851..4f297b89c0651 100644 --- a/server/src/main/java/org/opensearch/index/reindex/DeleteByQueryRequest.java +++ b/server/src/main/java/org/opensearch/index/reindex/DeleteByQueryRequest.java @@ -102,19 +102,6 @@ public DeleteByQueryRequest setQuery(QueryBuilder query) { return this; } - /** - * Set the document types for the delete - * @deprecated Types are in the process of being removed. Instead of - * using a type, prefer to filter on a field of the document. - */ - @Deprecated - public DeleteByQueryRequest setDocTypes(String... types) { - if (types != null) { - getSearchRequest().types(types); - } - return this; - } - /** * Set routing limiting the process to the shards that match that routing value */ @@ -155,21 +142,6 @@ public String getRouting() { return getSearchRequest().routing(); } - /** - * Gets the document types on which this request would be executed. Returns an empty array if all - * types are to be processed. - * @deprecated Types are in the process of being removed. Instead of - * using a type, prefer to filter on a field of the document. - */ - @Deprecated - public String[] getDocTypes() { - if (getSearchRequest().types() != null) { - return getSearchRequest().types(); - } else { - return new String[0]; - } - } - @Override protected DeleteByQueryRequest self() { return this; @@ -223,29 +195,6 @@ public IndicesOptions indicesOptions() { return getSearchRequest().indicesOptions(); } - /** - * Gets the document types on which this request would be executed. - * @deprecated Types are in the process of being removed. Instead of - * using a type, prefer to filter on a field of the document. 
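The deprecated type setters removed here all carry the same migration note: express the restriction as a query on a document field. A hedged sketch of the replacement call pattern (the index name "logs" and the field "kind" are hypothetical):

    import org.opensearch.index.query.QueryBuilders;
    import org.opensearch.index.reindex.DeleteByQueryRequest;

    public class DeleteByFieldSketch {
        public static DeleteByQueryRequest build() {
            // Where a caller previously wrote setDocTypes("tweet"), the same
            // intent now lives in the query itself.
            return new DeleteByQueryRequest("logs").setQuery(QueryBuilders.termQuery("kind", "tweet"));
        }
    }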
- */ - @Deprecated - public String[] types() { - assert getSearchRequest() != null; - return getSearchRequest().types(); - } - - /** - * Set the document types for the delete - * @deprecated Types are in the process of being removed. Instead of - * using a type, prefer to filter on a field of the document. - */ - @Deprecated - public DeleteByQueryRequest types(String... types) { - assert getSearchRequest() != null; - getSearchRequest().types(types); - return this; - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); diff --git a/server/src/main/java/org/opensearch/index/reindex/ReindexRequest.java b/server/src/main/java/org/opensearch/index/reindex/ReindexRequest.java index 6631b721d1b88..5858b4b8108d2 100644 --- a/server/src/main/java/org/opensearch/index/reindex/ReindexRequest.java +++ b/server/src/main/java/org/opensearch/index/reindex/ReindexRequest.java @@ -50,7 +50,6 @@ import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentParser; import org.opensearch.index.VersionType; -import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.QueryBuilder; import org.opensearch.script.Script; import org.opensearch.search.sort.SortOrder; @@ -167,16 +166,6 @@ public ReindexRequest setSourceIndices(String... sourceIndices) { return this; } - /** - * Set the document types which need to be copied from the source indices - */ - public ReindexRequest setSourceDocTypes(String... docTypes) { - if (docTypes != null) { - this.getSearchRequest().types(docTypes); - } - return this; - } - /** * Sets the scroll size for setting how many documents are to be processed in one batch during reindex */ @@ -219,14 +208,6 @@ public ReindexRequest setDestIndex(String destIndex) { return this; } - /** - * Set the document type for the destination index - */ - public ReindexRequest setDestDocType(String docType) { - this.getDestination().type(docType); - return this; - } - /** * Set the routing to decide which shard the documents need to be routed to */ @@ -313,9 +294,6 @@ public String toString() { } searchToString(b); b.append(" to [").append(destination.index()).append(']'); - if (destination.type() != null) { - b.append('[').append(destination.type()).append(']'); - } return b.toString(); } @@ -330,10 +308,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.rawField("query", remoteInfo.getQuery().streamInput(), RemoteInfo.QUERY_CONTENT_TYPE.type()); } builder.array("index", getSearchRequest().indices()); - String[] types = getSearchRequest().types(); - if (types.length > 0) { - builder.array("type", types); - } getSearchRequest().source().innerToXContent(builder, params); builder.endObject(); } @@ -341,10 +315,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws // build destination builder.startObject("dest"); builder.field("index", getDestination().index()); - String type = getDestination().type(); - if (type != null && type.equals(MapperService.SINGLE_MAPPING_NAME) == false) { - builder.field("type", getDestination().type()); - } if (getDestination().routing() != null) { builder.field("routing", getDestination().routing()); } @@ -383,11 +353,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (indices != null) { request.getSearchRequest().indices(indices); } - String[] types = extractStringArray(source, "type"); - if (types != null) { - 
deprecationLogger.deprecate("reindex_with_types", TYPES_DEPRECATION_MESSAGE); - request.getSearchRequest().types(types); - } request.setRemoteInfo(buildRemoteInfo(source)); XContentBuilder builder = XContentFactory.contentBuilder(parser.contentType()); builder.map(source); @@ -403,10 +368,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws ObjectParser destParser = new ObjectParser<>("dest"); destParser.declareString(IndexRequest::index, new ParseField("index")); - destParser.declareString((request, type) -> { - deprecationLogger.deprecate("reindex_with_types", TYPES_DEPRECATION_MESSAGE); - request.type(type); - }, new ParseField("type")); destParser.declareString(IndexRequest::routing, new ParseField("routing")); destParser.declareString(IndexRequest::opType, new ParseField("op_type")); destParser.declareString(IndexRequest::setPipeline, new ParseField("pipeline")); diff --git a/server/src/main/java/org/opensearch/index/reindex/ReindexRequestBuilder.java b/server/src/main/java/org/opensearch/index/reindex/ReindexRequestBuilder.java index a8d518414a53d..291acd1e8ad8d 100644 --- a/server/src/main/java/org/opensearch/index/reindex/ReindexRequestBuilder.java +++ b/server/src/main/java/org/opensearch/index/reindex/ReindexRequestBuilder.java @@ -78,14 +78,6 @@ public ReindexRequestBuilder destination(String index) { return this; } - /** - * Set the destination index and type. - */ - public ReindexRequestBuilder destination(String index, String type) { - destination.setIndex(index).setType(type); - return this; - } - /** * Setup reindexing from a remote cluster. */ diff --git a/server/src/main/java/org/opensearch/index/reindex/ScrollableHitSource.java b/server/src/main/java/org/opensearch/index/reindex/ScrollableHitSource.java index c1a8a913b0cf5..f0c720e21a73e 100644 --- a/server/src/main/java/org/opensearch/index/reindex/ScrollableHitSource.java +++ b/server/src/main/java/org/opensearch/index/reindex/ScrollableHitSource.java @@ -253,11 +253,6 @@ public interface Hit { */ String getIndex(); - /** - * The type that the hit has. - */ - String getType(); - /** * The document id of the hit. */ @@ -304,7 +299,6 @@ public interface Hit { */ public static class BasicHit implements Hit { private final String index; - private final String type; private final String id; private final long version; @@ -314,9 +308,8 @@ public static class BasicHit implements Hit { private long seqNo; private long primaryTerm; - public BasicHit(String index, String type, String id, long version) { + public BasicHit(String index, String id, long version) { this.index = index; - this.type = type; this.id = id; this.version = version; } @@ -326,11 +319,6 @@ public String getIndex() { return index; } - @Override - public String getType() { - return type; - } - @Override public String getId() { return id; diff --git a/server/src/main/java/org/opensearch/index/reindex/UpdateByQueryRequest.java b/server/src/main/java/org/opensearch/index/reindex/UpdateByQueryRequest.java index 67675f1d998bd..92bcef8455e63 100644 --- a/server/src/main/java/org/opensearch/index/reindex/UpdateByQueryRequest.java +++ b/server/src/main/java/org/opensearch/index/reindex/UpdateByQueryRequest.java @@ -98,19 +98,6 @@ public UpdateByQueryRequest setQuery(QueryBuilder query) { return this; } - /** - * Set the document types for the update - * @deprecated Types are in the process of being removed. Instead of - * using a type, prefer to filter on a field of the document. 
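With the type keys gone from both the source and dest blocks of reindex, a request narrows its input with a query only. A sketch under the same assumptions (hypothetical index and field names; setSourceQuery is assumed to be the existing query setter on ReindexRequest):

    import org.opensearch.index.query.QueryBuilders;
    import org.opensearch.index.reindex.ReindexRequest;

    public class ReindexWithoutTypesSketch {
        public static ReindexRequest build() {
            ReindexRequest request = new ReindexRequest();
            request.setSourceIndices("old-logs");
            request.setDestIndex("new-logs");
            // setSourceDocTypes/setDestDocType no longer exist; select documents by field instead.
            request.setSourceQuery(QueryBuilders.termQuery("kind", "tweet"));
            return request;
        }
    }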
- */ - @Deprecated - public UpdateByQueryRequest setDocTypes(String... types) { - if (types != null) { - getSearchRequest().types(types); - } - return this; - } - /** * Set routing limiting the process to the shards that match that routing value */ @@ -151,21 +138,6 @@ public String getRouting() { return getSearchRequest().routing(); } - /** - * Gets the document types on which this request would be executed. Returns an empty array if all - * types are to be processed. - * @deprecated Types are in the process of being removed. Instead of - * using a type, prefer to filter on a field of the document. - */ - @Deprecated - public String[] getDocTypes() { - if (getSearchRequest().types() != null) { - return getSearchRequest().types(); - } else { - return new String[0]; - } - } - /** * Ingest pipeline to set on index requests made by this action. */ diff --git a/server/src/main/java/org/opensearch/index/seqno/LocalCheckpointTracker.java b/server/src/main/java/org/opensearch/index/seqno/LocalCheckpointTracker.java index fa73498300c33..7aab597f8816c 100644 --- a/server/src/main/java/org/opensearch/index/seqno/LocalCheckpointTracker.java +++ b/server/src/main/java/org/opensearch/index/seqno/LocalCheckpointTracker.java @@ -230,9 +230,8 @@ public boolean hasProcessed(final long seqNo) { @SuppressForbidden(reason = "Object#notifyAll") private void updateCheckpoint(AtomicLong checkPoint, LongObjectHashMap bitSetMap) { assert Thread.holdsLock(this); - assert getBitSetForSeqNo(bitSetMap, checkPoint.get() + 1).get( - seqNoToBitSetOffset(checkPoint.get() + 1) - ) : "updateCheckpoint is called but the bit following the checkpoint is not set"; + assert getBitSetForSeqNo(bitSetMap, checkPoint.get() + 1).get(seqNoToBitSetOffset(checkPoint.get() + 1)) + : "updateCheckpoint is called but the bit following the checkpoint is not set"; try { // keep it simple for now, get the checkpoint one by one; in the future we can optimize and read words long bitSetKey = getBitSetKey(checkPoint.get()); diff --git a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java index 7a0ece2628a3d..6988437473f1a 100644 --- a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java @@ -845,23 +845,15 @@ private boolean invariant() { assert pendingInSync.isEmpty() || (primaryMode && !handoffInProgress); // the computed global checkpoint is always up-to-date - assert !primaryMode - || globalCheckpoint == computeGlobalCheckpoint( - pendingInSync, - checkpoints.values(), - globalCheckpoint - ) : "global checkpoint is not up-to-date, expected: " + assert !primaryMode || globalCheckpoint == computeGlobalCheckpoint(pendingInSync, checkpoints.values(), globalCheckpoint) + : "global checkpoint is not up-to-date, expected: " + computeGlobalCheckpoint(pendingInSync, checkpoints.values(), globalCheckpoint) + " but was: " + globalCheckpoint; // when in primary mode, the global checkpoint is at most the minimum local checkpoint on all in-sync shard copies - assert !primaryMode - || globalCheckpoint <= inSyncCheckpointStates( - checkpoints, - CheckpointState::getLocalCheckpoint, - LongStream::min - ) : "global checkpoint [" + assert !primaryMode || globalCheckpoint <= inSyncCheckpointStates(checkpoints, CheckpointState::getLocalCheckpoint, LongStream::min) + : "global checkpoint [" + globalCheckpoint + "] " + "for primary mode allocation ID [" @@ -877,11 +869,8 @@ 
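The assertion rewrites in these hunks are purely cosmetic: the boolean condition stays on one line and the ":" message moves to a continuation line. Both shapes compile to the same Java assert statement and run only under -ea, so the reflow cannot change behavior. For example:

    public class AssertStyleSketch {
        public static void main(String[] args) {
            long expected = 7;
            long actual = 7;
            // pre-change wrapping: message fused onto the condition's last line
            assert actual == expected : "checkpoint is not up-to-date, expected: " + expected + " but was: " + actual;
            // post-change wrapping used throughout this patch
            assert actual == expected
                : "checkpoint is not up-to-date, expected: " + expected + " but was: " + actual;
        }
    }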
private boolean invariant() { + " but replication group is " + replicationGroup; - assert replicationGroup == null - || replicationGroup.equals(calculateReplicationGroup()) : "cached replication group out of sync: expected: " - + calculateReplicationGroup() - + " but was: " - + replicationGroup; + assert replicationGroup == null || replicationGroup.equals(calculateReplicationGroup()) + : "cached replication group out of sync: expected: " + calculateReplicationGroup() + " but was: " + replicationGroup; // all assigned shards from the routing table are tracked assert routingTable == null || checkpoints.keySet().containsAll(routingTable.getAllAllocationIds()) : "local checkpoints " @@ -909,9 +898,8 @@ private boolean invariant() { // all tracked shard copies have a corresponding peer-recovery retention lease for (final ShardRouting shardRouting : routingTable.assignedShards()) { if (checkpoints.get(shardRouting.allocationId().getId()).tracked) { - assert retentionLeases.contains( - getPeerRecoveryRetentionLeaseId(shardRouting) - ) : "no retention lease for tracked shard [" + shardRouting + "] in " + retentionLeases; + assert retentionLeases.contains(getPeerRecoveryRetentionLeaseId(shardRouting)) + : "no retention lease for tracked shard [" + shardRouting + "] in " + retentionLeases; assert PEER_RECOVERY_RETENTION_LEASE_SOURCE.equals( retentionLeases.get(getPeerRecoveryRetentionLeaseId(shardRouting)).source() ) : "incorrect source [" @@ -1192,13 +1180,11 @@ public synchronized void updateFromMaster( if (applyingClusterStateVersion > appliedClusterStateVersion) { // check that the master does not fabricate new in-sync entries out of thin air once we are in primary mode assert !primaryMode - || inSyncAllocationIds.stream() - .allMatch( - inSyncId -> checkpoints.containsKey(inSyncId) && checkpoints.get(inSyncId).inSync - ) : "update from master in primary mode contains in-sync ids " - + inSyncAllocationIds - + " that have no matching entries in " - + checkpoints; + || inSyncAllocationIds.stream().allMatch(inSyncId -> checkpoints.containsKey(inSyncId) && checkpoints.get(inSyncId).inSync) + : "update from master in primary mode contains in-sync ids " + + inSyncAllocationIds + + " that have no matching entries in " + + checkpoints; // remove entries which don't exist on master Set initializingAllocationIds = routingTable.getAllInitializingShards() .stream() diff --git a/server/src/main/java/org/opensearch/index/shard/IndexEventListener.java b/server/src/main/java/org/opensearch/index/shard/IndexEventListener.java index 0f44ddbb5e8f7..69f283a53ca79 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexEventListener.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexEventListener.java @@ -101,13 +101,6 @@ default void indexShardStateChanged( @Nullable String reason ) {} - /** - * Called when a shard is marked as inactive - * - * @param indexShard The shard that was marked inactive - */ - default void onShardInactive(IndexShard indexShard) {} - /** * Called before the index gets created. 
Note that this is also called * when the index is created on data nodes diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 9f3520202c131..2ff7cfd67dc56 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -531,9 +531,8 @@ public void updateShardState( assert currentRouting.active() == false : "we are in POST_RECOVERY, but our shard routing is active " + currentRouting; assert currentRouting.isRelocationTarget() == false || currentRouting.primary() == false - || replicationTracker - .isPrimaryMode() : "a primary relocation is completed by the master, but primary mode is not active " - + currentRouting; + || replicationTracker.isPrimaryMode() + : "a primary relocation is completed by the master, but primary mode is not active " + currentRouting; changeState(IndexShardState.STARTED, "global state is [" + newRouting.state() + "]"); } else if (currentRouting.primary() @@ -548,12 +547,8 @@ public void updateShardState( "Shard is marked as relocated, cannot safely move to state " + newRouting.state() ); } - assert newRouting.active() == false - || state == IndexShardState.STARTED - || state == IndexShardState.CLOSED : "routing is active, but local shard state isn't. routing: " - + newRouting - + ", local state: " - + state; + assert newRouting.active() == false || state == IndexShardState.STARTED || state == IndexShardState.CLOSED + : "routing is active, but local shard state isn't. routing: " + newRouting + ", local state: " + state; persistMetadata(path, indexSettings, newRouting, currentRouting, logger); final CountDownLatch shardStateUpdated = new CountDownLatch(1); @@ -741,8 +736,8 @@ public void relocated(final String targetAllocationId, final Consumer { forceRefreshes.close(); // no shard operation permits are being held here, move state from started to relocated - assert indexShardOperationPermits - .getActiveOperationsCount() == OPERATIONS_BLOCKED : "in-flight operations in progress while moving shard state to relocated"; + assert indexShardOperationPermits.getActiveOperationsCount() == OPERATIONS_BLOCKED + : "in-flight operations in progress while moving shard state to relocated"; /* * We should not invoke the runnable under the mutex as the expected implementation is to handoff the primary context via a * network operation. Doing this under the mutex can implicitly block the cluster state update thread on network operations. @@ -1261,7 +1256,7 @@ private Engine.DeleteResult delete(Engine engine, Engine.Delete delete) throws I public Engine.GetResult get(Engine.Get get) { readAllowed(); DocumentMapper mapper = mapperService.documentMapper(); - if (mapper == null || mapper.type().equals(mapperService.resolveDocumentType(get.type())) == false) { + if (mapper == null) { return GetResult.NOT_EXISTS; } return getEngine().get(get, this::acquireSearcher); @@ -1340,7 +1335,7 @@ public IndexingStats indexingStats(String... types) { throttled = engine.isThrottled(); throttleTimeInMillis = engine.getIndexThrottleTimeInMillis(); } - return internalIndexingStats.stats(throttled, throttleTimeInMillis, types); + return internalIndexingStats.stats(throttled, throttleTimeInMillis); } public SearchStats searchStats(String... groups) { @@ -1393,19 +1388,12 @@ public CompletionStats completionStats(String... 
fields) { return getEngine().completionStats(fields); } - public Engine.SyncedFlushResult syncFlush(String syncId, Engine.CommitId expectedCommitId) { - verifyNotClosed(); - logger.trace("trying to sync flush. sync id [{}]. expected commit id [{}]]", syncId, expectedCommitId); - return getEngine().syncFlush(syncId, expectedCommitId); - } - /** * Executes the given flush request against the engine. * * @param request the flush request - * @return the commit ID */ - public Engine.CommitId flush(FlushRequest request) { + public void flush(FlushRequest request) { final boolean waitIfOngoing = request.waitIfOngoing(); final boolean force = request.force(); logger.trace("flush with {}", request); @@ -1416,9 +1404,8 @@ public Engine.CommitId flush(FlushRequest request) { */ verifyNotClosed(); final long time = System.nanoTime(); - final Engine.CommitId commitId = getEngine().flush(force, waitIfOngoing); + getEngine().flush(force, waitIfOngoing); flushMetric.inc(System.nanoTime() - time); - return commitId; } /** @@ -1631,9 +1618,8 @@ private Engine.Searcher acquireSearcher(String source, Engine.SearcherScope scop } private Engine.Searcher wrapSearcher(Engine.Searcher searcher) { - assert OpenSearchDirectoryReader.unwrap( - searcher.getDirectoryReader() - ) != null : "DirectoryReader must be an instance or OpenSearchDirectoryReader"; + assert OpenSearchDirectoryReader.unwrap(searcher.getDirectoryReader()) != null + : "DirectoryReader must be an instance or OpenSearchDirectoryReader"; boolean success = false; try { final Engine.Searcher newSearcher = readerWrapper == null ? searcher : wrapSearcher(searcher, readerWrapper); @@ -2061,8 +2047,8 @@ private void innerOpenEngineAndTranslog(LongSupplier globalCheckpointSupplier) t // but we need to make sure we don't loose deletes until we are done recovering config.setEnableGcDeletes(false); updateRetentionLeasesOnReplica(loadRetentionLeases()); - assert recoveryState.getRecoverySource().expectEmptyRetentionLeases() == false - || getRetentionLeases().leases().isEmpty() : "expected empty set of retention leases with recovery source [" + assert recoveryState.getRecoverySource().expectEmptyRetentionLeases() == false || getRetentionLeases().leases().isEmpty() + : "expected empty set of retention leases with recovery source [" + recoveryState.getRecoverySource() + "] but got " + getRetentionLeases(); @@ -2074,7 +2060,7 @@ private void innerOpenEngineAndTranslog(LongSupplier globalCheckpointSupplier) t onNewEngine(newEngine); currentEngineReference.set(newEngine); // We set active because we are now writing operations to the engine; this way, - // if we go idle after some time and become inactive, we still give sync'd flush a chance to run. + // we can flush if we go idle after some time and become inactive. 
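With synced flush removed there is no commit id left to return, so flush(FlushRequest) becomes a void, side-effect-only call. A hedged sketch of the adjusted call site (the shard reference and the request flags are illustrative):

    import org.opensearch.action.admin.indices.flush.FlushRequest;
    import org.opensearch.index.shard.IndexShard;

    public class FlushCallSiteSketch {
        // before: Engine.CommitId commitId = shard.flush(new FlushRequest());
        // after: invoke for the side effect; observe progress via flush stats instead.
        static void flushNow(IndexShard shard) {
            shard.flush(new FlushRequest().waitIfOngoing(true).force(false));
        }
    }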
active.set(true); } // time elapses after the engine is created above (pulling the config settings) until we set the engine reference, during @@ -2202,9 +2188,8 @@ private void ensureWriteAllowed(Engine.Operation.Origin origin) throws IllegalIn assert assertReplicationTarget(); } else { assert origin == Engine.Operation.Origin.LOCAL_RESET; - assert getActiveOperationsCount() == OPERATIONS_BLOCKED : "locally resetting without blocking operations, active operations are [" - + getActiveOperations() - + "]"; + assert getActiveOperationsCount() == OPERATIONS_BLOCKED + : "locally resetting without blocking operations, active operations are [" + getActiveOperations() + "]"; } if (writeAllowedStates.contains(state) == false) { throw new IllegalIndexShardStateException( @@ -2274,19 +2259,28 @@ public void addShardFailureCallback(Consumer onShardFailure) { /** * Called by {@link IndexingMemoryController} to check whether more than {@code inactiveTimeNS} has passed since the last - * indexing operation, and notify listeners that we are now inactive so e.g. sync'd flush can happen. + * indexing operation, so we can flush the index. */ - public void checkIdle(long inactiveTimeNS) { + public void flushOnIdle(long inactiveTimeNS) { Engine engineOrNull = getEngineOrNull(); if (engineOrNull != null && System.nanoTime() - engineOrNull.getLastWriteNanos() >= inactiveTimeNS) { boolean wasActive = active.getAndSet(false); if (wasActive) { - logger.debug("shard is now inactive"); - try { - indexEventListener.onShardInactive(this); - } catch (Exception e) { - logger.warn("failed to notify index event listener", e); - } + logger.debug("flushing shard on inactive"); + threadPool.executor(ThreadPool.Names.FLUSH).execute(new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + if (state != IndexShardState.CLOSED) { + logger.warn("failed to flush shard on inactive", e); + } + } + + @Override + protected void doRun() { + flush(new FlushRequest().waitIfOngoing(false).force(false)); + periodicFlushMetric.inc(); + } + }); } } } @@ -2422,23 +2416,8 @@ protected void doRun() { /** * Acquires a lock on the translog files and Lucene soft-deleted documents to prevent them from being trimmed */ - public Closeable acquireHistoryRetentionLock(Engine.HistorySource source) { - return getEngine().acquireHistoryRetentionLock(source); - } - - /** - * Returns the estimated number of history operations whose seq# at least the provided seq# in this shard. - */ - public int estimateNumberOfHistoryOperations(String reason, Engine.HistorySource source, long startingSeqNo) throws IOException { - return getEngine().estimateNumberOfHistoryOperations(reason, source, mapperService, startingSeqNo); - } - - /** - * Creates a new history snapshot for reading operations since the provided starting seqno (inclusive). - * The returned snapshot can be retrieved from either Lucene index or translog files. - */ - public Translog.Snapshot getHistoryOperations(String reason, Engine.HistorySource source, long startingSeqNo) throws IOException { - return getEngine().readHistoryOperations(reason, source, mapperService, startingSeqNo); + public Closeable acquireHistoryRetentionLock() { + return getEngine().acquireHistoryRetentionLock(); } /** @@ -2447,17 +2426,16 @@ public Translog.Snapshot getHistoryOperations(String reason, Engine.HistorySourc * the provided starting seqno (inclusive) and ending seqno (inclusive) * The returned snapshot can be retrieved from either Lucene index or translog files. 
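The flushOnIdle rewrite keeps the one-shot behavior of the old inactive-listener path by racing on the active flag: only the first idle sweep after a write wins getAndSet and schedules work, and any indexing operation re-arms the trigger. The handshake in isolation, as a sketch:

    import java.util.concurrent.atomic.AtomicBoolean;

    public class IdleHandshakeSketch {
        private final AtomicBoolean active = new AtomicBoolean(true);

        void onWrite() {
            active.set(true); // every write re-arms the idle trigger
        }

        void onIdleSweep() {
            // only the active -> inactive transition fires; repeated sweeps are no-ops
            if (active.getAndSet(false)) {
                // schedule exactly one asynchronous flush here
            }
        }
    }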
*/ - public Translog.Snapshot getHistoryOperations(String reason, Engine.HistorySource source, long startingSeqNo, long endSeqNo) - throws IOException { - return getEngine().newChangesSnapshot(reason, source, mapperService, startingSeqNo, endSeqNo, true); + public Translog.Snapshot getHistoryOperations(String reason, long startingSeqNo, long endSeqNo) throws IOException { + return getEngine().newChangesSnapshot(reason, mapperService, startingSeqNo, endSeqNo, true); } /** * Checks if we have a completed history of operations since the given starting seqno (inclusive). - * This method should be called after acquiring the retention lock; See {@link #acquireHistoryRetentionLock(Engine.HistorySource)} + * This method should be called after acquiring the retention lock; See {@link #acquireHistoryRetentionLock()} */ - public boolean hasCompleteHistoryOperations(String reason, Engine.HistorySource source, long startingSeqNo) throws IOException { - return getEngine().hasCompleteOperationHistory(reason, source, mapperService, startingSeqNo); + public boolean hasCompleteHistoryOperations(String reason, long startingSeqNo) { + return getEngine().hasCompleteOperationHistory(reason, startingSeqNo); } /** @@ -2647,7 +2625,7 @@ public RetentionLease addRetentionLease( assert assertPrimaryMode(); verifyNotClosed(); ensureSoftDeletesEnabled("retention leases"); - try (Closeable ignore = acquireHistoryRetentionLock(Engine.HistorySource.INDEX)) { + try (Closeable ignore = acquireHistoryRetentionLock()) { final long actualRetainingSequenceNumber = retainingSequenceNumber == RETAIN_ALL ? getMinRetainedSeqNo() : retainingSequenceNumber; @@ -2670,7 +2648,7 @@ public RetentionLease renewRetentionLease(final String id, final long retainingS assert assertPrimaryMode(); verifyNotClosed(); ensureSoftDeletesEnabled("retention leases"); - try (Closeable ignore = acquireHistoryRetentionLock(Engine.HistorySource.INDEX)) { + try (Closeable ignore = acquireHistoryRetentionLock()) { final long actualRetainingSequenceNumber = retainingSequenceNumber == RETAIN_ALL ? getMinRetainedSeqNo() : retainingSequenceNumber; @@ -2905,8 +2883,8 @@ public void updateGlobalCheckpointOnReplica(final long globalCheckpoint, final S * while the global checkpoint update may have emanated from the primary when we were in that state, we could subsequently move * to recovery finalization, or even finished recovery before the update arrives here. 
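The simplified history API above drops the Engine.HistorySource argument, leaving the engine with a single source of operation history; consumers still bracket reads with the retention lock so operations cannot be trimmed mid-iteration. A sketch of the intended pattern, assuming an IndexShard reference and an illustrative reason string:

    import java.io.Closeable;
    import java.io.IOException;
    import org.opensearch.index.shard.IndexShard;
    import org.opensearch.index.translog.Translog;

    public class HistoryReplaySketch {
        static long replay(IndexShard shard, long fromSeqNo, long toSeqNo) throws IOException {
            try (
                Closeable ignored = shard.acquireHistoryRetentionLock();
                Translog.Snapshot snapshot = shard.getHistoryOperations("replay-sketch", fromSeqNo, toSeqNo)
            ) {
                long replayed = 0;
                Translog.Operation op;
                while ((op = snapshot.next()) != null) {
                    replayed++; // ship or re-apply each operation in sequence-number order
                }
                return replayed;
            }
        }
    }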
*/ - assert state() != IndexShardState.POST_RECOVERY - && state() != IndexShardState.STARTED : "supposedly in-sync shard copy received a global checkpoint [" + assert state() != IndexShardState.POST_RECOVERY && state() != IndexShardState.STARTED + : "supposedly in-sync shard copy received a global checkpoint [" + globalCheckpoint + "] " + "that is higher than its local checkpoint [" @@ -2923,9 +2901,8 @@ && state() != IndexShardState.STARTED : "supposedly in-sync shard copy received * @param primaryContext the sequence number context */ public void activateWithPrimaryContext(final ReplicationTracker.PrimaryContext primaryContext) { - assert shardRouting.primary() - && shardRouting.isRelocationTarget() : "only primary relocation target can update allocation IDs from primary context: " - + shardRouting; + assert shardRouting.primary() && shardRouting.isRelocationTarget() + : "only primary relocation target can update allocation IDs from primary context: " + shardRouting; assert primaryContext.getCheckpointStates().containsKey(routingEntry().allocationId().getId()) : "primary context [" + primaryContext + "] does not contain relocation target [" @@ -2967,11 +2944,9 @@ public boolean pendingInSync() { /** * Should be called for each no-op update operation to increment relevant statistics. - * - * @param type the doc type of the update */ - public void noopUpdate(String type) { - internalIndexingStats.noopUpdate(type); + public void noopUpdate() { + internalIndexingStats.noopUpdate(); } public void maybeCheckIndex() { diff --git a/server/src/main/java/org/opensearch/index/shard/IndexingStats.java b/server/src/main/java/org/opensearch/index/shard/IndexingStats.java index 0f64f97a256ee..bdc5373e0b9b3 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexingStats.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexingStats.java @@ -32,7 +32,7 @@ package org.opensearch.index.shard; -import org.opensearch.common.Nullable; +import org.opensearch.Version; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.io.stream.Writeable; @@ -40,9 +40,9 @@ import org.opensearch.common.xcontent.ToXContent; import org.opensearch.common.xcontent.ToXContentFragment; import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.index.mapper.MapperService; import java.io.IOException; -import java.util.HashMap; import java.util.Map; public class IndexingStats implements Writeable, ToXContentFragment { @@ -219,47 +219,30 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws private final Stats totalStats; - @Nullable - private Map typeStats; - public IndexingStats() { totalStats = new Stats(); } public IndexingStats(StreamInput in) throws IOException { totalStats = new Stats(in); - if (in.readBoolean()) { - typeStats = in.readMap(StreamInput::readString, Stats::new); + if (in.getVersion().before(Version.V_2_0_0)) { + if (in.readBoolean()) { + Map typeStats = in.readMap(StreamInput::readString, Stats::new); + assert typeStats.size() == 1; + assert typeStats.containsKey(MapperService.SINGLE_MAPPING_NAME); + } } } - public IndexingStats(Stats totalStats, @Nullable Map typeStats) { + public IndexingStats(Stats totalStats) { this.totalStats = totalStats; - this.typeStats = typeStats; } public void add(IndexingStats indexingStats) { - add(indexingStats, true); - } - - public void add(IndexingStats indexingStats, boolean includeTypes) { if (indexingStats == null) { return; } 
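The constructor above is the read half of the wire-compatibility contract with pre-2.0 peers; the write half in writeTo further down must stay byte-for-byte symmetric by emitting the legacy "no per-type stats" marker. Sketched as a pair of helpers (the class and method names are hypothetical; IndexingStats.Stats is the real nested type):

    import java.io.IOException;
    import org.opensearch.Version;
    import org.opensearch.common.io.stream.StreamInput;
    import org.opensearch.common.io.stream.StreamOutput;
    import org.opensearch.index.shard.IndexingStats;

    public class LegacyTypeStatsWireSketch {
        // read half: consume and discard the legacy per-type map sent by a pre-2.0 node
        static void readLegacy(StreamInput in) throws IOException {
            if (in.getVersion().before(Version.V_2_0_0) && in.readBoolean()) {
                in.readMap(StreamInput::readString, IndexingStats.Stats::new);
            }
        }

        // write half: a pre-2.0 node still expects the boolean marker, so send "absent"
        static void writeLegacy(StreamOutput out) throws IOException {
            if (out.getVersion().before(Version.V_2_0_0)) {
                out.writeBoolean(false);
            }
        }
    }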
addTotals(indexingStats); - if (includeTypes && indexingStats.typeStats != null && !indexingStats.typeStats.isEmpty()) { - if (typeStats == null) { - typeStats = new HashMap<>(indexingStats.typeStats.size()); - } - for (Map.Entry entry : indexingStats.typeStats.entrySet()) { - Stats stats = typeStats.get(entry.getKey()); - if (stats == null) { - typeStats.put(entry.getKey(), entry.getValue()); - } else { - stats.add(entry.getValue()); - } - } - } } public void addTotals(IndexingStats indexingStats) { @@ -273,31 +256,16 @@ public Stats getTotal() { return this.totalStats; } - @Nullable - public Map getTypeStats() { - return this.typeStats; - } - @Override public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { builder.startObject(Fields.INDEXING); totalStats.toXContent(builder, params); - if (typeStats != null && !typeStats.isEmpty()) { - builder.startObject(Fields.TYPES); - for (Map.Entry entry : typeStats.entrySet()) { - builder.startObject(entry.getKey()); - entry.getValue().toXContent(builder, params); - builder.endObject(); - } - builder.endObject(); - } builder.endObject(); return builder; } static final class Fields { static final String INDEXING = "indexing"; - static final String TYPES = "types"; static final String INDEX_TOTAL = "index_total"; static final String INDEX_TIME = "index_time"; static final String INDEX_TIME_IN_MILLIS = "index_time_in_millis"; @@ -316,11 +284,8 @@ static final class Fields { @Override public void writeTo(StreamOutput out) throws IOException { totalStats.writeTo(out); - if (typeStats == null || typeStats.isEmpty()) { + if (out.getVersion().before(Version.V_2_0_0)) { out.writeBoolean(false); - } else { - out.writeBoolean(true); - out.writeMap(typeStats, StreamOutput::writeString, (stream, stats) -> stats.writeTo(stream)); } } } diff --git a/server/src/main/java/org/opensearch/index/shard/InternalIndexingStats.java b/server/src/main/java/org/opensearch/index/shard/InternalIndexingStats.java index ac5ae9a59fc66..76d64ab918163 100644 --- a/server/src/main/java/org/opensearch/index/shard/InternalIndexingStats.java +++ b/server/src/main/java/org/opensearch/index/shard/InternalIndexingStats.java @@ -32,56 +32,33 @@ package org.opensearch.index.shard; -import org.opensearch.common.collect.MapBuilder; import org.opensearch.common.metrics.CounterMetric; import org.opensearch.common.metrics.MeanMetric; -import org.opensearch.common.regex.Regex; import org.opensearch.index.engine.Engine; -import java.util.HashMap; -import java.util.Map; import java.util.concurrent.TimeUnit; -import static java.util.Collections.emptyMap; - /** * Internal class that maintains relevant indexing statistics / metrics. * @see IndexShard */ final class InternalIndexingStats implements IndexingOperationListener { private final StatsHolder totalStats = new StatsHolder(); - private volatile Map typesStats = emptyMap(); /** * Returns the stats, including type specific stats. If the types are null/0 length, then nothing * is returned for them. If they are set, then only types provided will be returned, or * {@code _all} for all types. */ - IndexingStats stats(boolean isThrottled, long currentThrottleInMillis, String... 
types) { + IndexingStats stats(boolean isThrottled, long currentThrottleInMillis) { IndexingStats.Stats total = totalStats.stats(isThrottled, currentThrottleInMillis); - Map typesSt = null; - if (types != null && types.length > 0) { - typesSt = new HashMap<>(typesStats.size()); - if (types.length == 1 && types[0].equals("_all")) { - for (Map.Entry entry : typesStats.entrySet()) { - typesSt.put(entry.getKey(), entry.getValue().stats(isThrottled, currentThrottleInMillis)); - } - } else { - for (Map.Entry entry : typesStats.entrySet()) { - if (Regex.simpleMatch(types, entry.getKey())) { - typesSt.put(entry.getKey(), entry.getValue().stats(isThrottled, currentThrottleInMillis)); - } - } - } - } - return new IndexingStats(total, typesSt); + return new IndexingStats(total); } @Override public Engine.Index preIndex(ShardId shardId, Engine.Index operation) { if (operation.origin().isRecovery() == false) { totalStats.indexCurrent.inc(); - typeStats(operation.type()).indexCurrent.inc(); } return operation; } @@ -94,9 +71,6 @@ public void postIndex(ShardId shardId, Engine.Index index, Engine.IndexResult re long took = result.getTook(); totalStats.indexMetric.inc(took); totalStats.indexCurrent.dec(); - StatsHolder typeStats = typeStats(index.type()); - typeStats.indexMetric.inc(took); - typeStats.indexCurrent.dec(); } break; case FAILURE: @@ -111,9 +85,7 @@ public void postIndex(ShardId shardId, Engine.Index index, Engine.IndexResult re public void postIndex(ShardId shardId, Engine.Index index, Exception ex) { if (!index.origin().isRecovery()) { totalStats.indexCurrent.dec(); - typeStats(index.type()).indexCurrent.dec(); totalStats.indexFailed.inc(); - typeStats(index.type()).indexFailed.inc(); } } @@ -121,7 +93,6 @@ public void postIndex(ShardId shardId, Engine.Index index, Exception ex) { public Engine.Delete preDelete(ShardId shardId, Engine.Delete delete) { if (!delete.origin().isRecovery()) { totalStats.deleteCurrent.inc(); - typeStats(delete.type()).deleteCurrent.inc(); } return delete; @@ -135,9 +106,6 @@ public void postDelete(ShardId shardId, Engine.Delete delete, Engine.DeleteResul long took = result.getTook(); totalStats.deleteMetric.inc(took); totalStats.deleteCurrent.dec(); - StatsHolder typeStats = typeStats(delete.type()); - typeStats.deleteMetric.inc(took); - typeStats.deleteCurrent.dec(); } break; case FAILURE: @@ -152,27 +120,11 @@ public void postDelete(ShardId shardId, Engine.Delete delete, Engine.DeleteResul public void postDelete(ShardId shardId, Engine.Delete delete, Exception ex) { if (!delete.origin().isRecovery()) { totalStats.deleteCurrent.dec(); - typeStats(delete.type()).deleteCurrent.dec(); } } - public void noopUpdate(String type) { + void noopUpdate() { totalStats.noopUpdates.inc(); - typeStats(type).noopUpdates.inc(); - } - - private StatsHolder typeStats(String type) { - StatsHolder stats = typesStats.get(type); - if (stats == null) { - synchronized (this) { - stats = typesStats.get(type); - if (stats == null) { - stats = new StatsHolder(); - typesStats = MapBuilder.newMapBuilder(typesStats).put(type, stats).immutableMap(); - } - } - } - return stats; } static class StatsHolder { diff --git a/server/src/main/java/org/opensearch/index/shard/PrimaryReplicaSyncer.java b/server/src/main/java/org/opensearch/index/shard/PrimaryReplicaSyncer.java index b5e40881cfd43..bbdf948af5c32 100644 --- a/server/src/main/java/org/opensearch/index/shard/PrimaryReplicaSyncer.java +++ b/server/src/main/java/org/opensearch/index/shard/PrimaryReplicaSyncer.java @@ -49,7 +49,6 @@ import 
org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.core.internal.io.IOUtils; -import org.opensearch.index.engine.Engine; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.translog.Translog; import org.opensearch.tasks.Task; @@ -99,16 +98,13 @@ public void resync(final IndexShard indexShard, final ActionListener Translog.Snapshot snapshot = null; try { final long startingSeqNo = indexShard.getLastKnownGlobalCheckpoint() + 1; + assert startingSeqNo >= 0 : "startingSeqNo must be non-negative; got [" + startingSeqNo + "]"; final long maxSeqNo = indexShard.seqNoStats().getMaxSeqNo(); final ShardId shardId = indexShard.shardId(); // Wrap translog snapshot to make it synchronized as it is accessed by different threads through SnapshotSender. // Even though those calls are not concurrent, snapshot.next() uses non-synchronized state and is not multi-thread-compatible // Also fail the resync early if the shard is shutting down - snapshot = indexShard.getHistoryOperations( - "resync", - indexShard.indexSettings.isSoftDeleteEnabled() ? Engine.HistorySource.INDEX : Engine.HistorySource.TRANSLOG, - startingSeqNo - ); + snapshot = indexShard.newChangesSnapshot("resync", startingSeqNo, Long.MAX_VALUE, false); final Translog.Snapshot originalSnapshot = snapshot; final Translog.Snapshot wrappedSnapshot = new Translog.Snapshot() { @Override diff --git a/server/src/main/java/org/opensearch/index/shard/ReplicationGroup.java b/server/src/main/java/org/opensearch/index/shard/ReplicationGroup.java index 13aef9587aea6..fc52d09361281 100644 --- a/server/src/main/java/org/opensearch/index/shard/ReplicationGroup.java +++ b/server/src/main/java/org/opensearch/index/shard/ReplicationGroup.java @@ -85,9 +85,8 @@ public ReplicationGroup( replicationTargets.add(relocationTarget); } else { skippedShards.add(relocationTarget); - assert inSyncAllocationIds.contains( - relocationTarget.allocationId().getId() - ) == false : "in-sync shard copy but not tracked: " + shard; + assert inSyncAllocationIds.contains(relocationTarget.allocationId().getId()) == false + : "in-sync shard copy but not tracked: " + shard; } } } diff --git a/server/src/main/java/org/opensearch/index/shard/ShardPath.java b/server/src/main/java/org/opensearch/index/shard/ShardPath.java index 3d49a3c730700..39f86ea362bb1 100644 --- a/server/src/main/java/org/opensearch/index/shard/ShardPath.java +++ b/server/src/main/java/org/opensearch/index/shard/ShardPath.java @@ -62,17 +62,12 @@ public final class ShardPath { public ShardPath(boolean isCustomDataPath, Path dataPath, Path shardStatePath, ShardId shardId) { assert dataPath.getFileName().toString().equals(Integer.toString(shardId.id())) : "dataPath must end with the shard ID but didn't: " + dataPath.toString(); - assert shardStatePath.getFileName() - .toString() - .equals(Integer.toString(shardId.id())) : "shardStatePath must end with the shard ID but didn't: " + dataPath.toString(); - assert dataPath.getParent() - .getFileName() - .toString() - .equals(shardId.getIndex().getUUID()) : "dataPath must end with index path id but didn't: " + dataPath.toString(); - assert shardStatePath.getParent() - .getFileName() - .toString() - .equals(shardId.getIndex().getUUID()) : "shardStatePath must end with index path id but didn't: " + dataPath.toString(); + assert shardStatePath.getFileName().toString().equals(Integer.toString(shardId.id())) + : "shardStatePath must end with the shard ID but didn't: " + 
dataPath.toString(); + assert dataPath.getParent().getFileName().toString().equals(shardId.getIndex().getUUID()) + : "dataPath must end with index path id but didn't: " + dataPath.toString(); + assert shardStatePath.getParent().getFileName().toString().equals(shardId.getIndex().getUUID()) + : "shardStatePath must end with index path id but didn't: " + dataPath.toString(); if (isCustomDataPath && dataPath.equals(shardStatePath)) { throw new IllegalArgumentException("shard state path must be different to the data path when using custom data paths"); } diff --git a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java index 2b0f58ffe6b35..485d43d9a470f 100644 --- a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java @@ -103,8 +103,8 @@ final class StoreRecovery { void recoverFromStore(final IndexShard indexShard, ActionListener listener) { if (canRecover(indexShard)) { RecoverySource.Type recoveryType = indexShard.recoveryState().getRecoverySource().getType(); - assert recoveryType == RecoverySource.Type.EMPTY_STORE - || recoveryType == RecoverySource.Type.EXISTING_STORE : "expected store recovery type but was: " + recoveryType; + assert recoveryType == RecoverySource.Type.EMPTY_STORE || recoveryType == RecoverySource.Type.EXISTING_STORE + : "expected store recovery type but was: " + recoveryType; ActionListener.completeWith(recoveryListener(indexShard, listener), () -> { logger.debug("starting recovery from store ..."); internalRecoverFromStore(indexShard); diff --git a/server/src/main/java/org/opensearch/index/store/Store.java b/server/src/main/java/org/opensearch/index/store/Store.java index 3fa2e0040db80..893d87e874b4a 100644 --- a/server/src/main/java/org/opensearch/index/store/Store.java +++ b/server/src/main/java/org/opensearch/index/store/Store.java @@ -50,7 +50,6 @@ import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.BufferedChecksum; -import org.apache.lucene.store.ByteArrayDataInput; import org.apache.lucene.store.ChecksumIndexInput; import org.apache.lucene.store.Directory; import org.apache.lucene.store.FilterDirectory; @@ -67,6 +66,7 @@ import org.opensearch.common.UUIDs; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.io.Streams; +import org.opensearch.common.io.stream.BytesStreamInput; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -1126,9 +1126,8 @@ public RecoveryDiff recoveryDiff(MetadataSnapshot recoveryTargetSnapshot) { Collections.unmodifiableList(different), Collections.unmodifiableList(missing) ); - assert recoveryDiff.size() == this.metadata.size() - (metadata.containsKey(IndexFileNames.OLD_SEGMENTS_GEN) - ? 1 - : 0) : "some files are missing recoveryDiff size: [" + assert recoveryDiff.size() == this.metadata.size() - (metadata.containsKey(IndexFileNames.OLD_SEGMENTS_GEN) ? 
1 : 0) + : "some files are missing recoveryDiff size: [" + recoveryDiff.size() + "] metadata size: [" + this.metadata.size() @@ -1457,11 +1456,11 @@ public IndexInput slice(String sliceDescription, long offset, long length) throw throw new UnsupportedOperationException(); } - public long getStoredChecksum() { - return new ByteArrayDataInput(checksum).readLong(); + public long getStoredChecksum() throws IOException { + return new BytesStreamInput(checksum).readLong(); } - public long verify() throws CorruptIndexException { + public long verify() throws CorruptIndexException, IOException { long storedChecksum = getStoredChecksum(); if (getChecksum() == storedChecksum) { return storedChecksum; diff --git a/server/src/main/java/org/opensearch/index/termvectors/TermVectorsService.java b/server/src/main/java/org/opensearch/index/termvectors/TermVectorsService.java index 9ba59264df7ad..50cd160ecb00d 100644 --- a/server/src/main/java/org/opensearch/index/termvectors/TermVectorsService.java +++ b/server/src/main/java/org/opensearch/index/termvectors/TermVectorsService.java @@ -93,11 +93,7 @@ public static TermVectorsResponse getTermVectors(IndexShard indexShard, TermVect static TermVectorsResponse getTermVectors(IndexShard indexShard, TermVectorsRequest request, LongSupplier nanoTimeSupplier) { final long startTime = nanoTimeSupplier.getAsLong(); - final TermVectorsResponse termVectorsResponse = new TermVectorsResponse( - indexShard.shardId().getIndex().getName(), - request.type(), - request.id() - ); + final TermVectorsResponse termVectorsResponse = new TermVectorsResponse(indexShard.shardId().getIndex().getName(), request.id()); final Term uidTerm = new Term(IdFieldMapper.NAME, Uid.encodeId(request.id())); Fields termVectorsByField = null; @@ -110,7 +106,7 @@ static TermVectorsResponse getTermVectors(IndexShard indexShard, TermVectorsRequ try ( Engine.GetResult get = indexShard.get( - new Engine.Get(request.realtime(), false, request.type(), request.id(), uidTerm).version(request.version()) + new Engine.Get(request.realtime(), false, request.id(), uidTerm).version(request.version()) .versionType(request.versionType()) ); Engine.Searcher searcher = indexShard.acquireSearcher("term_vector") @@ -238,7 +234,7 @@ private static Fields addGeneratedTermVectors( /* generate term vectors from fetched document fields */ String[] getFields = validFields.toArray(new String[validFields.size() + 1]); getFields[getFields.length - 1] = SourceFieldMapper.NAME; - GetResult getResult = indexShard.getService().get(get, request.id(), request.type(), getFields, null); + GetResult getResult = indexShard.getService().get(get, request.id(), getFields, null); Fields generatedTermVectors = generateTermVectors( indexShard, getResult.sourceAsMap(), @@ -329,7 +325,6 @@ private static Fields generateTermVectorsFromDoc(IndexShard indexShard, TermVect ParsedDocument parsedDocument = parseDocument( indexShard, indexShard.shardId().getIndexName(), - request.type(), request.doc(), request.xContentType(), request.routing() @@ -389,15 +384,14 @@ public static String[] getValues(IndexableField[] fields) { private static ParsedDocument parseDocument( IndexShard indexShard, String index, - String type, BytesReference doc, XContentType xContentType, String routing ) { MapperService mapperService = indexShard.mapperService(); - DocumentMapperForType docMapper = mapperService.documentMapperWithAutoCreate(type); + DocumentMapperForType docMapper = mapperService.documentMapperWithAutoCreate(MapperService.SINGLE_MAPPING_NAME); ParsedDocument 
parsedDocument = docMapper.getDocumentMapper() - .parse(new SourceToParse(index, type, "_id_for_tv_api", doc, xContentType, routing)); + .parse(new SourceToParse(index, MapperService.SINGLE_MAPPING_NAME, "_id_for_tv_api", doc, xContentType, routing)); if (docMapper.getMapping() != null) { parsedDocument.addDynamicMappingsUpdate(docMapper.getMapping()); } diff --git a/server/src/main/java/org/opensearch/index/translog/Translog.java b/server/src/main/java/org/opensearch/index/translog/Translog.java index ff16eb237a500..dc7a2bb331808 100644 --- a/server/src/main/java/org/opensearch/index/translog/Translog.java +++ b/server/src/main/java/org/opensearch/index/translog/Translog.java @@ -199,10 +199,8 @@ public Translog( // // For this to happen we must have already copied the translog.ckp file into translog-gen.ckp so we first check if that // file exists. If not we don't even try to clean it up and wait until we fail creating it - assert Files.exists(nextTranslogFile) == false - || Files.size(nextTranslogFile) <= TranslogHeader.headerSizeInBytes(translogUUID) : "unexpected translog file: [" - + nextTranslogFile - + "]"; + assert Files.exists(nextTranslogFile) == false || Files.size(nextTranslogFile) <= TranslogHeader.headerSizeInBytes(translogUUID) + : "unexpected translog file: [" + nextTranslogFile + "]"; if (Files.exists(currentCheckpointFile) // current checkpoint is already copied && Files.deleteIfExists(nextTranslogFile)) { // delete it and log a warning logger.warn( @@ -399,7 +397,8 @@ private static boolean calledFromOutsideOrViaTragedyClose() { @Override public void close() throws IOException { - assert calledFromOutsideOrViaTragedyClose() : "Translog.close method is called from inside Translog, but not via closeOnTragicEvent method"; + assert calledFromOutsideOrViaTragedyClose() + : "Translog.close method is called from inside Translog, but not via closeOnTragicEvent method"; if (closed.compareAndSet(false, true)) { try (ReleasableLock lock = writeLock.acquire()) { try { @@ -439,11 +438,8 @@ public long getMinFileGeneration() { if (readers.isEmpty()) { return current.getGeneration(); } else { - assert readers.stream() - .map(TranslogReader::getGeneration) - .min(Long::compareTo) - .get() - .equals(readers.get(0).getGeneration()) : "the first translog isn't the one with the minimum generation:" + readers; + assert readers.stream().map(TranslogReader::getGeneration).min(Long::compareTo).get().equals(readers.get(0).getGeneration()) + : "the first translog isn't the one with the minimum generation:" + readers; return readers.get(0).getGeneration(); } } @@ -740,10 +736,8 @@ private Snapshot newMultiSnapshot(TranslogSnapshot[] snapshots) throws IOExcepti if (snapshots.length == 0) { onClose = () -> {}; } else { - assert Arrays.stream(snapshots) - .map(BaseTranslogReader::getGeneration) - .min(Long::compareTo) - .get() == snapshots[0].generation : "first reader generation of " + snapshots + " is not the smallest"; + assert Arrays.stream(snapshots).map(BaseTranslogReader::getGeneration).min(Long::compareTo).get() == snapshots[0].generation + : "first reader generation of " + snapshots + " is not the smallest"; onClose = acquireTranslogGenFromDeletionPolicy(snapshots[0].generation); } boolean success = false; @@ -759,8 +753,8 @@ private Snapshot newMultiSnapshot(TranslogSnapshot[] snapshots) throws IOExcepti } private Stream readersAboveMinSeqNo(long minSeqNo) { - assert readLock.isHeldByCurrentThread() - || writeLock.isHeldByCurrentThread() : "callers of readersAboveMinSeqNo must hold a lock: 
readLock [" + assert readLock.isHeldByCurrentThread() || writeLock.isHeldByCurrentThread() + : "callers of readersAboveMinSeqNo must hold a lock: readLock [" + readLock.isHeldByCurrentThread() + "], writeLock [" + readLock.isHeldByCurrentThread() @@ -1806,8 +1800,8 @@ public void trimUnreferencedReaders() throws IOException { current.sync(); deleteReaderFiles(reader); } - assert readers.isEmpty() == false - || current.generation == minReferencedGen : "all readers were cleaned but the minReferenceGen [" + assert readers.isEmpty() == false || current.generation == minReferencedGen + : "all readers were cleaned but the minReferenceGen [" + minReferencedGen + "] is not the current writer's gen [" + current.generation diff --git a/server/src/main/java/org/opensearch/indices/IndexingMemoryController.java b/server/src/main/java/org/opensearch/indices/IndexingMemoryController.java index 6e74882fdac37..1917a224e5410 100644 --- a/server/src/main/java/org/opensearch/indices/IndexingMemoryController.java +++ b/server/src/main/java/org/opensearch/indices/IndexingMemoryController.java @@ -329,7 +329,7 @@ private void runUnlocked() { long totalBytesWriting = 0; for (IndexShard shard : availableShards()) { - // Give shard a chance to transition to inactive so sync'd flush can happen: + // Give shard a chance to transition to inactive so we can flush: checkIdle(shard, inactiveTime.nanos()); // How many bytes this shard is currently (async'd) moving from heap to disk: @@ -443,7 +443,7 @@ private void runUnlocked() { */ protected void checkIdle(IndexShard shard, long inactiveTimeNS) { try { - shard.checkIdle(inactiveTimeNS); + shard.flushOnIdle(inactiveTimeNS); } catch (AlreadyClosedException e) { logger.trace(() -> new ParameterizedMessage("ignore exception while checking if shard {} is inactive", shard.shardId()), e); } diff --git a/server/src/main/java/org/opensearch/indices/IndicesModule.java b/server/src/main/java/org/opensearch/indices/IndicesModule.java index 49cedb6a3723c..50ead5519d574 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesModule.java +++ b/server/src/main/java/org/opensearch/indices/IndicesModule.java @@ -72,7 +72,6 @@ import org.opensearch.index.seqno.GlobalCheckpointSyncAction; import org.opensearch.index.shard.PrimaryReplicaSyncer; import org.opensearch.indices.cluster.IndicesClusterStateService; -import org.opensearch.indices.flush.SyncedFlushService; import org.opensearch.indices.mapper.MapperRegistry; import org.opensearch.indices.replication.checkpoint.TransportPublishShardCheckpointAction; import org.opensearch.indices.store.IndicesStore; @@ -271,7 +270,6 @@ private static Function> and( protected void configure() { bind(IndicesStore.class).asEagerSingleton(); bind(IndicesClusterStateService.class).asEagerSingleton(); - bind(SyncedFlushService.class).asEagerSingleton(); bind(TransportNodesListShardStoreMetadata.class).asEagerSingleton(); bind(GlobalCheckpointSyncAction.class).asEagerSingleton(); bind(TransportResyncReplicationAction.class).asEagerSingleton(); diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index c64ebf82c6359..5bf0cd6a2faf7 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -865,8 +865,8 @@ public IndexShard createShard( recoveryListener, repositoriesService, (type, mapping) -> { - assert recoveryState.getRecoverySource() - .getType() == 
RecoverySource.Type.LOCAL_SHARDS : "mapping update consumer only required by local shards recovery"; + assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS + : "mapping update consumer only required by local shards recovery"; client.admin() .indices() .preparePutMapping() diff --git a/server/src/main/java/org/opensearch/indices/breaker/HierarchyCircuitBreakerService.java b/server/src/main/java/org/opensearch/indices/breaker/HierarchyCircuitBreakerService.java index 52b8414159c1d..ec5d1edf3c448 100644 --- a/server/src/main/java/org/opensearch/indices/breaker/HierarchyCircuitBreakerService.java +++ b/server/src/main/java/org/opensearch/indices/breaker/HierarchyCircuitBreakerService.java @@ -140,26 +140,6 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService { Property.NodeScope ); - public static final Setting ACCOUNTING_CIRCUIT_BREAKER_LIMIT_SETTING = Setting.memorySizeSetting( - "indices.breaker.accounting.limit", - "100%", - Property.Dynamic, - Property.NodeScope - ); - public static final Setting ACCOUNTING_CIRCUIT_BREAKER_OVERHEAD_SETTING = Setting.doubleSetting( - "indices.breaker.accounting.overhead", - 1.0d, - 0.0d, - Property.Dynamic, - Property.NodeScope - ); - public static final Setting ACCOUNTING_CIRCUIT_BREAKER_TYPE_SETTING = new Setting<>( - "indices.breaker.accounting.type", - "memory", - CircuitBreaker.Type::parseValue, - Property.NodeScope - ); - public static final Setting IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING = Setting.memorySizeSetting( "network.breaker.inflight_requests.limit", "100%", @@ -236,18 +216,6 @@ public HierarchyCircuitBreakerService(Settings settings, List c ) ) ); - childCircuitBreakers.put( - CircuitBreaker.ACCOUNTING, - validateAndCreateBreaker( - new BreakerSettings( - CircuitBreaker.ACCOUNTING, - ACCOUNTING_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).getBytes(), - ACCOUNTING_CIRCUIT_BREAKER_OVERHEAD_SETTING.get(settings), - ACCOUNTING_CIRCUIT_BREAKER_TYPE_SETTING.get(settings), - CircuitBreaker.Durability.PERMANENT - ) - ) - ); for (BreakerSettings breakerSettings : customBreakers) { if (childCircuitBreakers.containsKey(breakerSettings.getName())) { throw new IllegalArgumentException( @@ -290,11 +258,6 @@ public HierarchyCircuitBreakerService(Settings settings, List c REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, (limit, overhead) -> updateCircuitBreakerSettings(CircuitBreaker.REQUEST, limit, overhead) ); - clusterSettings.addSettingsUpdateConsumer( - ACCOUNTING_CIRCUIT_BREAKER_LIMIT_SETTING, - ACCOUNTING_CIRCUIT_BREAKER_OVERHEAD_SETTING, - (limit, overhead) -> updateCircuitBreakerSettings(CircuitBreaker.ACCOUNTING, limit, overhead) - ); clusterSettings.addAffixUpdateConsumer( CIRCUIT_BREAKER_LIMIT_SETTING, CIRCUIT_BREAKER_OVERHEAD_SETTING, diff --git a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java index 58f8fabd292c2..b28488b9f554d 100644 --- a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java @@ -76,7 +76,6 @@ import org.opensearch.index.shard.ShardId; import org.opensearch.index.shard.ShardNotFoundException; import org.opensearch.indices.IndicesService; -import org.opensearch.indices.flush.SyncedFlushService; import org.opensearch.indices.recovery.PeerRecoverySourceService; import org.opensearch.indices.recovery.PeerRecoveryTargetService; import 
org.opensearch.indices.recovery.RecoveryFailedException; @@ -151,7 +150,6 @@ public IndicesClusterStateService( final NodeMappingRefreshAction nodeMappingRefreshAction, final RepositoriesService repositoriesService, final SearchService searchService, - final SyncedFlushService syncedFlushService, final PeerRecoverySourceService peerRecoverySourceService, final SnapshotShardsService snapshotShardsService, final PrimaryReplicaSyncer primaryReplicaSyncer, @@ -172,7 +170,6 @@ public IndicesClusterStateService( nodeMappingRefreshAction, repositoriesService, searchService, - syncedFlushService, peerRecoverySourceService, snapshotShardsService, primaryReplicaSyncer, @@ -194,7 +191,6 @@ public IndicesClusterStateService( final NodeMappingRefreshAction nodeMappingRefreshAction, final RepositoriesService repositoriesService, final SearchService searchService, - final SyncedFlushService syncedFlushService, final PeerRecoverySourceService peerRecoverySourceService, final SnapshotShardsService snapshotShardsService, final PrimaryReplicaSyncer primaryReplicaSyncer, @@ -204,13 +200,7 @@ public IndicesClusterStateService( this.settings = settings; this.segmentReplicationReplicaService = replicationReplicaService; this.replicationSource = replicationSource; - this.buildInIndexListener = Arrays.asList( - peerRecoverySourceService, - recoveryTargetService, - searchService, - syncedFlushService, - snapshotShardsService - ); + this.buildInIndexListener = Arrays.asList(peerRecoverySourceService, recoveryTargetService, searchService, snapshotShardsService); this.indicesService = indicesService; this.clusterService = clusterService; this.threadPool = threadPool; @@ -664,13 +654,12 @@ private void updateShard( ClusterState clusterState ) { final ShardRouting currentRoutingEntry = shard.routingEntry(); - assert currentRoutingEntry.isSameAllocation( - shardRouting - ) : "local shard has a different allocation id but wasn't cleaned by removeShards. " - + "cluster state: " - + shardRouting - + " local: " - + currentRoutingEntry; + assert currentRoutingEntry.isSameAllocation(shardRouting) + : "local shard has a different allocation id but wasn't cleaned by removeShards. " + + "cluster state: " + + shardRouting + + " local: " + + currentRoutingEntry; final long primaryTerm; try { diff --git a/server/src/main/java/org/opensearch/indices/flush/ShardsSyncedFlushResult.java b/server/src/main/java/org/opensearch/indices/flush/ShardsSyncedFlushResult.java deleted file mode 100644 index 9bc5b3f7a7720..0000000000000 --- a/server/src/main/java/org/opensearch/indices/flush/ShardsSyncedFlushResult.java +++ /dev/null @@ -1,179 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.indices.flush; - -import org.opensearch.cluster.routing.ShardRouting; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.common.io.stream.Writeable; -import org.opensearch.index.shard.ShardId; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; - -import static java.util.Collections.emptyMap; -import static java.util.Collections.unmodifiableMap; - -/** - * Result for all copies of a shard - */ -public class ShardsSyncedFlushResult implements Writeable { - private String failureReason; - private Map shardResponses; - private String syncId; - private ShardId shardId; - // some shards may be unassigned, so we need this as state - private int totalShards; - - public ShardsSyncedFlushResult(StreamInput in) throws IOException { - failureReason = in.readOptionalString(); - int numResponses = in.readInt(); - shardResponses = new HashMap<>(); - for (int i = 0; i < numResponses; i++) { - ShardRouting shardRouting = new ShardRouting(in); - SyncedFlushService.ShardSyncedFlushResponse response = SyncedFlushService.ShardSyncedFlushResponse.readSyncedFlushResponse(in); - shardResponses.put(shardRouting, response); - } - syncId = in.readOptionalString(); - shardId = new ShardId(in); - totalShards = in.readInt(); - } - - public ShardId getShardId() { - return shardId; - } - - /** - * failure constructor - */ - public ShardsSyncedFlushResult(ShardId shardId, int totalShards, String failureReason) { - this.syncId = null; - this.failureReason = failureReason; - this.shardResponses = emptyMap(); - this.shardId = shardId; - this.totalShards = totalShards; - } - - /** - * success constructor - */ - public ShardsSyncedFlushResult( - ShardId shardId, - String syncId, - int totalShards, - Map shardResponses - ) { - this.failureReason = null; - this.shardResponses = unmodifiableMap(new HashMap<>(shardResponses)); - this.syncId = syncId; - this.totalShards = totalShards; - this.shardId = shardId; - } - - /** - * @return true if the operation failed before reaching step three of synced flush. {@link #failureReason()} can be used for - * more details - */ - public boolean failed() { - return failureReason != null; - } - - /** - * @return the reason for the failure if synced flush failed before step three of synced flush - */ - public String failureReason() { - return failureReason; - } - - public String syncId() { - return syncId; - } - - /** - * @return total number of shards for which a sync attempt was made - */ - public int totalShards() { - return totalShards; - } - - /** - * @return total number of successful shards - */ - public int successfulShards() { - int i = 0; - for (SyncedFlushService.ShardSyncedFlushResponse result : shardResponses.values()) { - if (result.success()) { - i++; - } - } - return i; - } - - /** - * @return an array of shard failures - */ - public Map failedShards() { - Map failures = new HashMap<>(); - for (Map.Entry result : shardResponses.entrySet()) { - if (result.getValue().success() == false) { - failures.put(result.getKey(), result.getValue()); - } - } - return failures; - } - - /** - * @return Individual responses for each shard copy with a detailed failure message if the copy failed to perform the synced flush. 
- * Empty if synced flush failed before step three. - */ - public Map shardResponses() { - return shardResponses; - } - - public ShardId shardId() { - return shardId; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeOptionalString(failureReason); - out.writeInt(shardResponses.size()); - for (Map.Entry entry : shardResponses.entrySet()) { - entry.getKey().writeTo(out); - entry.getValue().writeTo(out); - } - out.writeOptionalString(syncId); - shardId.writeTo(out); - out.writeInt(totalShards); - } -} diff --git a/server/src/main/java/org/opensearch/indices/flush/SyncedFlushService.java b/server/src/main/java/org/opensearch/indices/flush/SyncedFlushService.java deleted file mode 100644 index 88c1fd03d9c54..0000000000000 --- a/server/src/main/java/org/opensearch/indices/flush/SyncedFlushService.java +++ /dev/null @@ -1,891 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.indices.flush; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.LegacyESVersion; -import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionListener; -import org.opensearch.action.StepListener; -import org.opensearch.action.admin.indices.flush.FlushRequest; -import org.opensearch.action.admin.indices.flush.SyncedFlushResponse; -import org.opensearch.action.support.IndicesOptions; -import org.opensearch.cluster.ClusterState; -import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.cluster.metadata.IndexNameExpressionResolver; -import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.cluster.routing.IndexShardRoutingTable; -import org.opensearch.cluster.routing.ShardRouting; -import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.Nullable; -import org.opensearch.common.Strings; -import org.opensearch.common.UUIDs; -import org.opensearch.common.inject.Inject; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.common.logging.DeprecationLogger; -import org.opensearch.common.util.concurrent.AbstractRunnable; -import org.opensearch.common.util.concurrent.ConcurrentCollections; -import org.opensearch.common.util.concurrent.CountDown; -import org.opensearch.index.Index; -import org.opensearch.index.IndexNotFoundException; -import org.opensearch.index.IndexService; -import org.opensearch.index.engine.CommitStats; -import org.opensearch.index.engine.Engine; -import org.opensearch.index.shard.IndexEventListener; -import org.opensearch.index.shard.IndexShard; -import org.opensearch.index.shard.IndexShardState; -import org.opensearch.index.shard.ShardId; -import org.opensearch.index.shard.ShardNotFoundException; -import org.opensearch.indices.IndexClosedException; -import org.opensearch.indices.IndicesService; -import org.opensearch.tasks.Task; -import org.opensearch.threadpool.ThreadPool; -import org.opensearch.transport.TransportChannel; -import org.opensearch.transport.TransportException; -import org.opensearch.transport.TransportRequest; -import org.opensearch.transport.TransportRequestHandler; -import org.opensearch.transport.TransportResponse; -import org.opensearch.transport.TransportResponseHandler; -import org.opensearch.transport.TransportService; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ConcurrentMap; - -public class SyncedFlushService implements IndexEventListener { - - private static final Logger logger = LogManager.getLogger(SyncedFlushService.class); - - private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(logger.getName()); - - public static final String SYNCED_FLUSH_DEPRECATION_MESSAGE = - "Synced flush is deprecated and will be removed in 8.0. 
Use flush at _/flush or /{index}/_flush instead."; - - private static final String PRE_SYNCED_FLUSH_ACTION_NAME = "internal:indices/flush/synced/pre"; - private static final String SYNCED_FLUSH_ACTION_NAME = "internal:indices/flush/synced/sync"; - private static final String IN_FLIGHT_OPS_ACTION_NAME = "internal:indices/flush/synced/in_flight"; - - private final IndicesService indicesService; - private final ClusterService clusterService; - private final TransportService transportService; - private final IndexNameExpressionResolver indexNameExpressionResolver; - - @Inject - public SyncedFlushService( - IndicesService indicesService, - ClusterService clusterService, - TransportService transportService, - IndexNameExpressionResolver indexNameExpressionResolver - ) { - this.indicesService = indicesService; - this.clusterService = clusterService; - this.transportService = transportService; - this.indexNameExpressionResolver = indexNameExpressionResolver; - transportService.registerRequestHandler( - PRE_SYNCED_FLUSH_ACTION_NAME, - ThreadPool.Names.FLUSH, - PreShardSyncedFlushRequest::new, - new PreSyncedFlushTransportHandler() - ); - transportService.registerRequestHandler( - SYNCED_FLUSH_ACTION_NAME, - ThreadPool.Names.FLUSH, - ShardSyncedFlushRequest::new, - new SyncedFlushTransportHandler() - ); - transportService.registerRequestHandler( - IN_FLIGHT_OPS_ACTION_NAME, - ThreadPool.Names.SAME, - InFlightOpsRequest::new, - new InFlightOpCountTransportHandler() - ); - } - - @Override - public void onShardInactive(final IndexShard indexShard) { - // A normal flush has the same effect as a synced flush if all nodes are on 7.6 or later. - final boolean preferNormalFlush = clusterService.state().nodes().getMinNodeVersion().onOrAfter(LegacyESVersion.V_7_6_0); - if (preferNormalFlush) { - performNormalFlushOnInactive(indexShard); - } else if (indexShard.routingEntry().primary()) { - // we only want to call sync flush once, so only trigger it when we are on a primary - attemptSyncedFlush(indexShard.shardId(), new ActionListener() { - @Override - public void onResponse(ShardsSyncedFlushResult syncedFlushResult) { - logger.trace( - "{} sync flush on inactive shard returned successfully for sync_id: {}", - syncedFlushResult.getShardId(), - syncedFlushResult.syncId() - ); - } - - @Override - public void onFailure(Exception e) { - logger.debug(() -> new ParameterizedMessage("{} sync flush on inactive shard failed", indexShard.shardId()), e); - } - }); - } - } - - private void performNormalFlushOnInactive(IndexShard shard) { - logger.debug("flushing shard {} on inactive", shard.routingEntry()); - shard.getThreadPool().executor(ThreadPool.Names.FLUSH).execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - if (shard.state() != IndexShardState.CLOSED) { - logger.warn(new ParameterizedMessage("failed to flush shard {} on inactive", shard.routingEntry()), e); - } - } - - @Override - protected void doRun() { - shard.flush(new FlushRequest().force(false).waitIfOngoing(false)); - } - }); - } - - /** - * a utility method to perform a synced flush for all shards of multiple indices. - * see {@link #attemptSyncedFlush(ShardId, ActionListener)} - * for more details. 
- */ - public void attemptSyncedFlush( - final String[] aliasesOrIndices, - IndicesOptions indicesOptions, - final ActionListener listener - ) { - final ClusterState state = clusterService.state(); - if (state.nodes().getMinNodeVersion().onOrAfter(LegacyESVersion.V_7_6_0)) { - DEPRECATION_LOGGER.deprecate("synced_flush", SYNCED_FLUSH_DEPRECATION_MESSAGE); - } - final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, indicesOptions, aliasesOrIndices); - final Map> results = ConcurrentCollections.newConcurrentMap(); - int numberOfShards = 0; - for (Index index : concreteIndices) { - final IndexMetadata indexMetadata = state.metadata().getIndexSafe(index); - numberOfShards += indexMetadata.getNumberOfShards(); - results.put(index.getName(), Collections.synchronizedList(new ArrayList<>())); - - } - if (numberOfShards == 0) { - listener.onResponse(new SyncedFlushResponse(results)); - return; - } - final CountDown countDown = new CountDown(numberOfShards); - - for (final Index concreteIndex : concreteIndices) { - final String index = concreteIndex.getName(); - final IndexMetadata indexMetadata = state.metadata().getIndexSafe(concreteIndex); - final int indexNumberOfShards = indexMetadata.getNumberOfShards(); - for (int shard = 0; shard < indexNumberOfShards; shard++) { - final ShardId shardId = new ShardId(indexMetadata.getIndex(), shard); - innerAttemptSyncedFlush(shardId, state, new ActionListener() { - @Override - public void onResponse(ShardsSyncedFlushResult syncedFlushResult) { - results.get(index).add(syncedFlushResult); - if (countDown.countDown()) { - listener.onResponse(new SyncedFlushResponse(results)); - } - } - - @Override - public void onFailure(Exception e) { - logger.debug("{} unexpected error while executing synced flush", shardId); - final int totalShards = indexMetadata.getNumberOfReplicas() + 1; - results.get(index).add(new ShardsSyncedFlushResult(shardId, totalShards, e.getMessage())); - if (countDown.countDown()) { - listener.onResponse(new SyncedFlushResponse(results)); - } - } - }); - } - } - } - - /* - * Tries to flush all copies of a shard and write a sync id to it. - * After a synced flush two shard copies may only contain the same sync id if they contain the same documents. - * To ensure this, synced flush works in three steps: - * 1. Flush all shard copies and gather the commit ids for each copy after the flush - * 2. Ensure that there are no ongoing indexing operations on the primary - * 3. Perform an additional flush on each shard copy that writes the sync id - * - * Step 3 is only executed on a shard if - * a) the shard has no uncommitted changes since the last flush - * b) the last flush was the one executed in 1 (use the collected commit id to verify this) - * - * This alone is not enough to ensure that all copies contain the same documents. - * Without step 2 a sync id would be written for inconsistent copies in the following scenario: - * - * Write operation has completed on a primary and is being sent to replicas. The write request does not reach the - * replicas until sync flush is finished. - * Step 1 is executed. After the flush the commit points on primary contains a write operation that the replica does not have. - * Step 3 will be executed on primary and replica as well because there are no uncommitted changes on primary (the first flush - * committed them) and there are no uncommitted changes on the replica (the write operation has not reached the replica yet). 
- * - * Step 2 detects this scenario and fails the whole synced flush if a write operation is ongoing on the primary. - * Together with the conditions for step 3 (same commit id and no uncommitted changes) this guarantees that a sync id will only - * be written on a primary if no write operation was executed between step 1 and step 3 and a sync id will only be written on - * the replica if it contains the same changes that the primary contains. - * - * Synced flush is a best effort operation. The sync id may be written on all, some or none of the copies. - **/ - public void attemptSyncedFlush(final ShardId shardId, final ActionListener<ShardsSyncedFlushResult> actionListener) { - innerAttemptSyncedFlush(shardId, clusterService.state(), actionListener); - } - - private void innerAttemptSyncedFlush( - final ShardId shardId, - final ClusterState state, - final ActionListener<ShardsSyncedFlushResult> actionListener - ) { - try { - final IndexShardRoutingTable shardRoutingTable = getShardRoutingTable(shardId, state); - final List<ShardRouting> activeShards = shardRoutingTable.activeShards(); - final int totalShards = shardRoutingTable.getSize(); - - if (activeShards.size() == 0) { - actionListener.onResponse(new ShardsSyncedFlushResult(shardId, totalShards, "no active shards")); - return; - } - - // 1. send pre-sync flushes to all replicas - final StepListener<Map<String, PreSyncedFlushResponse>> presyncStep = new StepListener<>(); - sendPreSyncRequests(activeShards, state, shardId, presyncStep); - - // 2. fetch in flight operations - final StepListener<InFlightOpsResponse> inflightOpsStep = new StepListener<>(); - presyncStep.whenComplete(presyncResponses -> { - if (presyncResponses.isEmpty()) { - actionListener.onResponse(new ShardsSyncedFlushResult(shardId, totalShards, "all shards failed to commit on pre-sync")); - } else { - getInflightOpsCount(shardId, state, shardRoutingTable, inflightOpsStep); - } - }, actionListener::onFailure); - - // 3.
now send the sync request to all the shards - inflightOpsStep.whenComplete(inFlightOpsResponse -> { - final Map presyncResponses = presyncStep.result(); - final int inflight = inFlightOpsResponse.opCount(); - assert inflight >= 0; - if (inflight != 0) { - actionListener.onResponse( - new ShardsSyncedFlushResult(shardId, totalShards, "[" + inflight + "] ongoing operations on primary") - ); - } else { - final String sharedSyncId = sharedExistingSyncId(presyncResponses); - if (sharedSyncId != null) { - assert presyncResponses.values() - .stream() - .allMatch(r -> r.existingSyncId.equals(sharedSyncId)) : "Not all shards have the same existing sync id [" - + sharedSyncId - + "], responses [" - + presyncResponses - + "]"; - reportSuccessWithExistingSyncId(shardId, sharedSyncId, activeShards, totalShards, presyncResponses, actionListener); - } else { - String syncId = UUIDs.randomBase64UUID(); - sendSyncRequests(syncId, activeShards, state, presyncResponses, shardId, totalShards, actionListener); - } - } - }, actionListener::onFailure); - } catch (Exception e) { - actionListener.onFailure(e); - } - } - - private String sharedExistingSyncId(Map preSyncedFlushResponses) { - String existingSyncId = null; - for (PreSyncedFlushResponse resp : preSyncedFlushResponses.values()) { - if (Strings.isNullOrEmpty(resp.existingSyncId)) { - return null; - } - if (existingSyncId == null) { - existingSyncId = resp.existingSyncId; - } - if (existingSyncId.equals(resp.existingSyncId) == false) { - return null; - } - } - return existingSyncId; - } - - private void reportSuccessWithExistingSyncId( - ShardId shardId, - String existingSyncId, - List shards, - int totalShards, - Map preSyncResponses, - ActionListener listener - ) { - final Map results = new HashMap<>(); - for (final ShardRouting shard : shards) { - if (preSyncResponses.containsKey(shard.currentNodeId())) { - results.put(shard, new ShardSyncedFlushResponse((String) null)); - } - } - listener.onResponse(new ShardsSyncedFlushResult(shardId, existingSyncId, totalShards, results)); - } - - final IndexShardRoutingTable getShardRoutingTable(final ShardId shardId, final ClusterState state) { - final IndexMetadata indexMetadata = state.getMetadata().index(shardId.getIndex()); - if (indexMetadata == null) { - throw new IndexNotFoundException(shardId.getIndexName()); - } else if (indexMetadata.getState() == IndexMetadata.State.CLOSE) { - throw new IndexClosedException(shardId.getIndex()); - } - final IndexShardRoutingTable shardRoutingTable = state.routingTable().index(indexMetadata.getIndex()).shard(shardId.id()); - if (shardRoutingTable == null) { - throw new ShardNotFoundException(shardId); - } - return shardRoutingTable; - } - - /** - * returns the number of in flight operations on primary. -1 upon error. 
- */ - protected void getInflightOpsCount( - final ShardId shardId, - ClusterState state, - IndexShardRoutingTable shardRoutingTable, - final ActionListener listener - ) { - try { - final ShardRouting primaryShard = shardRoutingTable.primaryShard(); - final DiscoveryNode primaryNode = state.nodes().get(primaryShard.currentNodeId()); - if (primaryNode == null) { - logger.trace("{} failed to resolve node for primary shard {}, skipping sync", shardId, primaryShard); - listener.onResponse(new InFlightOpsResponse(-1)); - return; - } - logger.trace("{} retrieving in flight operation count", shardId); - transportService.sendRequest( - primaryNode, - IN_FLIGHT_OPS_ACTION_NAME, - new InFlightOpsRequest(shardId), - new TransportResponseHandler() { - @Override - public InFlightOpsResponse read(StreamInput in) throws IOException { - return new InFlightOpsResponse(in); - } - - @Override - public void handleResponse(InFlightOpsResponse response) { - listener.onResponse(response); - } - - @Override - public void handleException(TransportException exp) { - logger.debug("{} unexpected error while retrieving in flight op count", shardId); - listener.onFailure(exp); - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } - } - ); - } catch (Exception e) { - listener.onFailure(e); - } - } - - private int numDocsOnPrimary(List shards, Map preSyncResponses) { - for (ShardRouting shard : shards) { - if (shard.primary()) { - final PreSyncedFlushResponse resp = preSyncResponses.get(shard.currentNodeId()); - if (resp != null) { - return resp.numDocs; - } - } - } - return PreSyncedFlushResponse.UNKNOWN_NUM_DOCS; - } - - void sendSyncRequests( - final String syncId, - final List shards, - ClusterState state, - Map preSyncResponses, - final ShardId shardId, - final int totalShards, - final ActionListener listener - ) { - final CountDown countDown = new CountDown(shards.size()); - final Map results = ConcurrentCollections.newConcurrentMap(); - final int numDocsOnPrimary = numDocsOnPrimary(shards, preSyncResponses); - for (final ShardRouting shard : shards) { - final DiscoveryNode node = state.nodes().get(shard.currentNodeId()); - if (node == null) { - logger.trace("{} is assigned to an unknown node. skipping for sync id [{}]. shard routing {}", shardId, syncId, shard); - results.put(shard, new ShardSyncedFlushResponse("unknown node")); - countDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results); - continue; - } - final PreSyncedFlushResponse preSyncedResponse = preSyncResponses.get(shard.currentNodeId()); - if (preSyncedResponse == null) { - logger.trace( - "{} can't resolve expected commit id for current node, skipping for sync id [{}]. 
shard routing {}", - shardId, - syncId, - shard - ); - results.put(shard, new ShardSyncedFlushResponse("no commit id from pre-sync flush")); - countDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results); - continue; - } - if (preSyncedResponse.numDocs != numDocsOnPrimary - && preSyncedResponse.numDocs != PreSyncedFlushResponse.UNKNOWN_NUM_DOCS - && numDocsOnPrimary != PreSyncedFlushResponse.UNKNOWN_NUM_DOCS) { - logger.debug( - "{} can't issue sync id [{}] for replica [{}] with num docs [{}]; num docs on primary [{}]", - shardId, - syncId, - shard, - preSyncedResponse.numDocs, - numDocsOnPrimary - ); - results.put( - shard, - new ShardSyncedFlushResponse( - "ongoing indexing operations: " - + "num docs on replica [" - + preSyncedResponse.numDocs - + "]; num docs on primary [" - + numDocsOnPrimary - + "]" - ) - ); - countDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results); - continue; - } - logger.trace("{} sending synced flush request to {}. sync id [{}].", shardId, shard, syncId); - ShardSyncedFlushRequest syncedFlushRequest = new ShardSyncedFlushRequest(shard.shardId(), syncId, preSyncedResponse.commitId); - transportService.sendRequest( - node, - SYNCED_FLUSH_ACTION_NAME, - syncedFlushRequest, - new TransportResponseHandler() { - @Override - public ShardSyncedFlushResponse read(StreamInput in) throws IOException { - return new ShardSyncedFlushResponse(in); - } - - @Override - public void handleResponse(ShardSyncedFlushResponse response) { - ShardSyncedFlushResponse existing = results.put(shard, response); - assert existing == null : "got two answers for node [" + node + "]"; - // count after the assert so we won't decrement twice in handleException - countDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results); - } - - @Override - public void handleException(TransportException exp) { - logger.trace( - () -> new ParameterizedMessage("{} error while performing synced flush on [{}], skipping", shardId, shard), - exp - ); - results.put(shard, new ShardSyncedFlushResponse(exp.getMessage())); - countDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results); - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } - } - ); - } - - } - - private void countDownAndSendResponseIfDone( - String syncId, - List shards, - ShardId shardId, - int totalShards, - ActionListener listener, - CountDown countDown, - Map results - ) { - if (countDown.countDown()) { - assert results.size() == shards.size(); - listener.onResponse(new ShardsSyncedFlushResult(shardId, syncId, totalShards, results)); - } - } - - /** - * send presync requests to all started copies of the given shard - */ - void sendPreSyncRequests( - final List shards, - final ClusterState state, - final ShardId shardId, - final ActionListener> listener - ) { - final CountDown countDown = new CountDown(shards.size()); - final ConcurrentMap presyncResponses = ConcurrentCollections.newConcurrentMap(); - for (final ShardRouting shard : shards) { - logger.trace("{} sending pre-synced flush request to {}", shardId, shard); - final DiscoveryNode node = state.nodes().get(shard.currentNodeId()); - if (node == null) { - logger.trace("{} shard routing {} refers to an unknown node. 
skipping.", shardId, shard); - if (countDown.countDown()) { - listener.onResponse(presyncResponses); - } - continue; - } - transportService.sendRequest( - node, - PRE_SYNCED_FLUSH_ACTION_NAME, - new PreShardSyncedFlushRequest(shard.shardId()), - new TransportResponseHandler() { - @Override - public PreSyncedFlushResponse read(StreamInput in) throws IOException { - return new PreSyncedFlushResponse(in); - } - - @Override - public void handleResponse(PreSyncedFlushResponse response) { - PreSyncedFlushResponse existing = presyncResponses.putIfAbsent(node.getId(), response); - assert existing == null : "got two answers for node [" + node + "]"; - // count after the assert so we won't decrement twice in handleException - if (countDown.countDown()) { - listener.onResponse(presyncResponses); - } - } - - @Override - public void handleException(TransportException exp) { - logger.trace( - () -> new ParameterizedMessage("{} error while performing pre synced flush on [{}], skipping", shardId, shard), - exp - ); - if (countDown.countDown()) { - listener.onResponse(presyncResponses); - } - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } - } - ); - } - } - - private PreSyncedFlushResponse performPreSyncedFlush(PreShardSyncedFlushRequest request) { - IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).getShard(request.shardId().id()); - FlushRequest flushRequest = new FlushRequest().force(false).waitIfOngoing(true); - logger.trace("{} performing pre sync flush", request.shardId()); - indexShard.flush(flushRequest); - final CommitStats commitStats = indexShard.commitStats(); - final Engine.CommitId commitId = commitStats.getRawCommitId(); - logger.trace("{} pre sync flush done. commit id {}, num docs {}", request.shardId(), commitId, commitStats.getNumDocs()); - return new PreSyncedFlushResponse(commitId, commitStats.getNumDocs(), commitStats.syncId()); - } - - private ShardSyncedFlushResponse performSyncedFlush(ShardSyncedFlushRequest request) { - IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); - IndexShard indexShard = indexService.getShard(request.shardId().id()); - logger.trace( - "{} performing sync flush. sync id [{}], expected commit id {}", - request.shardId(), - request.syncId(), - request.expectedCommitId() - ); - Engine.SyncedFlushResult result = indexShard.syncFlush(request.syncId(), request.expectedCommitId()); - logger.trace("{} sync flush done. sync id [{}], result [{}]", request.shardId(), request.syncId(), result); - switch (result) { - case SUCCESS: - return new ShardSyncedFlushResponse((String) null); - case COMMIT_MISMATCH: - return new ShardSyncedFlushResponse("commit has changed"); - case PENDING_OPERATIONS: - return new ShardSyncedFlushResponse("pending operations"); - default: - throw new OpenSearchException("unknown synced flush result [" + result + "]"); - } - } - - private InFlightOpsResponse performInFlightOps(InFlightOpsRequest request) { - IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); - IndexShard indexShard = indexService.getShard(request.shardId().id()); - if (indexShard.routingEntry().primary() == false) { - throw new IllegalStateException("[" + request.shardId() + "] expected a primary shard"); - } - int opCount = indexShard.getActiveOperationsCount(); - return new InFlightOpsResponse(opCount == IndexShard.OPERATIONS_BLOCKED ? 
0 : opCount); - } - - public static final class PreShardSyncedFlushRequest extends TransportRequest { - private ShardId shardId; - - public PreShardSyncedFlushRequest(StreamInput in) throws IOException { - super(in); - this.shardId = new ShardId(in); - } - - public PreShardSyncedFlushRequest(ShardId shardId) { - this.shardId = shardId; - } - - @Override - public String toString() { - return "PreShardSyncedFlushRequest{" + "shardId=" + shardId + '}'; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - shardId.writeTo(out); - } - - public ShardId shardId() { - return shardId; - } - } - - /** - * Response for first step of synced flush (flush) for one shard copy - */ - static final class PreSyncedFlushResponse extends TransportResponse { - static final int UNKNOWN_NUM_DOCS = -1; - - Engine.CommitId commitId; - int numDocs; - @Nullable - String existingSyncId = null; - - PreSyncedFlushResponse(StreamInput in) throws IOException { - super(in); - commitId = new Engine.CommitId(in); - numDocs = in.readInt(); - existingSyncId = in.readOptionalString(); - } - - PreSyncedFlushResponse(Engine.CommitId commitId, int numDocs, String existingSyncId) { - this.commitId = commitId; - this.numDocs = numDocs; - this.existingSyncId = existingSyncId; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - commitId.writeTo(out); - out.writeInt(numDocs); - out.writeOptionalString(existingSyncId); - } - } - - public static final class ShardSyncedFlushRequest extends TransportRequest { - - private String syncId; - private Engine.CommitId expectedCommitId; - private ShardId shardId; - - public ShardSyncedFlushRequest(StreamInput in) throws IOException { - super(in); - shardId = new ShardId(in); - expectedCommitId = new Engine.CommitId(in); - syncId = in.readString(); - } - - public ShardSyncedFlushRequest(ShardId shardId, String syncId, Engine.CommitId expectedCommitId) { - this.expectedCommitId = expectedCommitId; - this.shardId = shardId; - this.syncId = syncId; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - shardId.writeTo(out); - expectedCommitId.writeTo(out); - out.writeString(syncId); - } - - public ShardId shardId() { - return shardId; - } - - public String syncId() { - return syncId; - } - - public Engine.CommitId expectedCommitId() { - return expectedCommitId; - } - - @Override - public String toString() { - return "ShardSyncedFlushRequest{" + "shardId=" + shardId + ",syncId='" + syncId + '\'' + '}'; - } - } - - /** - * Response for third step of synced flush (writing the sync id) for one shard copy - */ - public static final class ShardSyncedFlushResponse extends TransportResponse { - - /** - * a non null value indicates a failure to sync flush. 
null means success - */ - String failureReason; - - public ShardSyncedFlushResponse(StreamInput in) throws IOException { - super(in); - failureReason = in.readOptionalString(); - } - - public ShardSyncedFlushResponse(String failureReason) { - this.failureReason = failureReason; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeOptionalString(failureReason); - } - - public boolean success() { - return failureReason == null; - } - - public String failureReason() { - return failureReason; - } - - @Override - public String toString() { - return "ShardSyncedFlushResponse{" + "success=" + success() + ", failureReason='" + failureReason + '\'' + '}'; - } - - public static ShardSyncedFlushResponse readSyncedFlushResponse(StreamInput in) throws IOException { - return new ShardSyncedFlushResponse(in); - } - } - - public static final class InFlightOpsRequest extends TransportRequest { - - private ShardId shardId; - - public InFlightOpsRequest(StreamInput in) throws IOException { - super(in); - shardId = new ShardId(in); - } - - public InFlightOpsRequest(ShardId shardId) { - this.shardId = shardId; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - shardId.writeTo(out); - } - - public ShardId shardId() { - return shardId; - } - - @Override - public String toString() { - return "InFlightOpsRequest{" + "shardId=" + shardId + '}'; - } - } - - /** - * Response for second step of synced flush (check operations in flight) - */ - static final class InFlightOpsResponse extends TransportResponse { - - int opCount; - - InFlightOpsResponse(StreamInput in) throws IOException { - super(in); - opCount = in.readVInt(); - } - - InFlightOpsResponse(int opCount) { - assert opCount >= 0 : opCount; - this.opCount = opCount; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(opCount); - } - - public int opCount() { - return opCount; - } - - @Override - public String toString() { - return "InFlightOpsResponse{" + "opCount=" + opCount + '}'; - } - } - - private final class PreSyncedFlushTransportHandler implements TransportRequestHandler { - - @Override - public void messageReceived(PreShardSyncedFlushRequest request, TransportChannel channel, Task task) throws Exception { - channel.sendResponse(performPreSyncedFlush(request)); - } - } - - private final class SyncedFlushTransportHandler implements TransportRequestHandler { - - @Override - public void messageReceived(ShardSyncedFlushRequest request, TransportChannel channel, Task task) throws Exception { - channel.sendResponse(performSyncedFlush(request)); - } - } - - private final class InFlightOpCountTransportHandler implements TransportRequestHandler { - - @Override - public void messageReceived(InFlightOpsRequest request, TransportChannel channel, Task task) throws Exception { - channel.sendResponse(performInFlightOps(request)); - } - } - -} diff --git a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoverySourceService.java b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoverySourceService.java index a21ae475ba08a..5e9db2e1d67f3 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoverySourceService.java +++ b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoverySourceService.java @@ -260,8 +260,8 @@ synchronized void remove(IndexShard shard, RecoverySourceHandler handler) { if (removed != null) { shard.recoveryStats().decCurrentAsSource(); removed.cancel(); - assert 
nodeToHandlers.getOrDefault(removed.targetNode(), Collections.emptySet()) - .contains(removed) : "Remote recovery was not properly tracked [" + removed + "]"; + assert nodeToHandlers.getOrDefault(removed.targetNode(), Collections.emptySet()).contains(removed) + : "Remote recovery was not properly tracked [" + removed + "]"; nodeToHandlers.computeIfPresent(removed.targetNode(), (k, handlersForNode) -> { handlersForNode.remove(removed); if (handlersForNode.isEmpty()) { diff --git a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java index 50f377b6f40a1..6c972d78772ca 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java @@ -233,12 +233,8 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi logger.trace("{} preparing shard for peer recovery", recoveryTarget.shardId()); indexShard.prepareForIndexRecovery(); final long startingSeqNo = indexShard.recoverLocallyUpToGlobalCheckpoint(); - assert startingSeqNo == UNASSIGNED_SEQ_NO - || recoveryTarget.state().getStage() == RecoveryState.Stage.TRANSLOG : "unexpected recovery stage [" - + recoveryTarget.state().getStage() - + "] starting seqno [ " - + startingSeqNo - + "]"; + assert startingSeqNo == UNASSIGNED_SEQ_NO || recoveryTarget.state().getStage() == RecoveryState.Stage.TRANSLOG + : "unexpected recovery stage [" + recoveryTarget.state().getStage() + "] starting seqno [ " + startingSeqNo + "]"; startRequest = getStartRecoveryRequest(logger, clusterService.localNode(), recoveryTarget, startingSeqNo); requestToSend = startRequest; actionName = PeerRecoverySourceService.Actions.START_RECOVERY; @@ -469,18 +465,15 @@ public void onTimeout(TimeValue timeout) { request.maxSeqNoOfUpdatesOrDeletesOnPrimary(), request.retentionLeases(), request.mappingVersionOnPrimary(), - ActionListener.wrap( - checkpoint -> listener.onResponse(null), - e -> { - // do not retry if the mapping on replica is at least as recent as the mapping - // that the primary used to index the operations in the request. - if (mappingVersionOnTarget < request.mappingVersionOnPrimary() && e instanceof MapperException) { - retryOnMappingException.accept(e); - } else { - listener.onFailure(e); - } + ActionListener.wrap(checkpoint -> listener.onResponse(null), e -> { + // do not retry if the mapping on replica is at least as recent as the mapping + // that the primary used to index the operations in the request. 
+ if (mappingVersionOnTarget < request.mappingVersionOnPrimary() && e instanceof MapperException) { + retryOnMappingException.accept(e); + } else { + listener.onFailure(e); } - ) + }) ); } } diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java index 1bd659853e10e..dcb7024ae8c75 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java @@ -132,6 +132,7 @@ public class RecoverySourceHandler { private final CancellableThreads cancellableThreads = new CancellableThreads(); private final List<Closeable> resources = new CopyOnWriteArrayList<>(); private final ListenableFuture<RecoveryResponse> future = new ListenableFuture<>(); + private static final String PEER_RECOVERY_NAME = "peer-recovery"; public RecoverySourceHandler( IndexShard shard, @@ -187,7 +188,6 @@ public void recoverToTarget(ActionListener<RecoveryResponse> listener) { IOUtils.closeWhileHandlingException(releaseResources, () -> future.onFailure(e)); }; - final boolean softDeletesEnabled = shard.indexSettings().isSoftDeleteEnabled(); final SetOnce<RetentionLease> retentionLeaseRef = new SetOnce<>(); runUnderPrimaryPermit(() -> { @@ -211,19 +211,13 @@ public void recoverToTarget(ActionListener<RecoveryResponse> listener) { cancellableThreads, logger ); - final Engine.HistorySource historySource; - if (softDeletesEnabled && (shard.useRetentionLeasesInPeerRecovery() || retentionLeaseRef.get() != null)) { - historySource = Engine.HistorySource.INDEX; - } else { - historySource = Engine.HistorySource.TRANSLOG; - } - final Closeable retentionLock = shard.acquireHistoryRetentionLock(historySource); + final Closeable retentionLock = shard.acquireHistoryRetentionLock(); resources.add(retentionLock); final long startingSeqNo; final boolean isSequenceNumberBasedRecovery = request.startingSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && isTargetSameHistory() - && shard.hasCompleteHistoryOperations("peer-recovery", historySource, request.startingSeqNo()) - && (historySource == Engine.HistorySource.TRANSLOG + && shard.hasCompleteHistoryOperations(PEER_RECOVERY_NAME, request.startingSeqNo()) + && ((retentionLeaseRef.get() == null && shard.useRetentionLeasesInPeerRecovery() == false) || (retentionLeaseRef.get() != null && retentionLeaseRef.get().retainingSequenceNumber() <= request.startingSeqNo())); // NB check hasCompleteHistoryOperations when computing isSequenceNumberBasedRecovery, even if there is a retention lease, // because when doing a rolling upgrade from earlier than 7.4 we may create some leases that are initially unsatisfied. It's @@ -231,7 +225,7 @@ && isTargetSameHistory() // Also it's pretty cheap when soft deletes are enabled, and it'd be a disaster if we tried a sequence-number-based recovery // without having a complete history. - if (isSequenceNumberBasedRecovery && softDeletesEnabled && retentionLeaseRef.get() != null) { + if (isSequenceNumberBasedRecovery && retentionLeaseRef.get() != null) { // all the history we need is retained by an existing retention lease, so we do not need a separate retention lock retentionLock.close(); logger.trace("history is retained by {}", retentionLeaseRef.get()); @@ -274,13 +268,11 @@ && isTargetSameHistory() // advances and not when creating a new safe commit.
In any case this is a best-effort thing since future recoveries can // always fall back to file-based ones, and only really presents a problem if this primary fails before things have settled // down. - startingSeqNo = softDeletesEnabled - ? Long.parseLong(safeCommitRef.getIndexCommit().getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)) + 1L - : 0; + startingSeqNo = Long.parseLong(safeCommitRef.getIndexCommit().getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)) + 1L; logger.trace("performing file-based recovery followed by history replay starting at [{}]", startingSeqNo); try { - final int estimateNumOps = shard.estimateNumberOfHistoryOperations("peer-recovery", historySource, startingSeqNo); + final int estimateNumOps = estimateNumberOfHistoryOperations(startingSeqNo); final Releasable releaseStore = acquireStore(shard.store()); resources.add(releaseStore); sendFileStep.whenComplete(r -> IOUtils.close(safeCommitRef, releaseStore), e -> { @@ -327,10 +319,7 @@ && isTargetSameHistory() sendFileStep.whenComplete(r -> { assert Transports.assertNotTransportThread(RecoverySourceHandler.this + "[prepareTargetForTranslog]"); // For a sequence based recovery, the target can keep its local translog - prepareTargetForTranslog( - shard.estimateNumberOfHistoryOperations("peer-recovery", historySource, startingSeqNo), - prepareEngineStep - ); + prepareTargetForTranslog(estimateNumberOfHistoryOperations(startingSeqNo), prepareEngineStep); }, onFailure); prepareEngineStep.whenComplete(prepareEngineTime -> { @@ -350,11 +339,10 @@ && isTargetSameHistory() ); final long endingSeqNo = shard.seqNoStats().getMaxSeqNo(); - logger.trace( - "snapshot translog for recovery; current size is [{}]", - shard.estimateNumberOfHistoryOperations("peer-recovery", historySource, startingSeqNo) - ); - final Translog.Snapshot phase2Snapshot = shard.getHistoryOperations("peer-recovery", historySource, startingSeqNo); + if (logger.isTraceEnabled()) { + logger.trace("snapshot translog for recovery; current size is [{}]", estimateNumberOfHistoryOperations(startingSeqNo)); + } + final Translog.Snapshot phase2Snapshot = shard.newChangesSnapshot(PEER_RECOVERY_NAME, startingSeqNo, Long.MAX_VALUE, false); resources.add(phase2Snapshot); retentionLock.close(); @@ -415,6 +403,12 @@ private boolean isTargetSameHistory() { return targetHistoryUUID.equals(shard.getHistoryUUID()); } + private int estimateNumberOfHistoryOperations(long startingSeqNo) throws IOException { + try (Translog.Snapshot snapshot = shard.newChangesSnapshot(PEER_RECOVERY_NAME, startingSeqNo, Long.MAX_VALUE, false)) { + return snapshot.totalOperations(); + } + } + static void runUnderPrimaryPermit( CancellableThreads.Interruptible runnable, String reason, @@ -644,10 +638,8 @@ void phase1(IndexCommit snapshot, long startingSeqNo, IntSupplier translogOps, A createRetentionLeaseStep.whenComplete(retentionLease -> { final long lastKnownGlobalCheckpoint = shard.getLastKnownGlobalCheckpoint(); - assert retentionLease == null - || retentionLease.retainingSequenceNumber() - 1 <= lastKnownGlobalCheckpoint : retentionLease - + " vs " - + lastKnownGlobalCheckpoint; + assert retentionLease == null || retentionLease.retainingSequenceNumber() - 1 <= lastKnownGlobalCheckpoint + : retentionLease + " vs " + lastKnownGlobalCheckpoint; // Establishes new empty translog on the replica with global checkpoint set to lastKnownGlobalCheckpoint. 
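An aside on the history API the hunks above converge on: with the Engine.HistorySource split removed, peer recovery reads history exclusively through shard.newChangesSnapshot(source, fromSeqNo, toSeqNo, requiredFullRange), which hands back a Translog.Snapshot. The snapshot is Closeable and is drained by calling next() until it returns null, while totalOperations() reports the expected operation count; that is all the new estimateNumberOfHistoryOperations helper relies on. A minimal consumption sketch, assuming the surrounding method's locals (shard, startingSeqNo):

    try (Translog.Snapshot snapshot = shard.newChangesSnapshot("peer-recovery", startingSeqNo, Long.MAX_VALUE, false)) {
        Translog.Operation op;
        while ((op = snapshot.next()) != null) {
            // replay or count each operation; snapshot.totalOperations()
            // gives the total the snapshot expects to deliver
        }
    }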
We want // the commit we just copied to be a safe commit on the replica, so why not set the global checkpoint on the replica // to the max seqno of this commit? Because (in rare corner cases) this commit might not be a safe commit here on diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java index 0cd7a9b010cc0..1a11345e8e47d 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java @@ -138,11 +138,8 @@ public RecoveryState(ShardRouting shardRouting, DiscoveryNode targetNode, @Nulla public RecoveryState(ShardRouting shardRouting, DiscoveryNode targetNode, @Nullable DiscoveryNode sourceNode, Index index) { assert shardRouting.initializing() : "only allow initializing shard routing to be recovered: " + shardRouting; RecoverySource recoverySource = shardRouting.recoverySource(); - assert (recoverySource - .getType() == RecoverySource.Type.PEER) == (sourceNode != null) : "peer recovery requires source node, recovery type: " - + recoverySource.getType() - + " source node: " - + sourceNode; + assert (recoverySource.getType() == RecoverySource.Type.PEER) == (sourceNode != null) + : "peer recovery requires source node, recovery type: " + recoverySource.getType() + " source node: " + sourceNode; this.shardId = shardRouting.shardId(); this.primary = shardRouting.primary(); this.recoverySource = recoverySource; diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java index b4bcec3273379..3ea7cad528e82 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java @@ -344,11 +344,11 @@ public void finalizeRecovery(final long globalCheckpoint, final long trimAboveSe private boolean hasUncommittedOperations() throws IOException { long localCheckpointOfCommit = Long.parseLong(indexShard.commitStats().getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)); - return indexShard.estimateNumberOfHistoryOperations( - "peer-recovery", - indexShard.indexSettings().isSoftDeleteEnabled() ? 
Engine.HistorySource.INDEX : Engine.HistorySource.TRANSLOG, - localCheckpointOfCommit + 1 - ) > 0; + try ( + Translog.Snapshot snapshot = indexShard.newChangesSnapshot("peer-recovery", localCheckpointOfCommit + 1, Long.MAX_VALUE, false) + ) { + return snapshot.totalOperations() > 0; + } } @Override diff --git a/server/src/main/java/org/opensearch/indices/recovery/StartRecoveryRequest.java b/server/src/main/java/org/opensearch/indices/recovery/StartRecoveryRequest.java index dd4d7d99e978c..f2efae2fbe798 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/StartRecoveryRequest.java +++ b/server/src/main/java/org/opensearch/indices/recovery/StartRecoveryRequest.java @@ -98,8 +98,8 @@ public StartRecoveryRequest( this.metadataSnapshot = metadataSnapshot; this.primaryRelocation = primaryRelocation; this.startingSeqNo = startingSeqNo; - assert startingSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO - || metadataSnapshot.getHistoryUUID() != null : "starting seq no is set but not history uuid"; + assert startingSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO || metadataSnapshot.getHistoryUUID() != null + : "starting seq no is set but not history uuid"; } public long recoveryId() { diff --git a/server/src/main/java/org/opensearch/ingest/IngestDocument.java b/server/src/main/java/org/opensearch/ingest/IngestDocument.java index 820763dde43cb..b496799c34dd0 100644 --- a/server/src/main/java/org/opensearch/ingest/IngestDocument.java +++ b/server/src/main/java/org/opensearch/ingest/IngestDocument.java @@ -76,19 +76,10 @@ public final class IngestDocument { // Contains all pipelines that have been executed for this document private final Set executedPipelines = new LinkedHashSet<>(); - public IngestDocument( - String index, - String type, - String id, - String routing, - Long version, - VersionType versionType, - Map source - ) { + public IngestDocument(String index, String id, String routing, Long version, VersionType versionType, Map source) { this.sourceAndMetadata = new HashMap<>(); this.sourceAndMetadata.putAll(source); this.sourceAndMetadata.put(Metadata.INDEX.getFieldName(), index); - this.sourceAndMetadata.put(Metadata.TYPE.getFieldName(), type); this.sourceAndMetadata.put(Metadata.ID.getFieldName(), id); if (routing != null) { this.sourceAndMetadata.put(Metadata.ROUTING.getFieldName(), routing); diff --git a/server/src/main/java/org/opensearch/ingest/IngestService.java b/server/src/main/java/org/opensearch/ingest/IngestService.java index f50d437c26fdb..cbd5fa71b27de 100644 --- a/server/src/main/java/org/opensearch/ingest/IngestService.java +++ b/server/src/main/java/org/opensearch/ingest/IngestService.java @@ -722,13 +722,12 @@ private void innerExecute( // (e.g. 
the pipeline may have been removed while we're ingesting a document totalMetrics.preIngest(); String index = indexRequest.index(); - String type = indexRequest.type(); String id = indexRequest.id(); String routing = indexRequest.routing(); Long version = indexRequest.version(); VersionType versionType = indexRequest.versionType(); Map sourceAsMap = indexRequest.sourceAsMap(); - IngestDocument ingestDocument = new IngestDocument(index, type, id, routing, version, versionType, sourceAsMap); + IngestDocument ingestDocument = new IngestDocument(index, id, routing, version, versionType, sourceAsMap); ingestDocument.executePipeline(pipeline, (result, e) -> { long ingestTimeInMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTimeInNanos); totalMetrics.postIngest(ingestTimeInMillis); @@ -743,7 +742,6 @@ private void innerExecute( // it's fine to set all metadata fields all the time, as ingest document holds their starting values // before ingestion, which might also get modified during ingestion. indexRequest.index((String) metadataMap.get(IngestDocument.Metadata.INDEX)); - indexRequest.type((String) metadataMap.get(IngestDocument.Metadata.TYPE)); indexRequest.id((String) metadataMap.get(IngestDocument.Metadata.ID)); indexRequest.routing((String) metadataMap.get(IngestDocument.Metadata.ROUTING)); indexRequest.version(((Number) metadataMap.get(IngestDocument.Metadata.VERSION)).longValue()); diff --git a/server/src/main/java/org/opensearch/ingest/TrackingResultProcessor.java b/server/src/main/java/org/opensearch/ingest/TrackingResultProcessor.java index e9920b1332a18..efcb3b8d9840f 100644 --- a/server/src/main/java/org/opensearch/ingest/TrackingResultProcessor.java +++ b/server/src/main/java/org/opensearch/ingest/TrackingResultProcessor.java @@ -100,59 +100,56 @@ public void execute(IngestDocument ingestDocument, BiConsumer { - // special handling for pipeline cycle errors - if (e instanceof OpenSearchException - && e.getCause() instanceof IllegalStateException - && e.getCause().getMessage().startsWith(PIPELINE_CYCLE_ERROR_MESSAGE)) { - if (ignoreFailure) { - processorResultList.add( - new SimulateProcessorResult( - pipelineProcessor.getType(), - pipelineProcessor.getTag(), - pipelineProcessor.getDescription(), - new IngestDocument(ingestDocument), - e, - conditionalWithResult - ) - ); - } else { - processorResultList.add( - new SimulateProcessorResult( - pipelineProcessor.getType(), - pipelineProcessor.getTag(), - pipelineProcessor.getDescription(), - e, - conditionalWithResult - ) - ); - } - handler.accept(null, e); - } else { - // now that we know that there are no cycles between pipelines, decorate the processors for this pipeline and - // execute it - CompoundProcessor verbosePipelineProcessor = decorate(pipeline.getCompoundProcessor(), null, processorResultList); - // add the pipeline process to the results + ingestDocumentCopy.executePipeline(pipelineToCall, (result, e) -> { + // special handling for pipeline cycle errors + if (e instanceof OpenSearchException + && e.getCause() instanceof IllegalStateException + && e.getCause().getMessage().startsWith(PIPELINE_CYCLE_ERROR_MESSAGE)) { + if (ignoreFailure) { processorResultList.add( new SimulateProcessorResult( - actualProcessor.getType(), - actualProcessor.getTag(), - actualProcessor.getDescription(), + pipelineProcessor.getType(), + pipelineProcessor.getTag(), + pipelineProcessor.getDescription(), + new IngestDocument(ingestDocument), + e, conditionalWithResult ) ); - Pipeline verbosePipeline = new Pipeline( - pipeline.getId(), 
- pipeline.getDescription(), - pipeline.getVersion(), - verbosePipelineProcessor + } else { + processorResultList.add( + new SimulateProcessorResult( + pipelineProcessor.getType(), + pipelineProcessor.getTag(), + pipelineProcessor.getDescription(), + e, + conditionalWithResult + ) ); - ingestDocument.executePipeline(verbosePipeline, handler); } + handler.accept(null, e); + } else { + // now that we know that there are no cycles between pipelines, decorate the processors for this pipeline and + // execute it + CompoundProcessor verbosePipelineProcessor = decorate(pipeline.getCompoundProcessor(), null, processorResultList); + // add the pipeline process to the results + processorResultList.add( + new SimulateProcessorResult( + actualProcessor.getType(), + actualProcessor.getTag(), + actualProcessor.getDescription(), + conditionalWithResult + ) + ); + Pipeline verbosePipeline = new Pipeline( + pipeline.getId(), + pipeline.getDescription(), + pipeline.getVersion(), + verbosePipelineProcessor + ); + ingestDocument.executePipeline(verbosePipeline, handler); } - ); + }); return; } diff --git a/server/src/main/java/org/opensearch/monitor/os/OsProbe.java b/server/src/main/java/org/opensearch/monitor/os/OsProbe.java index 5097d1b0f4c05..eda86c49539dd 100644 --- a/server/src/main/java/org/opensearch/monitor/os/OsProbe.java +++ b/server/src/main/java/org/opensearch/monitor/os/OsProbe.java @@ -51,6 +51,7 @@ import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.stream.Collectors; @@ -401,6 +402,7 @@ private OsStats.Cgroup.CpuStat getCgroupCpuAcctCpuStat(final String controlGroup long numberOfPeriods = -1; long numberOfTimesThrottled = -1; long timeThrottledNanos = -1; + for (final String line : lines) { final String[] fields = line.split("\\s+"); switch (fields[0]) { @@ -415,9 +417,17 @@ private OsStats.Cgroup.CpuStat getCgroupCpuAcctCpuStat(final String controlGroup break; } } - assert numberOfPeriods != -1; - assert numberOfTimesThrottled != -1; - assert timeThrottledNanos != -1; + if (isCpuStatWarningsLogged.getAndSet(true) == false) { + if (numberOfPeriods == -1) { + logger.warn("Expected to see nr_periods field but found nothing"); + } + if (numberOfTimesThrottled == -1) { + logger.warn("Expected to see nr_throttled field but found nothing"); + } + if (timeThrottledNanos == -1) { + logger.warn("Expected to see throttled_time field but found nothing"); + } + } return new OsStats.Cgroup.CpuStat(numberOfPeriods, numberOfTimesThrottled, timeThrottledNanos); } @@ -440,7 +450,7 @@ private OsStats.Cgroup.CpuStat getCgroupCpuAcctCpuStat(final String controlGroup @SuppressForbidden(reason = "access /sys/fs/cgroup/cpu") List readSysFsCgroupCpuAcctCpuStat(final String controlGroup) throws IOException { final List lines = Files.readAllLines(PathUtils.get("/sys/fs/cgroup/cpu", controlGroup, "cpu.stat")); - assert lines != null && lines.size() == 3; + assert lines != null && lines.isEmpty() == false; return lines; } @@ -588,11 +598,18 @@ public static OsProbe getInstance() { return OsProbeHolder.INSTANCE; } - OsProbe() { + private final Logger logger; + + private AtomicBoolean isCpuStatWarningsLogged = new AtomicBoolean(false); + OsProbe() { + this(LogManager.getLogger(OsProbe.class)); } - private final Logger logger = LogManager.getLogger(getClass()); + /* For testing purposes */ + OsProbe(final Logger logger) { + this.logger = logger; + } OsInfo osInfo(long
refreshInterval, int allocatedProcessors) throws IOException { return new OsInfo( diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 2d951ad817c80..8224d2258e465 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -1080,8 +1080,8 @@ public Node start() throws NodeValidationException { transportService.getTaskManager().setTaskCancellationService(new TaskCancellationService(transportService)); transportService.start(); assert localNodeFactory.getNode() != null; - assert transportService.getLocalNode() - .equals(localNodeFactory.getNode()) : "transportService has a different local node than the factory provided"; + assert transportService.getLocalNode().equals(localNodeFactory.getNode()) + : "transportService has a different local node than the factory provided"; injector.getInstance(PeerRecoverySourceService.class).start(); // Load (and maybe upgrade) the metadata stored on disk @@ -1124,8 +1124,8 @@ public Node start() throws NodeValidationException { // start after transport service so the local disco is known discovery.start(); // start before cluster service so that it can set initial state on ClusterApplierService clusterService.start(); - assert clusterService.localNode() - .equals(localNodeFactory.getNode()) : "clusterService has a different local node than the factory provided"; + assert clusterService.localNode().equals(localNodeFactory.getNode()) + : "clusterService has a different local node than the factory provided"; transportService.acceptIncomingRequests(); discovery.startInitialJoin(); final TimeValue initialStateTimeout = DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.get(settings()); diff --git a/server/src/main/java/org/opensearch/plugins/SearchPlugin.java b/server/src/main/java/org/opensearch/plugins/SearchPlugin.java index db1995dbca988..3a5b965d07fb6 100644 --- a/server/src/main/java/org/opensearch/plugins/SearchPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/SearchPlugin.java @@ -33,6 +33,7 @@ package org.opensearch.plugins; import org.apache.lucene.search.Query; +import org.apache.lucene.search.Sort; import org.opensearch.common.CheckedFunction; import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.NamedWriteable; @@ -62,6 +63,8 @@ import org.opensearch.search.fetch.subphase.highlight.Highlighter; import org.opensearch.search.rescore.Rescorer; import org.opensearch.search.rescore.RescorerBuilder; +import org.opensearch.search.sort.SortBuilder; +import org.opensearch.search.sort.SortParser; import org.opensearch.search.suggest.Suggest; import org.opensearch.search.suggest.Suggester; import org.opensearch.search.suggest.SuggestionBuilder; @@ -138,6 +141,13 @@ default List> getQueries() { return emptyList(); } + /** + * The new {@link Sort}s defined by this plugin. + */ + default List> getSorts() { + return emptyList(); + } + /** * The new {@link Aggregation}s added by this plugin. */ @@ -172,10 +182,31 @@ default List> getRescorers() { * Specification of custom {@link ScoreFunction}. */ class ScoreFunctionSpec> extends SearchExtensionSpec> { + /** + * Specification of custom {@link ScoreFunctionBuilder}. + * + * @param name holds the names by which this score function might be parsed. The {@link ParseField#getPreferredName()} is special as it + * is the name under which the reader is registered.
So it is the name that the score function should use as its + * {@link NamedWriteable#getWriteableName()} too. It is an error if {@link ParseField#getPreferredName()} conflicts with + * another registered name, including names from other plugins. + * @param reader the reader registered for this score function's builder. Typically, a reference to a constructor that takes a + * {@link StreamInput} + * @param parser the parser that reads the score function builder from xcontent + */ public ScoreFunctionSpec(ParseField name, Writeable.Reader reader, ScoreFunctionParser parser) { super(name, reader, parser); } + /** + * Specification of custom {@link ScoreFunctionBuilder}. + * + * @param name the name by which this score function might be parsed or deserialized. Make sure that the score function builder returns this name for + * {@link NamedWriteable#getWriteableName()}. It is an error if this name conflicts with another registered name, including + * names from other plugins. + * @param reader the reader registered for this score function's builder. Typically, a reference to a constructor that takes a + * {@link StreamInput} + * @param parser the parser that reads the score function builder from xcontent + */ public ScoreFunctionSpec(String name, Writeable.Reader reader, ScoreFunctionParser parser) { super(name, reader, parser); } @@ -185,10 +216,31 @@ public ScoreFunctionSpec(String name, Writeable.Reader reader, ScoreFunctionP * Specification of custom {@link SignificanceHeuristic}. */ class SignificanceHeuristicSpec extends SearchExtensionSpec> { + /** + * Specification of custom {@link SignificanceHeuristic}. + * + * @param name holds the names by which this heuristic might be parsed. The {@link ParseField#getPreferredName()} is special as it + * is the name under which the reader is registered. So it is the name that the heuristic should use as its + * {@link NamedWriteable#getWriteableName()} too. It is an error if {@link ParseField#getPreferredName()} conflicts with + * another registered name, including names from other plugins. + * @param reader the reader registered for this heuristic. Typically, a reference to a constructor that takes a + * {@link StreamInput} + * @param parser the parser function that reads the heuristic from xcontent + */ public SignificanceHeuristicSpec(ParseField name, Writeable.Reader reader, BiFunction parser) { super(name, reader, parser); } + /** + * Specification of custom {@link SignificanceHeuristic}. + * + * @param name the name by which this heuristic might be parsed or deserialized. Make sure that the heuristic returns this name for + * {@link NamedWriteable#getWriteableName()}. It is an error if this name conflicts with another registered name, including + * names from other plugins. + * @param reader the reader registered for this heuristic. Typically, a reference to a constructor that takes a + * {@link StreamInput} + * @param parser the parser function that reads the heuristic from xcontent + */ public SignificanceHeuristicSpec(String name, Writeable.Reader reader, BiFunction parser) { super(name, reader, parser); } @@ -206,11 +258,12 @@ class SuggesterSpec> extends SearchExtensionSpec< * * @param name holds the names by which this suggester might be parsed. The {@link ParseField#getPreferredName()} is special as it * is the name by under which the request builder and Suggestion response readers are registered.
So it is the name that the - query and Suggestion response should use as their {@link NamedWriteable#getWriteableName()} return values too. - @param builderReader the reader registered for this suggester's builder. Typically a reference to a constructor that takes a + query and Suggestion response should use as their {@link NamedWriteable#getWriteableName()} return values too. It is + * an error if {@link ParseField#getPreferredName()} conflicts with another registered name, including names from other plugins. + * @param builderReader the reader registered for this suggester's builder. Typically, a reference to a constructor that takes a * {@link StreamInput} * @param builderParser a parser that reads the suggester's builder from xcontent - * @param suggestionReader the reader registered for this suggester's Suggestion response. Typically a reference to a constructor + * @param suggestionReader the reader registered for this suggester's Suggestion response. Typically, a reference to a constructor * that takes a {@link StreamInput} */ public SuggesterSpec( @@ -228,11 +281,12 @@ public SuggesterSpec( * Specification of custom {@link Suggester}. * * @param name the name by which this suggester might be parsed or deserialized. Make sure that the query builder and Suggestion - response reader return this name for {@link NamedWriteable#getWriteableName()}. - @param builderReader the reader registered for this suggester's builder. Typically a reference to a constructor that takes a + response reader return this name for {@link NamedWriteable#getWriteableName()}. It is an error if this name conflicts + with another registered name, including names from other plugins. + @param builderReader the reader registered for this suggester's builder. Typically, a reference to a constructor that takes a * {@link StreamInput} * @param builderParser a parser that reads the suggester's builder from xcontent - * @param suggestionReader the reader registered for this suggester's Suggestion response. Typically a reference to a constructor + * @param suggestionReader the reader registered for this suggester's Suggestion response. Typically, a reference to a constructor * that takes a {@link StreamInput} */ public SuggesterSpec( @@ -267,8 +321,9 @@ class QuerySpec extends SearchExtensionSpec reader, QueryParser par * Specification of custom {@link Query}. * * @param name the name by which this query might be parsed or deserialized. Make sure that the query builder returns this name for - {@link NamedWriteable#getWriteableName()}. - @param reader the reader registered for this query's builder. Typically a reference to a constructor that takes a + {@link NamedWriteable#getWriteableName()}. It is an error if this name conflicts with another registered name, including + names from other plugins. + @param reader the reader registered for this query's builder. Typically, a reference to a constructor that takes a * {@link StreamInput} * @param parser the parser the reads the query builder from xcontent */ @@ -290,6 +346,40 @@ public QuerySpec(String name, Writeable.Reader reader, QueryParser parser) } } + /** + * Specification of custom {@link Sort}. + */ + class SortSpec> extends SearchExtensionSpec> { + /** + * Specification of custom {@link Sort}. + * + * @param name holds the names by which this sort might be parsed. The {@link ParseField#getPreferredName()} is special as it + * is the name under which the reader is registered.
So it is the name that the sort should use as its + * {@link NamedWriteable#getWriteableName()} too. It is an error if {@link ParseField#getPreferredName()} conflicts with + * another registered name, including names from other plugins. + * @param reader the reader registered for this sort's builder. Typically, a reference to a constructor that takes a + * {@link StreamInput} + * @param parser the parser that reads the sort builder from xcontent + */ + public SortSpec(ParseField name, Writeable.Reader reader, SortParser parser) { + super(name, reader, parser); + } + + /** + * Specification of custom {@link Sort}. + * + * @param name the name by which this sort might be parsed or deserialized. Make sure that the sort builder returns this name for + * {@link NamedWriteable#getWriteableName()}. It is an error if this name conflicts with another registered name, including + * names from other plugins. + * @param reader the reader registered for this sort's builder. Typically, a reference to a constructor that takes a + * {@link StreamInput} + * @param parser the parser that reads the sort builder from xcontent + */ + public SortSpec(String name, Writeable.Reader reader, SortParser parser) { + super(name, reader, parser); + } + } + /** * Specification for an {@link Aggregation}. */ @@ -302,8 +392,9 @@ class AggregationSpec extends SearchExtensionSpec AggregationSpec( * Specification for an {@link Aggregation}. * * @param name the name by which this aggregation might be parsed or deserialized. Make sure that the {@link AggregationBuilder} - returns this from {@link NamedWriteable#getWriteableName()}. - @param reader the reader registered for this aggregation's builder. Typically a reference to a constructor that takes a + returns this from {@link NamedWriteable#getWriteableName()}. It is an error if this name conflicts with another + registered name, including names from other plugins. + @param reader the reader registered for this aggregation's builder. Typically, a reference to a constructor that takes a * {@link StreamInput} * @param parser the parser the reads the aggregation builder from xcontent */ @@ -333,8 +425,9 @@ public AggregationSpec(String name, Writeable.Rea * * @param name holds the names by which this aggregation might be parsed. The {@link ParseField#getPreferredName()} is special as it * is the name by under which the reader is registered. So it is the name that the {@link AggregationBuilder} should return - from {@link NamedWriteable#getWriteableName()}. - @param reader the reader registered for this aggregation's builder. Typically a reference to a constructor that takes a + from {@link NamedWriteable#getWriteableName()}. It is an error if {@link ParseField#getPreferredName()} conflicts with + another registered name, including names from other plugins. + @param reader the reader registered for this aggregation's builder. Typically, a reference to a constructor that takes a * {@link StreamInput} * @param parser the parser the reads the aggregation builder from xcontent * @deprecated Use the ctor that takes a {@link ContextParser} instead @@ -348,8 +441,9 @@ public AggregationSpec(ParseField name, Writeable.Reader> getResultRea * parsed in a search request (within the ext element). */ class SearchExtSpec extends SearchExtensionSpec> { + /** + * Specification of custom {@link SearchExtBuilder}. + * + * @param name holds the names by which this search ext might be parsed.
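To make the new sort extension point concrete: a plugin advertises one SortSpec per custom sort from getSorts(), mirroring how QuerySpec instances are returned from getQueries(). A hypothetical sketch — MySortBuilder, its fromXContent factory, and the "my_sort" name are invented for illustration, the generics reconstruct the stripped SortSpec signature, and the usual imports (Collections.singletonList, ParseField, Plugin) are assumed:

    public class MySortPlugin extends Plugin implements SearchPlugin {
        @Override
        public List<SortSpec<?>> getSorts() {
            return singletonList(new SortSpec<>(
                new ParseField("my_sort"),   // preferred name doubles as the writeable name
                MySortBuilder::new,          // Writeable.Reader: constructor taking a StreamInput
                MySortBuilder::fromXContent  // SortParser: rebuilds the sort builder from xcontent
            ));
        }
    }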
The {@link ParseField#getPreferredName()} is special as it + * is the name under which the reader is registered. So it is the name that the search ext should use as its + * {@link NamedWriteable#getWriteableName()} too. It is an error if {@link ParseField#getPreferredName()} conflicts with + * another registered name, including names from other plugins. + * @param reader the reader registered for this search ext's builder. Typically, a reference to a constructor that takes a + * {@link StreamInput} + * @param parser the parser function that reads the search ext builder from xcontent + */ public SearchExtSpec( ParseField name, Writeable.Reader reader, @@ -591,16 +702,50 @@ public SearchExtSpec( super(name, reader, parser); } + /** + * Specification of custom {@link SearchExtBuilder}. + * + * @param name the name by which this search ext might be parsed or deserialized. Make sure that the search ext builder returns this name for + * {@link NamedWriteable#getWriteableName()}. It is an error if this name conflicts with another registered name, including + * names from other plugins. + * @param reader the reader registered for this search ext's builder. Typically, a reference to a constructor that takes a + * {@link StreamInput} + * @param parser the parser function that reads the search ext builder from xcontent + */ public SearchExtSpec(String name, Writeable.Reader reader, CheckedFunction parser) { super(name, reader, parser); } } + /** + * Specification for a {@link RescorerBuilder}. + */ class RescorerSpec> extends SearchExtensionSpec> { + /** + * Specification of custom {@link RescorerBuilder}. + * + * @param name holds the names by which this rescorer might be parsed. The {@link ParseField#getPreferredName()} is special as it + * is the name under which the reader is registered. So it is the name that the rescorer should use as its + * {@link NamedWriteable#getWriteableName()} too. It is an error if {@link ParseField#getPreferredName()} conflicts with + * another registered name, including names from other plugins. + * @param reader the reader registered for this rescorer's builder. Typically, a reference to a constructor that takes a + * {@link StreamInput} + * @param parser the parser function that reads the rescorer builder from xcontent + */ public RescorerSpec(ParseField name, Writeable.Reader reader, CheckedFunction parser) { super(name, reader, parser); } + /** + * Specification of custom {@link RescorerBuilder}. + * + * @param name the name by which this rescorer might be parsed or deserialized. Make sure that the rescorer builder returns this name for + * {@link NamedWriteable#getWriteableName()}. It is an error if this name conflicts with another registered name, including + * names from other plugins. + * @param reader the reader registered for this rescorer's builder. Typically, a reference to a constructor that takes a + * {@link StreamInput} + * @param parser the parser function that reads the rescorer builder from xcontent + */ public RescorerSpec(String name, Writeable.Reader reader, CheckedFunction parser) { super(name, reader, parser); } @@ -624,7 +769,8 @@ class SearchExtensionSpec { * * @param name the name of the behavior as a {@linkplain ParseField}. The parser is registered under all names specified by the * {@linkplain ParseField} but the reader is only registered under the {@link ParseField#getPreferredName()} so be sure that - that is the name that W's {@link NamedWriteable#getWriteableName()} returns.
+ * that is the name that W's {@link NamedWriteable#getWriteableName()} returns. It is an error if + * {@link ParseField#getPreferredName()} conflicts with another registered name, including names from other plugins. * @param reader reader that reads the behavior from the internode protocol * @param parser parser that read the behavior from a REST request */ @@ -638,7 +784,8 @@ public SearchExtensionSpec(ParseField name, Writeable.Reader reader * Build the spec with a String. * * @param name the name of the behavior. The parser and the reader are are registered under this name so be sure that that is the - * name that W's {@link NamedWriteable#getWriteableName()} returns. + * name that W's {@link NamedWriteable#getWriteableName()} returns. It is an error if this name conflicts with another + * registered name, including names from other plugins. * @param reader reader that reads the behavior from the internode protocol * @param parser parser that read the behavior from a REST request */ diff --git a/server/src/main/java/org/opensearch/repositories/IndexMetaDataGenerations.java b/server/src/main/java/org/opensearch/repositories/IndexMetaDataGenerations.java index 5adc86bc84679..4ea0217c5870f 100644 --- a/server/src/main/java/org/opensearch/repositories/IndexMetaDataGenerations.java +++ b/server/src/main/java/org/opensearch/repositories/IndexMetaDataGenerations.java @@ -66,11 +66,8 @@ public final class IndexMetaDataGenerations { final Map identifiers; IndexMetaDataGenerations(Map> lookup, Map identifiers) { - assert identifiers.keySet() - .equals(lookup.values().stream().flatMap(m -> m.values().stream()).collect(Collectors.toSet())) : "identifier mappings " - + identifiers - + " don't track the same blob ids as the lookup map " - + lookup; + assert identifiers.keySet().equals(lookup.values().stream().flatMap(m -> m.values().stream()).collect(Collectors.toSet())) + : "identifier mappings " + identifiers + " don't track the same blob ids as the lookup map " + lookup; assert lookup.values().stream().noneMatch(Map::isEmpty) : "Lookup contained empty map [" + lookup + "]"; this.lookup = Collections.unmodifiableMap(lookup); this.identifiers = Collections.unmodifiableMap(identifiers); diff --git a/server/src/main/java/org/opensearch/repositories/RepositoryData.java b/server/src/main/java/org/opensearch/repositories/RepositoryData.java index 7b085c961ba23..7857df512cd75 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoryData.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoryData.java @@ -168,11 +168,8 @@ private RepositoryData( + shardGenerations.indices() + " but snapshots only reference indices " + indices.values(); - assert indexSnapshots.values() - .stream() - .noneMatch( - snapshotIdList -> new HashSet<>(snapshotIdList).size() != snapshotIdList.size() - ) : "Found duplicate snapshot ids per index in [" + indexSnapshots + "]"; + assert indexSnapshots.values().stream().noneMatch(snapshotIdList -> new HashSet<>(snapshotIdList).size() != snapshotIdList.size()) + : "Found duplicate snapshot ids per index in [" + indexSnapshots + "]"; } protected RepositoryData copy() { @@ -355,8 +352,8 @@ public RepositoryData addSnapshot( + "]"; newIndexMetaGenerations = IndexMetaDataGenerations.EMPTY; } else { - assert indexMetaBlobs.isEmpty() - || shardGenerations.indices().equals(indexMetaBlobs.keySet()) : "Shard generations contained indices " + assert indexMetaBlobs.isEmpty() || shardGenerations.indices().equals(indexMetaBlobs.keySet()) + : "Shard generations contained 
indices " + shardGenerations.indices() + " but indexMetaData was given for " + indexMetaBlobs.keySet(); diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index c64c8bb035a33..7d6cdef76198f 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -1909,13 +1909,8 @@ public ClusterState execute(ClusterState currentState) { meta.pendingGeneration() ); } - assert expectedGen == RepositoryData.EMPTY_REPO_GEN - || uninitializedMeta - || expectedGen == meta.generation() : "Expected non-empty generation [" - + expectedGen - + "] does not match generation tracked in [" - + meta - + "]"; + assert expectedGen == RepositoryData.EMPTY_REPO_GEN || uninitializedMeta || expectedGen == meta.generation() + : "Expected non-empty generation [" + expectedGen + "] does not match generation tracked in [" + meta + "]"; // If we run into the empty repo generation for the expected gen, the repo is assumed to have been cleared of // all contents by an external process so we reset the safe generation to the empty generation. final long safeGeneration = expectedGen == RepositoryData.EMPTY_REPO_GEN diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestNodesStatsAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestNodesStatsAction.java index 972c5284b382f..3c66b0740536f 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestNodesStatsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestNodesStatsAction.java @@ -193,9 +193,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC if (nodesStatsRequest.indices().isSet(Flag.Search) && (request.hasParam("groups"))) { nodesStatsRequest.indices().groups(request.paramAsStringArray("groups", null)); } - if (nodesStatsRequest.indices().isSet(Flag.Indexing) && (request.hasParam("types"))) { - nodesStatsRequest.indices().types(request.paramAsStringArray("types", null)); - } if (nodesStatsRequest.indices().isSet(Flag.Segments)) { nodesStatsRequest.indices().includeSegmentFileSizes(request.paramAsBoolean("include_segment_file_sizes", false)); } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetFieldMappingAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetFieldMappingAction.java index f5c5f926df36c..8b04e0b66dfae 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetFieldMappingAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetFieldMappingAction.java @@ -68,15 +68,7 @@ public class RestGetFieldMappingAction extends BaseRestHandler { @Override public List routes() { - return unmodifiableList( - asList( - new Route(GET, "/_mapping/field/{fields}"), - new Route(GET, "/_mapping/{type}/field/{fields}"), - new Route(GET, "/{index}/_mapping/field/{fields}"), - new Route(GET, "/{index}/{type}/_mapping/field/{fields}"), - new Route(GET, "/{index}/_mapping/{type}/field/{fields}") - ) - ); + return unmodifiableList(asList(new Route(GET, "/_mapping/field/{fields}"), new Route(GET, "/{index}/_mapping/field/{fields}"))); } @Override @@ -87,21 +79,20 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient 
client) throws IOException { final String[] indices = Strings.splitStringByCommaToArray(request.param("index")); - final String[] types = request.paramAsStringArrayOrEmptyIfAll("type"); final String[] fields = Strings.splitStringByCommaToArray(request.param("fields")); - boolean includeTypeName = request.paramAsBoolean(INCLUDE_TYPE_NAME_PARAMETER, DEFAULT_INCLUDE_TYPE_NAME_POLICY); - if (includeTypeName == false && types.length > 0) { - throw new IllegalArgumentException("Types cannot be specified unless include_type_name" + " is set to true."); - } + GetFieldMappingsRequest getMappingsRequest = new GetFieldMappingsRequest(); + getMappingsRequest.indices(indices).fields(fields).includeDefaults(request.paramAsBoolean("include_defaults", false)); + getMappingsRequest.indicesOptions(IndicesOptions.fromRequest(request, getMappingsRequest.indicesOptions())); + if (request.hasParam(INCLUDE_TYPE_NAME_PARAMETER)) { + boolean includeTypeName = request.paramAsBoolean(INCLUDE_TYPE_NAME_PARAMETER, DEFAULT_INCLUDE_TYPE_NAME_POLICY); + if (includeTypeName) { + throw new IllegalArgumentException(INCLUDE_TYPE_NAME_PARAMETER + " no longer supports the value [true]."); + } deprecationLogger.deprecate("get_field_mapping_with_types", TYPES_DEPRECATION_MESSAGE); } - GetFieldMappingsRequest getMappingsRequest = new GetFieldMappingsRequest(); - getMappingsRequest.indices(indices).types(types).fields(fields).includeDefaults(request.paramAsBoolean("include_defaults", false)); - getMappingsRequest.indicesOptions(IndicesOptions.fromRequest(request, getMappingsRequest.indicesOptions())); - if (request.hasParam("local")) { deprecationLogger.deprecate( "get_field_mapping_local", @@ -116,7 +107,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC public RestResponse buildResponse(GetFieldMappingsResponse response, XContentBuilder builder) throws Exception { Map>> mappingsByIndex = response.mappings(); - boolean isPossibleSingleFieldRequest = indices.length == 1 && types.length == 1 && fields.length == 1; + boolean isPossibleSingleFieldRequest = indices.length == 1 && fields.length == 1; if (isPossibleSingleFieldRequest && isFieldMappingMissingField(mappingsByIndex)) { return new BytesRestResponse(OK, builder.startObject().endObject()); } @@ -126,7 +117,7 @@ public RestResponse buildResponse(GetFieldMappingsResponse response, XContentBui status = NOT_FOUND; } response.toXContent(builder, request); - return new BytesRestResponse(status, builder); + return new BytesRestResponse(RestStatus.OK, builder); } }); } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetMappingAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetMappingAction.java index 66b047ad9691c..f196eb4e41d6d 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetMappingAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetMappingAction.java @@ -32,7 +32,6 @@ package org.opensearch.rest.action.admin.indices; -import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.OpenSearchTimeoutException; @@ -41,15 +40,10 @@ import org.opensearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.opensearch.action.support.IndicesOptions; import org.opensearch.client.node.NodeClient; -import org.opensearch.cluster.metadata.MappingMetadata; import org.opensearch.common.Strings; -import 
org.opensearch.common.collect.ImmutableOpenMap; import org.opensearch.common.logging.DeprecationLogger; -import org.opensearch.common.regex.Regex; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.set.Sets; import org.opensearch.common.xcontent.XContentBuilder; -import org.opensearch.indices.TypeMissingException; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestRequest; @@ -60,19 +54,11 @@ import org.opensearch.threadpool.ThreadPool; import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashSet; import java.util.List; -import java.util.Locale; -import java.util.Set; -import java.util.SortedSet; -import java.util.stream.Collectors; import static java.util.Arrays.asList; import static java.util.Collections.unmodifiableList; import static org.opensearch.rest.RestRequest.Method.GET; -import static org.opensearch.rest.RestRequest.Method.HEAD; public class RestGetMappingAction extends BaseRestHandler { private static final Logger logger = LogManager.getLogger(RestGetMappingAction.class); @@ -92,13 +78,8 @@ public List routes() { asList( new Route(GET, "/_mapping"), new Route(GET, "/_mappings"), - new Route(GET, "/{index}/{type}/_mapping"), new Route(GET, "/{index}/_mapping"), - new Route(GET, "/{index}/_mappings"), - new Route(GET, "/{index}/_mappings/{type}"), - new Route(GET, "/{index}/_mapping/{type}"), - new Route(HEAD, "/{index}/_mapping/{type}"), - new Route(GET, "/_mapping/{type}") + new Route(GET, "/{index}/_mappings") ) ); } @@ -111,22 +92,9 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { final String[] indices = Strings.splitStringByCommaToArray(request.param("index")); - final String[] types = request.paramAsStringArrayOrEmptyIfAll("type"); - boolean includeTypeName = request.paramAsBoolean(INCLUDE_TYPE_NAME_PARAMETER, DEFAULT_INCLUDE_TYPE_NAME_POLICY); - - if (request.method().equals(HEAD)) { - deprecationLogger.deprecate("get_mapping_types_removal", "Type exists requests are deprecated, as types have been deprecated."); - } else if (includeTypeName == false && types.length > 0) { - throw new IllegalArgumentException( - "Types cannot be provided in get mapping requests, unless" + " include_type_name is set to true." 
- ); - } - if (request.hasParam(INCLUDE_TYPE_NAME_PARAMETER)) { - deprecationLogger.deprecate("get_mapping_with_types", TYPES_DEPRECATION_MESSAGE); - } final GetMappingsRequest getMappingsRequest = new GetMappingsRequest(); - getMappingsRequest.indices(indices).types(types); + getMappingsRequest.indices(indices); getMappingsRequest.indicesOptions(IndicesOptions.fromRequest(request, getMappingsRequest.indicesOptions())); final TimeValue timeout = request.paramAsTime("master_timeout", getMappingsRequest.masterNodeTimeout()); getMappingsRequest.masterNodeTimeout(timeout); @@ -146,59 +114,10 @@ public RestResponse buildResponse(final GetMappingsResponse response, final XCon if (threadPool.relativeTimeInMillis() - startTimeMs > timeout.millis()) { throw new OpenSearchTimeoutException("Timed out getting mappings"); } - final ImmutableOpenMap> mappingsByIndex = response - .getMappings(); - if (mappingsByIndex.isEmpty() && types.length != 0) { - builder.close(); - return new BytesRestResponse(channel, new TypeMissingException("_all", String.join(",", types))); - } - - final Set typeNames = new HashSet<>(); - for (final ObjectCursor> cursor : mappingsByIndex.values()) { - for (final ObjectCursor inner : cursor.value.keys()) { - typeNames.add(inner.value); - } - } - - final SortedSet difference = Sets.sortedDifference( - Arrays.stream(types).collect(Collectors.toSet()), - typeNames - ); - - // now remove requested aliases that contain wildcards that are simple matches - final List matches = new ArrayList<>(); - outer: for (final String pattern : difference) { - if (pattern.contains("*")) { - for (final String typeName : typeNames) { - if (Regex.simpleMatch(pattern, typeName)) { - matches.add(pattern); - continue outer; - } - } - } - } - difference.removeAll(matches); - - final RestStatus status; builder.startObject(); - { - if (difference.isEmpty()) { - status = RestStatus.OK; - } else { - status = RestStatus.NOT_FOUND; - final String message = String.format( - Locale.ROOT, - "type" + (difference.size() == 1 ? 
"" : "s") + " [%s] missing", - Strings.collectionToCommaDelimitedString(difference) - ); - builder.field("error", message); - builder.field("status", status.getStatus()); - } - response.toXContent(builder, request); - } + response.toXContent(builder, request); builder.endObject(); - - return new BytesRestResponse(status, builder); + return new BytesRestResponse(RestStatus.OK, builder); } }.onResponse(getMappingsResponse))); } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestIndicesStatsAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestIndicesStatsAction.java index 696bff33a73a4..eabe14a7614ac 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestIndicesStatsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestIndicesStatsAction.java @@ -102,7 +102,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC + "options changed"; indicesStatsRequest.indicesOptions(IndicesOptions.fromRequest(request, defaultIndicesOption)); indicesStatsRequest.indices(Strings.splitStringByCommaToArray(request.param("index"))); - indicesStatsRequest.types(Strings.splitStringByCommaToArray(request.param("types"))); Set metrics = Strings.tokenizeByCommaToSet(request.param("metric", "_all")); // short cut, if no metrics have been specified in URI @@ -139,10 +138,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC indicesStatsRequest.groups(Strings.splitStringByCommaToArray(request.param("groups"))); } - if (request.hasParam("types")) { - indicesStatsRequest.types(Strings.splitStringByCommaToArray(request.param("types"))); - } - if (indicesStatsRequest.completion() && (request.hasParam("fields") || request.hasParam("completion_fields"))) { indicesStatsRequest.completionFields( request.paramAsStringArray("completion_fields", request.paramAsStringArray("fields", Strings.EMPTY_ARRAY)) diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestPutMappingAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestPutMappingAction.java index a9f9595df1078..5da0b016c867d 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestPutMappingAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestPutMappingAction.java @@ -50,7 +50,6 @@ import static java.util.Arrays.asList; import static java.util.Collections.unmodifiableList; import static org.opensearch.client.Requests.putMappingRequest; -import static org.opensearch.index.mapper.MapperService.isMappingSourceTyped; import static org.opensearch.rest.RestRequest.Method.POST; import static org.opensearch.rest.RestRequest.Method.PUT; @@ -65,20 +64,8 @@ public List routes() { asList( new Route(POST, "/{index}/_mapping/"), new Route(PUT, "/{index}/_mapping/"), - new Route(POST, "/{index}/{type}/_mapping"), - new Route(PUT, "/{index}/{type}/_mapping"), - new Route(POST, "/{index}/_mapping/{type}"), - new Route(PUT, "/{index}/_mapping/{type}"), - new Route(POST, "/_mapping/{type}"), - new Route(PUT, "/_mapping/{type}"), new Route(POST, "/{index}/_mappings/"), - new Route(PUT, "/{index}/_mappings/"), - new Route(POST, "/{index}/{type}/_mappings"), - new Route(PUT, "/{index}/{type}/_mappings"), - new Route(POST, "/{index}/_mappings/{type}"), - new Route(PUT, "/{index}/_mappings/{type}"), - new Route(POST, "/_mappings/{type}"), - new Route(PUT, "/_mappings/{type}") + new Route(PUT, "/{index}/_mappings/") ) ); } @@ -90,21 
+77,20 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + + PutMappingRequest putMappingRequest = putMappingRequest(Strings.splitStringByCommaToArray(request.param("index"))); + final boolean includeTypeName = request.paramAsBoolean(INCLUDE_TYPE_NAME_PARAMETER, DEFAULT_INCLUDE_TYPE_NAME_POLICY); if (request.hasParam(INCLUDE_TYPE_NAME_PARAMETER)) { deprecationLogger.deprecate("put_mapping_with_types", TYPES_DEPRECATION_MESSAGE); } - PutMappingRequest putMappingRequest = putMappingRequest(Strings.splitStringByCommaToArray(request.param("index"))); - - final String type = request.param("type"); - putMappingRequest.type(includeTypeName ? type : MapperService.SINGLE_MAPPING_NAME); + putMappingRequest.type(MapperService.SINGLE_MAPPING_NAME); Map sourceAsMap = XContentHelper.convertToMap(request.requiredContent(), false, request.getXContentType()).v2(); - if (includeTypeName == false && (type != null || isMappingSourceTyped(MapperService.SINGLE_MAPPING_NAME, sourceAsMap))) { - throw new IllegalArgumentException( - "Types cannot be provided in put mapping requests, unless " + "the include_type_name parameter is set to true." - ); + + if (includeTypeName == false && MapperService.isMappingSourceTyped(MapperService.SINGLE_MAPPING_NAME, sourceAsMap)) { + throw new IllegalArgumentException("Types cannot be provided in put mapping requests"); } putMappingRequest.source(sourceAsMap); diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestSyncedFlushAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestSyncedFlushAction.java index ce5b816428e5f..726fd69dc29c1 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestSyncedFlushAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestSyncedFlushAction.java @@ -32,17 +32,20 @@ package org.opensearch.rest.action.admin.indices; -import org.opensearch.action.admin.indices.flush.SyncedFlushRequest; -import org.opensearch.action.admin.indices.flush.SyncedFlushResponse; +import org.opensearch.action.admin.indices.flush.FlushRequest; +import org.opensearch.action.admin.indices.flush.FlushResponse; import org.opensearch.action.support.IndicesOptions; import org.opensearch.client.node.NodeClient; import org.opensearch.common.Strings; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.BytesRestResponse; +import org.opensearch.rest.RestChannel; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestResponse; -import org.opensearch.rest.action.RestBuilderListener; +import org.opensearch.rest.RestStatus; +import org.opensearch.rest.action.RestToXContentListener; import java.io.IOException; import java.util.List; @@ -54,6 +57,8 @@ public class RestSyncedFlushAction extends BaseRestHandler { + private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(RestSyncedFlushAction.class); + @Override public List routes() { return unmodifiableList( @@ -73,17 +78,37 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - IndicesOptions indicesOptions = IndicesOptions.fromRequest(request, IndicesOptions.lenientExpandOpen()); - SyncedFlushRequest syncedFlushRequest = new 
SyncedFlushRequest(Strings.splitStringByCommaToArray(request.param("index"))); - syncedFlushRequest.indicesOptions(indicesOptions); - return channel -> client.admin().indices().syncedFlush(syncedFlushRequest, new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(SyncedFlushResponse results, XContentBuilder builder) throws Exception { - builder.startObject(); - results.toXContent(builder, request); - builder.endObject(); - return new BytesRestResponse(results.restStatus(), builder); - } - }); + DEPRECATION_LOGGER.deprecate( + "synced_flush", + "Synced flush was removed and a normal flush was performed instead. This transition will be removed in a future version." + ); + final FlushRequest flushRequest = new FlushRequest(Strings.splitStringByCommaToArray(request.param("index"))); + flushRequest.indicesOptions(IndicesOptions.fromRequest(request, flushRequest.indicesOptions())); + return channel -> client.admin().indices().flush(flushRequest, new SimulateSyncedFlushResponseListener(channel)); + } + + static final class SimulateSyncedFlushResponseListener extends RestToXContentListener { + + SimulateSyncedFlushResponseListener(RestChannel channel) { + super(channel); + } + + @Override + public RestResponse buildResponse(FlushResponse flushResponse, XContentBuilder builder) throws Exception { + builder.startObject(); + buildSyncedFlushResponse(builder, flushResponse); + builder.endObject(); + final RestStatus restStatus = flushResponse.getFailedShards() == 0 ? RestStatus.OK : RestStatus.CONFLICT; + return new BytesRestResponse(restStatus, builder); + } + + private void buildSyncedFlushResponse(XContentBuilder builder, FlushResponse flushResponse) throws IOException { + builder.startObject("_shards"); + builder.field("total", flushResponse.getTotalShards()); + builder.field("successful", flushResponse.getSuccessfulShards()); + builder.field("failed", flushResponse.getFailedShards()); + // can't serialize the detail of each index as we don't have the shard count per index. + builder.endObject(); + } } } diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java index 80c42da5c4b83..2c0eef6a8fdb8 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java @@ -850,8 +850,8 @@ Table buildTable( table.addCell(totalStats.getSegments() == null ? null : totalStats.getSegments().getCount()); table.addCell(primaryStats.getSegments() == null ? null : primaryStats.getSegments().getCount()); - table.addCell(totalStats.getSegments() == null ? null : totalStats.getSegments().getMemory()); - table.addCell(primaryStats.getSegments() == null ? null : primaryStats.getSegments().getMemory()); + table.addCell(totalStats.getSegments() == null ? null : totalStats.getSegments().getZeroMemory()); + table.addCell(primaryStats.getSegments() == null ? null : primaryStats.getSegments().getZeroMemory()); table.addCell(totalStats.getSegments() == null ? null : totalStats.getSegments().getIndexWriterMemory()); table.addCell(primaryStats.getSegments() == null ? 
null : primaryStats.getSegments().getIndexWriterMemory()); diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java index 30970511e865e..bce9b2d6b7e9d 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java @@ -501,7 +501,7 @@ Table buildTable( SegmentsStats segmentsStats = indicesStats == null ? null : indicesStats.getSegments(); table.addCell(segmentsStats == null ? null : segmentsStats.getCount()); - table.addCell(segmentsStats == null ? null : segmentsStats.getMemory()); + table.addCell(segmentsStats == null ? null : segmentsStats.getZeroMemory()); table.addCell(segmentsStats == null ? null : segmentsStats.getIndexWriterMemory()); table.addCell(segmentsStats == null ? null : segmentsStats.getVersionMapMemory()); table.addCell(segmentsStats == null ? null : segmentsStats.getBitsetMemory()); diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestSegmentsAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestSegmentsAction.java index c258825e669b4..8d9d1937bdf56 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestSegmentsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestSegmentsAction.java @@ -155,7 +155,7 @@ private Table buildTable(final RestRequest request, ClusterStateResponse state, table.addCell(segment.getNumDocs()); table.addCell(segment.getDeletedDocs()); table.addCell(segment.getSize()); - table.addCell(segment.getMemoryInBytes()); + table.addCell(0L); table.addCell(segment.isCommitted()); table.addCell(segment.isSearch()); table.addCell(segment.getVersion()); diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java index eea5da25776e9..f9aa1a5554e9e 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java @@ -381,7 +381,7 @@ Table buildTable(RestRequest request, ClusterStateResponse state, IndicesStatsRe table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getScrollCount())); table.addCell(getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getCount)); - table.addCell(getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getMemory)); + table.addCell(getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getZeroMemory)); table.addCell(getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getIndexWriterMemory)); table.addCell(getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getVersionMapMemory)); table.addCell(getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getBitsetMemory)); diff --git a/server/src/main/java/org/opensearch/rest/action/document/RestBulkAction.java b/server/src/main/java/org/opensearch/rest/action/document/RestBulkAction.java index 52f4e6bc18e2f..c140514e3c92c 100644 --- a/server/src/main/java/org/opensearch/rest/action/document/RestBulkAction.java +++ b/server/src/main/java/org/opensearch/rest/action/document/RestBulkAction.java @@ -38,13 +38,10 @@ import org.opensearch.action.support.ActiveShardCount; import org.opensearch.client.Requests; import org.opensearch.client.node.NodeClient; -import org.opensearch.common.logging.DeprecationLogger; import 
org.opensearch.common.settings.Settings; -import org.opensearch.index.mapper.MapperService; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.RestStatusToXContentListener; -import org.opensearch.rest.action.search.RestSearchAction; import org.opensearch.search.fetch.subphase.FetchSourceContext; import java.io.IOException; @@ -57,18 +54,16 @@ /** *
    - * { "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" }
    + * { "index" : { "_index" : "test", "_id" : "1" }
      * { "type1" : { "field1" : "value1" } }
    - * { "delete" : { "_index" : "test", "_type" : "type1", "_id" : "2" } }
    - * { "create" : { "_index" : "test", "_type" : "type1", "_id" : "1" }
    + * { "delete" : { "_index" : "test", "_id" : "2" } }
    + * { "create" : { "_index" : "test", "_id" : "1" }
      * { "type1" : { "field1" : "value1" } }
      * 
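      *
      * A minimal, illustrative sketch of the same typeless bulk request built from Java;
      * the index name, ids, and field values are assumed examples, not part of this change:
      *
      *   BulkRequest bulkRequest = new BulkRequest();
      *   bulkRequest.add(new IndexRequest("test").id("1").source("field1", "value1"));
      *   bulkRequest.add(new DeleteRequest("test", "2"));
      *   bulkRequest.add(new IndexRequest("test").id("1").create(true).source("field1", "value1"));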
    */ public class RestBulkAction extends BaseRestHandler { private final boolean allowExplicitIndex; - private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestSearchAction.class); - public static final String TYPES_DEPRECATION_MESSAGE = "[types removal]" + " Specifying types in bulk requests is deprecated."; public RestBulkAction(Settings settings) { this.allowExplicitIndex = MULTI_ALLOW_EXPLICIT_INDEX.get(settings); @@ -77,15 +72,7 @@ public RestBulkAction(Settings settings) { @Override public List routes() { return unmodifiableList( - asList( - new Route(POST, "/_bulk"), - new Route(PUT, "/_bulk"), - new Route(POST, "/{index}/_bulk"), - new Route(PUT, "/{index}/_bulk"), - // Deprecated typed endpoints. - new Route(POST, "/{index}/{type}/_bulk"), - new Route(PUT, "/{index}/{type}/_bulk") - ) + asList(new Route(POST, "/_bulk"), new Route(PUT, "/_bulk"), new Route(POST, "/{index}/_bulk"), new Route(PUT, "/{index}/_bulk")) ); } @@ -98,12 +85,6 @@ public String getName() { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { BulkRequest bulkRequest = Requests.bulkRequest(); String defaultIndex = request.param("index"); - String defaultType = request.param("type"); - if (defaultType == null) { - defaultType = MapperService.SINGLE_MAPPING_NAME; - } else { - deprecationLogger.deprecate("bulk_with_types", RestBulkAction.TYPES_DEPRECATION_MESSAGE); - } String defaultRouting = request.param("routing"); FetchSourceContext defaultFetchSourceContext = FetchSourceContext.parseFromRestRequest(request); String defaultPipeline = request.param("pipeline"); @@ -117,7 +98,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC bulkRequest.add( request.requiredContent(), defaultIndex, - defaultType, defaultRouting, defaultFetchSourceContext, defaultPipeline, diff --git a/server/src/main/java/org/opensearch/rest/action/document/RestDeleteAction.java b/server/src/main/java/org/opensearch/rest/action/document/RestDeleteAction.java index 25a50a49d3aa0..f9f5933a44c95 100644 --- a/server/src/main/java/org/opensearch/rest/action/document/RestDeleteAction.java +++ b/server/src/main/java/org/opensearch/rest/action/document/RestDeleteAction.java @@ -35,7 +35,6 @@ import org.opensearch.action.delete.DeleteRequest; import org.opensearch.action.support.ActiveShardCount; import org.opensearch.client.node.NodeClient; -import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.index.VersionType; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; @@ -50,19 +49,10 @@ import static org.opensearch.rest.RestRequest.Method.DELETE; public class RestDeleteAction extends BaseRestHandler { - private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestDeleteAction.class); - public static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Specifying types in " - + "document index requests is deprecated, use the /{index}/_doc/{id} endpoint instead."; @Override public List routes() { - return unmodifiableList( - asList( - new Route(DELETE, "/{index}/_doc/{id}"), - // Deprecated typed endpoint. 
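                // (Editorial, illustrative note: only the typeless route above survives.
                // A minimal client-side equivalent, assuming a Client named "client", an
                // ActionListener named "listener", and example index/id values:
                //   client.delete(new DeleteRequest("test", "1"), listener);
                // The old /{index}/{type}/{id} path no longer resolves to any handler.)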
- new Route(DELETE, "/{index}/{type}/{id}") - ) - ); + return unmodifiableList(asList(new Route(DELETE, "/{index}/_doc/{id}"))); } @Override @@ -72,14 +62,7 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - DeleteRequest deleteRequest; - if (request.hasParam("type")) { - deprecationLogger.deprecate("delete_with_types", TYPES_DEPRECATION_MESSAGE); - deleteRequest = new DeleteRequest(request.param("index"), request.param("type"), request.param("id")); - } else { - deleteRequest = new DeleteRequest(request.param("index"), request.param("id")); - } - + DeleteRequest deleteRequest = new DeleteRequest(request.param("index"), request.param("id")); deleteRequest.routing(request.param("routing")); deleteRequest.timeout(request.paramAsTime("timeout", DeleteRequest.DEFAULT_TIMEOUT)); deleteRequest.setRefreshPolicy(request.param("refresh")); diff --git a/server/src/main/java/org/opensearch/rest/action/document/RestGetAction.java b/server/src/main/java/org/opensearch/rest/action/document/RestGetAction.java index 18d39edf887b5..a0ec48ee55451 100644 --- a/server/src/main/java/org/opensearch/rest/action/document/RestGetAction.java +++ b/server/src/main/java/org/opensearch/rest/action/document/RestGetAction.java @@ -36,7 +36,6 @@ import org.opensearch.action.get.GetResponse; import org.opensearch.client.node.NodeClient; import org.opensearch.common.Strings; -import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.index.VersionType; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; @@ -56,9 +55,6 @@ import static org.opensearch.rest.RestStatus.OK; public class RestGetAction extends BaseRestHandler { - private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestGetAction.class); - public static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Specifying types in " - + "document get requests is deprecated, use the /{index}/_doc/{id} endpoint instead."; @Override public String getName() { @@ -67,27 +63,12 @@ public String getName() { @Override public List routes() { - return unmodifiableList( - asList( - new Route(GET, "/{index}/_doc/{id}"), - new Route(HEAD, "/{index}/_doc/{id}"), - // Deprecated typed endpoints. 
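                // (Editorial, illustrative sketch: the handler below now always builds the
                // typeless request, equivalent to the assumed example
                //   GetRequest getRequest = new GetRequest("test", "1");
                // for both the GET and HEAD routes above.)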
- new Route(GET, "/{index}/{type}/{id}"), - new Route(HEAD, "/{index}/{type}/{id}") - ) - ); + return unmodifiableList(asList(new Route(GET, "/{index}/_doc/{id}"), new Route(HEAD, "/{index}/_doc/{id}"))); } @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - GetRequest getRequest; - if (request.hasParam("type")) { - deprecationLogger.deprecate("get_with_types", TYPES_DEPRECATION_MESSAGE); - getRequest = new GetRequest(request.param("index"), request.param("type"), request.param("id")); - } else { - getRequest = new GetRequest(request.param("index"), request.param("id")); - } - + GetRequest getRequest = new GetRequest(request.param("index"), request.param("id")); getRequest.refresh(request.paramAsBoolean("refresh", getRequest.refresh())); getRequest.routing(request.param("routing")); getRequest.preference(request.param("preference")); diff --git a/server/src/main/java/org/opensearch/rest/action/document/RestGetSourceAction.java b/server/src/main/java/org/opensearch/rest/action/document/RestGetSourceAction.java index cd6b3b16e79cd..801ab85039d2d 100644 --- a/server/src/main/java/org/opensearch/rest/action/document/RestGetSourceAction.java +++ b/server/src/main/java/org/opensearch/rest/action/document/RestGetSourceAction.java @@ -38,7 +38,6 @@ import org.opensearch.action.get.GetResponse; import org.opensearch.client.node.NodeClient; import org.opensearch.common.bytes.BytesReference; -import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.rest.BaseRestHandler; @@ -64,20 +63,9 @@ */ public class RestGetSourceAction extends BaseRestHandler { - private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestGetSourceAction.class); - static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Specifying types in get_source and exist_source" - + "requests is deprecated."; - @Override public List routes() { - return unmodifiableList( - asList( - new Route(GET, "/{index}/_source/{id}"), - new Route(HEAD, "/{index}/_source/{id}"), - new Route(GET, "/{index}/{type}/{id}/_source"), - new Route(HEAD, "/{index}/{type}/{id}/_source") - ) - ); + return unmodifiableList(asList(new Route(GET, "/{index}/_source/{id}"), new Route(HEAD, "/{index}/_source/{id}"))); } @Override @@ -87,13 +75,7 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - final GetRequest getRequest; - if (request.hasParam("type")) { - deprecationLogger.deprecate("get_source_with_types", TYPES_DEPRECATION_MESSAGE); - getRequest = new GetRequest(request.param("index"), request.param("type"), request.param("id")); - } else { - getRequest = new GetRequest(request.param("index"), request.param("id")); - } + final GetRequest getRequest = new GetRequest(request.param("index"), request.param("id")); getRequest.refresh(request.paramAsBoolean("refresh", getRequest.refresh())); getRequest.routing(request.param("routing")); getRequest.preference(request.param("preference")); @@ -140,13 +122,12 @@ public RestResponse buildResponse(final GetResponse response) throws Exception { */ private void checkResource(final GetResponse response) { final String index = response.getIndex(); - final String type = response.getType(); final String id = response.getId(); if (response.isExists() == false) { - throw new 
ResourceNotFoundException("Document not found [" + index + "]/[" + type + "]/[" + id + "]"); + throw new ResourceNotFoundException("Document not found [" + index + "]/[" + id + "]"); } else if (response.isSourceEmpty()) { - throw new ResourceNotFoundException("Source not found [" + index + "]/[" + type + "]/[" + id + "]"); + throw new ResourceNotFoundException("Source not found [" + index + "]/[" + id + "]"); } } } diff --git a/server/src/main/java/org/opensearch/rest/action/document/RestIndexAction.java b/server/src/main/java/org/opensearch/rest/action/document/RestIndexAction.java index f3ffe0cdee02e..75f3967c32ba7 100644 --- a/server/src/main/java/org/opensearch/rest/action/document/RestIndexAction.java +++ b/server/src/main/java/org/opensearch/rest/action/document/RestIndexAction.java @@ -38,9 +38,7 @@ import org.opensearch.action.support.ActiveShardCount; import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.node.DiscoveryNodes; -import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.index.VersionType; -import org.opensearch.index.mapper.MapperService; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.RestActions; @@ -57,21 +55,10 @@ import static org.opensearch.rest.RestRequest.Method.PUT; public class RestIndexAction extends BaseRestHandler { - private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestDeleteAction.class); - public static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Specifying types in document " - + "index requests is deprecated, use the typeless endpoints instead (/{index}/_doc/{id}, /{index}/_doc, " - + "or /{index}/_create/{id})."; @Override public List routes() { - return unmodifiableList( - asList( - new Route(POST, "/{index}/_doc/{id}"), - new Route(PUT, "/{index}/_doc/{id}"), - new Route(POST, "/{index}/{type}/{id}"), - new Route(PUT, "/{index}/{type}/{id}") - ) - ); + return unmodifiableList(asList(new Route(POST, "/{index}/_doc/{id}"), new Route(PUT, "/{index}/_doc/{id}"))); } @Override @@ -88,14 +75,7 @@ public String getName() { @Override public List routes() { - return unmodifiableList( - asList( - new Route(POST, "/{index}/_create/{id}"), - new Route(PUT, "/{index}/_create/{id}"), - new Route(POST, "/{index}/{type}/{id}/_create"), - new Route(PUT, "/{index}/{type}/{id}/_create") - ) - ); + return unmodifiableList(asList(new Route(POST, "/{index}/_create/{id}"), new Route(PUT, "/{index}/_create/{id}"))); } @Override @@ -127,7 +107,7 @@ public String getName() { @Override public List routes() { - return unmodifiableList(asList(new Route(POST, "/{index}/_doc"), new Route(POST, "/{index}/{type}"))); + return unmodifiableList(asList(new Route(POST, "/{index}/_doc"))); } @Override @@ -143,15 +123,8 @@ public RestChannelConsumer prepareRequest(RestRequest request, final NodeClient @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - IndexRequest indexRequest; - final String type = request.param("type"); - if (type != null && type.equals(MapperService.SINGLE_MAPPING_NAME) == false) { - deprecationLogger.deprecate("index_with_types", TYPES_DEPRECATION_MESSAGE); - indexRequest = new IndexRequest(request.param("index"), type, request.param("id")); - } else { - indexRequest = new IndexRequest(request.param("index")); - indexRequest.id(request.param("id")); - } + IndexRequest indexRequest = new IndexRequest(request.param("index")); + 
indexRequest.id(request.param("id")); indexRequest.routing(request.param("routing")); indexRequest.setPipeline(request.param("pipeline")); indexRequest.source(request.requiredContent(), request.getXContentType()); diff --git a/server/src/main/java/org/opensearch/rest/action/document/RestMultiGetAction.java b/server/src/main/java/org/opensearch/rest/action/document/RestMultiGetAction.java index 514dc26b3e7ee..6713bddfd837d 100644 --- a/server/src/main/java/org/opensearch/rest/action/document/RestMultiGetAction.java +++ b/server/src/main/java/org/opensearch/rest/action/document/RestMultiGetAction.java @@ -35,7 +35,6 @@ import org.opensearch.action.get.MultiGetRequest; import org.opensearch.client.node.NodeClient; import org.opensearch.common.Strings; -import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentParser; import org.opensearch.rest.BaseRestHandler; @@ -52,9 +51,6 @@ import static org.opensearch.rest.RestRequest.Method.POST; public class RestMultiGetAction extends BaseRestHandler { - private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestMultiGetAction.class); - public static final String TYPES_DEPRECATION_MESSAGE = "[types removal]" + " Specifying types in multi get requests is deprecated."; - private final boolean allowExplicitIndex; public RestMultiGetAction(Settings settings) { @@ -64,15 +60,7 @@ public RestMultiGetAction(Settings settings) { @Override public List routes() { return unmodifiableList( - asList( - new Route(GET, "/_mget"), - new Route(POST, "/_mget"), - new Route(GET, "/{index}/_mget"), - new Route(POST, "/{index}/_mget"), - // Deprecated typed endpoints. - new Route(GET, "/{index}/{type}/_mget"), - new Route(POST, "/{index}/{type}/_mget") - ) + asList(new Route(GET, "/_mget"), new Route(POST, "/_mget"), new Route(GET, "/{index}/_mget"), new Route(POST, "/{index}/_mget")) ); } @@ -83,10 +71,6 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - if (request.param("type") != null) { - deprecationLogger.deprecate("mget_with_types", TYPES_DEPRECATION_MESSAGE); - } - MultiGetRequest multiGetRequest = new MultiGetRequest(); multiGetRequest.refresh(request.paramAsBoolean("refresh", multiGetRequest.refresh())); multiGetRequest.preference(request.param("preference")); @@ -105,22 +89,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC FetchSourceContext defaultFetchSource = FetchSourceContext.parseFromRestRequest(request); try (XContentParser parser = request.contentOrSourceParamParser()) { - multiGetRequest.add( - request.param("index"), - request.param("type"), - sFields, - defaultFetchSource, - request.param("routing"), - parser, - allowExplicitIndex - ); - } - - for (MultiGetRequest.Item item : multiGetRequest.getItems()) { - if (item.type() != null) { - deprecationLogger.deprecate("mget_with_types", TYPES_DEPRECATION_MESSAGE); - break; - } + multiGetRequest.add(request.param("index"), sFields, defaultFetchSource, request.param("routing"), parser, allowExplicitIndex); } return channel -> client.multiGet(multiGetRequest, new RestToXContentListener<>(channel)); diff --git a/server/src/main/java/org/opensearch/rest/action/document/RestMultiTermVectorsAction.java b/server/src/main/java/org/opensearch/rest/action/document/RestMultiTermVectorsAction.java index c2196dc84410d..2c52e75dc47b3 100644 --- 
a/server/src/main/java/org/opensearch/rest/action/document/RestMultiTermVectorsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/document/RestMultiTermVectorsAction.java @@ -37,7 +37,6 @@ import org.opensearch.client.node.NodeClient; import org.opensearch.common.Strings; import org.opensearch.common.logging.DeprecationLogger; -import org.opensearch.index.mapper.MapperService; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.RestToXContentListener; @@ -79,13 +78,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC MultiTermVectorsRequest multiTermVectorsRequest = new MultiTermVectorsRequest(); TermVectorsRequest template = new TermVectorsRequest().index(request.param("index")); - if (request.hasParam("type")) { - deprecationLogger.deprecate("mtermvectors_with_types", TYPES_DEPRECATION_MESSAGE); - template.type(request.param("type")); - } else { - template.type(MapperService.SINGLE_MAPPING_NAME); - } - RestTermVectorsAction.readURIParameters(template, request); multiTermVectorsRequest.ids(Strings.commaDelimitedListToStringArray(request.param("ids"))); request.withContentOrSourceParamParserOrNull(p -> multiTermVectorsRequest.add(template, p)); diff --git a/server/src/main/java/org/opensearch/rest/action/document/RestTermVectorsAction.java b/server/src/main/java/org/opensearch/rest/action/document/RestTermVectorsAction.java index ad11adba6c585..36f9e43e71362 100644 --- a/server/src/main/java/org/opensearch/rest/action/document/RestTermVectorsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/document/RestTermVectorsAction.java @@ -35,10 +35,8 @@ import org.opensearch.action.termvectors.TermVectorsRequest; import org.opensearch.client.node.NodeClient; import org.opensearch.common.Strings; -import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.xcontent.XContentParser; import org.opensearch.index.VersionType; -import org.opensearch.index.mapper.MapperService; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.RestActions; @@ -59,7 +57,6 @@ * TermVectorsRequest. 
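 *
 * (Editorial, hedged note: after this change the handler only builds the typeless
 * form, e.g. an assumed example new TermVectorsRequest("test", "1"); the explicit
 * MapperService.SINGLE_MAPPING_NAME plumbing removed below becomes unnecessary.)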
*/ public class RestTermVectorsAction extends BaseRestHandler { - private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestTermVectorsAction.class); public static final String TYPES_DEPRECATION_MESSAGE = "[types removal] " + "Specifying types in term vector requests is deprecated."; @Override @@ -86,14 +83,7 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - TermVectorsRequest termVectorsRequest; - if (request.hasParam("type")) { - deprecationLogger.deprecate("termvectors_with_types", TYPES_DEPRECATION_MESSAGE); - termVectorsRequest = new TermVectorsRequest(request.param("index"), request.param("type"), request.param("id")); - } else { - termVectorsRequest = new TermVectorsRequest(request.param("index"), MapperService.SINGLE_MAPPING_NAME, request.param("id")); - } - + TermVectorsRequest termVectorsRequest = new TermVectorsRequest(request.param("index"), request.param("id")); if (request.hasContentOrSourceParam()) { try (XContentParser parser = request.contentOrSourceParamParser()) { TermVectorsRequest.parseRequest(termVectorsRequest, parser); diff --git a/server/src/main/java/org/opensearch/rest/action/document/RestUpdateAction.java b/server/src/main/java/org/opensearch/rest/action/document/RestUpdateAction.java index 7afb0b6cba87c..832d8da4a8fdd 100644 --- a/server/src/main/java/org/opensearch/rest/action/document/RestUpdateAction.java +++ b/server/src/main/java/org/opensearch/rest/action/document/RestUpdateAction.java @@ -38,7 +38,6 @@ import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.update.UpdateRequest; import org.opensearch.client.node.NodeClient; -import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.index.VersionType; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; @@ -54,19 +53,10 @@ import static org.opensearch.rest.RestRequest.Method.POST; public class RestUpdateAction extends BaseRestHandler { - private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestUpdateAction.class); - public static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Specifying types in " - + "document update requests is deprecated, use the endpoint /{index}/_update/{id} instead."; @Override public List routes() { - return unmodifiableList( - asList( - new Route(POST, "/{index}/_update/{id}"), - // Deprecated typed endpoint. 
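                // (Editorial, illustrative sketch under assumed names: the typeless update
                // a caller issues once only /{index}/_update/{id} routes, e.g.:
                //   UpdateRequest updateRequest = new UpdateRequest("test", "1");
                //   updateRequest.doc(java.util.Collections.singletonMap("field1", "value2"));)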
- new Route(POST, "/{index}/{type}/{id}/_update") - ) - ); + return unmodifiableList(asList(new Route(POST, "/{index}/_update/{id}"))); } @Override @@ -77,12 +67,7 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { UpdateRequest updateRequest; - if (request.hasParam("type")) { - deprecationLogger.deprecate("update_with_types", TYPES_DEPRECATION_MESSAGE); - updateRequest = new UpdateRequest(request.param("index"), request.param("type"), request.param("id")); - } else { - updateRequest = new UpdateRequest(request.param("index"), request.param("id")); - } + updateRequest = new UpdateRequest(request.param("index"), request.param("id")); updateRequest.routing(request.param("routing")); updateRequest.timeout(request.paramAsTime("timeout", updateRequest.timeout())); diff --git a/server/src/main/java/org/opensearch/rest/action/search/RestCountAction.java b/server/src/main/java/org/opensearch/rest/action/search/RestCountAction.java index 79e4e430038a1..04ee5fdd5b621 100644 --- a/server/src/main/java/org/opensearch/rest/action/search/RestCountAction.java +++ b/server/src/main/java/org/opensearch/rest/action/search/RestCountAction.java @@ -37,7 +37,6 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.client.node.NodeClient; import org.opensearch.common.Strings; -import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.index.query.QueryBuilder; import org.opensearch.rest.BaseRestHandler; @@ -59,8 +58,6 @@ import static org.opensearch.search.internal.SearchContext.DEFAULT_TERMINATE_AFTER; public class RestCountAction extends BaseRestHandler { - private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestCountAction.class); - static final String TYPES_DEPRECATION_MESSAGE = "[types removal]" + " Specifying types in count requests is deprecated."; @Override public List routes() { @@ -104,11 +101,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC searchSourceBuilder.minScore(minScore); } - if (request.hasParam("type")) { - deprecationLogger.deprecate("count_with_types", TYPES_DEPRECATION_MESSAGE); - countRequest.types(Strings.splitStringByCommaToArray(request.param("type"))); - } - countRequest.preference(request.param("preference")); final int terminateAfter = request.paramAsInt("terminate_after", DEFAULT_TERMINATE_AFTER); diff --git a/server/src/main/java/org/opensearch/rest/action/search/RestExplainAction.java b/server/src/main/java/org/opensearch/rest/action/search/RestExplainAction.java index 46841e599bfda..2b73e145cf5ca 100644 --- a/server/src/main/java/org/opensearch/rest/action/search/RestExplainAction.java +++ b/server/src/main/java/org/opensearch/rest/action/search/RestExplainAction.java @@ -35,7 +35,6 @@ import org.opensearch.action.explain.ExplainRequest; import org.opensearch.client.node.NodeClient; import org.opensearch.common.Strings; -import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.index.query.QueryBuilder; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; @@ -55,20 +54,10 @@ * Rest action for computing a score explanation for specific documents. 
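 *
 * (Editorial, hedged note: the handler below now always builds the typeless request,
 * equivalent to the assumed example new ExplainRequest("test", "1").)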
*/ public class RestExplainAction extends BaseRestHandler { - private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestExplainAction.class); - public static final String TYPES_DEPRECATION_MESSAGE = "[types removal] " + "Specifying a type in explain requests is deprecated."; @Override public List routes() { - return unmodifiableList( - asList( - new Route(GET, "/{index}/_explain/{id}"), - new Route(POST, "/{index}/_explain/{id}"), - // Deprecated typed endpoints. - new Route(GET, "/{index}/{type}/{id}/_explain"), - new Route(POST, "/{index}/{type}/{id}/_explain") - ) - ); + return unmodifiableList(asList(new Route(GET, "/{index}/_explain/{id}"), new Route(POST, "/{index}/_explain/{id}"))); } @Override @@ -78,14 +67,7 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - ExplainRequest explainRequest; - if (request.hasParam("type")) { - deprecationLogger.deprecate("explain_with_types", TYPES_DEPRECATION_MESSAGE); - explainRequest = new ExplainRequest(request.param("index"), request.param("type"), request.param("id")); - } else { - explainRequest = new ExplainRequest(request.param("index"), request.param("id")); - } - + ExplainRequest explainRequest = new ExplainRequest(request.param("index"), request.param("id")); explainRequest.parent(request.param("parent")); explainRequest.routing(request.param("routing")); explainRequest.preference(request.param("preference")); diff --git a/server/src/main/java/org/opensearch/rest/action/search/RestMultiSearchAction.java b/server/src/main/java/org/opensearch/rest/action/search/RestMultiSearchAction.java index de433530d0629..8c711b31d0720 100644 --- a/server/src/main/java/org/opensearch/rest/action/search/RestMultiSearchAction.java +++ b/server/src/main/java/org/opensearch/rest/action/search/RestMultiSearchAction.java @@ -68,7 +68,6 @@ public class RestMultiSearchAction extends BaseRestHandler { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestMultiSearchAction.class); - static final String TYPES_DEPRECATION_MESSAGE = "[types removal]" + " Specifying types in multi search requests is deprecated."; private static final Set RESPONSE_PARAMS; @@ -108,13 +107,6 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { final MultiSearchRequest multiSearchRequest = parseRequest(request, client.getNamedWriteableRegistry(), allowExplicitIndex); - // Emit a single deprecation message if any search request contains types. 
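        // (Editorial, illustrative note: this scan is deleted because SearchRequest no
        // longer carries types; a multi-search is now assembled purely from typeless
        // requests, e.g. the assumed example:
        //   MultiSearchRequest multiSearch = new MultiSearchRequest();
        //   multiSearch.add(new SearchRequest("test"));)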
- for (SearchRequest searchRequest : multiSearchRequest.requests()) { - if (searchRequest.types().length > 0) { - deprecationLogger.deprecate("msearch_with_types", TYPES_DEPRECATION_MESSAGE); - break; - } - } return channel -> { final RestCancellableNodeClient cancellableClient = new RestCancellableNodeClient(client, request.getHttpChannel()); cancellableClient.execute(MultiSearchAction.INSTANCE, multiSearchRequest, new RestToXContentListener<>(channel)); @@ -192,7 +184,6 @@ public static void parseMultiLineRequest( ) throws IOException { String[] indices = Strings.splitStringByCommaToArray(request.param("index")); - String[] types = Strings.splitStringByCommaToArray(request.param("type")); String searchType = request.param("search_type"); boolean ccsMinimizeRoundtrips = request.paramAsBoolean("ccs_minimize_roundtrips", true); String routing = request.param("routing"); @@ -206,7 +197,6 @@ public static void parseMultiLineRequest( consumer, indices, indicesOptions, - types, routing, searchType, ccsMinimizeRoundtrips, diff --git a/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java b/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java index 54504685d4bfb..e0c984cec5430 100644 --- a/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java +++ b/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java @@ -199,10 +199,6 @@ public static void parseSearchRequest( searchRequest.scroll(new Scroll(parseTimeValue(scroll, null, "scroll"))); } - if (request.hasParam("type")) { - deprecationLogger.deprecate("search_with_types", TYPES_DEPRECATION_MESSAGE); - searchRequest.types(Strings.splitStringByCommaToArray(request.param("type"))); - } searchRequest.routing(request.param("routing")); searchRequest.preference(request.param("preference")); searchRequest.indicesOptions(IndicesOptions.fromRequest(request, searchRequest.indicesOptions())); diff --git a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java index 9ee1e6c15a2e7..2df62d60786ba 100644 --- a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java @@ -55,7 +55,6 @@ import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.ObjectMapper; -import org.opensearch.index.mapper.TypeFieldMapper; import org.opensearch.index.query.AbstractQueryBuilder; import org.opensearch.index.query.ParsedQuery; import org.opensearch.index.query.QueryBuilder; @@ -209,7 +208,6 @@ final class DefaultSearchContext extends SearchContext { request::nowInMillis, shardTarget.getClusterAlias() ); - queryShardContext.setTypes(request.types()); queryBoost = request.indexBoost(); this.lowLevelCancellation = lowLevelCancellation; } @@ -321,11 +319,6 @@ public void preProcess(boolean rewrite) { @Override public Query buildFilteredQuery(Query query) { List filters = new ArrayList<>(); - Query typeFilter = createTypeFilter(queryShardContext.getTypes()); - if (typeFilter != null) { - filters.add(typeFilter); - } - if (mapperService().hasNested() && new NestedHelper(mapperService()).mightMatchNestedDocs(query) && (aliasFilter == null || new NestedHelper(mapperService()).mightMatchNestedDocs(aliasFilter))) { @@ -357,17 +350,6 @@ && new NestedHelper(mapperService()).mightMatchNestedDocs(query) } } - private Query createTypeFilter(String[] types) { - 
if (types != null && types.length >= 1) { - if (mapperService().documentMapper() == null) { - return null; - } - TypeFieldMapper.TypeFieldType ft = new TypeFieldMapper.TypeFieldType(mapperService().documentMapper().type()); - return ft.typeFilter(types); - } - return null; - } - @Override public ShardSearchContextId id() { return readerContext.id(); diff --git a/server/src/main/java/org/opensearch/search/SearchHit.java b/server/src/main/java/org/opensearch/search/SearchHit.java index 80ed3268780c3..daae83e408530 100644 --- a/server/src/main/java/org/opensearch/search/SearchHit.java +++ b/server/src/main/java/org/opensearch/search/SearchHit.java @@ -35,6 +35,7 @@ import org.apache.lucene.search.Explanation; import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; +import org.opensearch.Version; import org.opensearch.action.OriginalIndices; import org.opensearch.common.Nullable; import org.opensearch.common.ParseField; @@ -98,7 +99,6 @@ public final class SearchHit implements Writeable, ToXContentObject, Iterable documentFields, Map metaFields) { - this(docId, id, type, null, documentFields, metaFields); + public SearchHit(int docId, String id, Map documentFields, Map metaFields) { + this(docId, id, null, documentFields, metaFields); } public SearchHit( int nestedTopDocId, String id, - Text type, NestedIdentity nestedIdentity, Map documentFields, Map metaFields @@ -155,7 +154,6 @@ public SearchHit( } else { this.id = null; } - this.type = type; this.nestedIdentity = nestedIdentity; this.documentFields = documentFields == null ? emptyMap() : documentFields; this.metaFields = metaFields == null ? emptyMap() : metaFields; @@ -165,7 +163,9 @@ public SearchHit(StreamInput in) throws IOException { docId = -1; score = in.readFloat(); id = in.readOptionalText(); - type = in.readOptionalText(); + if (in.getVersion().before(Version.V_2_0_0)) { + in.readOptionalText(); + } nestedIdentity = in.readOptionalWriteable(NestedIdentity::new); version = in.readLong(); seqNo = in.readZLong(); @@ -261,11 +261,15 @@ private void writeFields(StreamOutput out, Map fields) th } } + private static final Text SINGLE_MAPPING_TYPE = new Text(MapperService.SINGLE_MAPPING_NAME); + @Override public void writeTo(StreamOutput out) throws IOException { out.writeFloat(score); out.writeOptionalText(id); - out.writeOptionalText(type); + if (out.getVersion().before(Version.V_2_0_0)) { + out.writeOptionalText(SINGLE_MAPPING_TYPE); + } out.writeOptionalWriteable(nestedIdentity); out.writeLong(version); out.writeZLong(seqNo); @@ -376,17 +380,6 @@ public String getId() { return id != null ? id.string() : null; } - /** - * The type of the document. - * - * @deprecated Types are in the process of being removed. Instead of using a type, prefer to - * filter on a field on the document. - */ - @Deprecated - public String getType() { - return type != null ? type.string() : null; - } - /** * If this is a nested hit then nested reference information is returned otherwise null is returned. 
*/ @@ -597,7 +590,6 @@ public void setInnerHits(Map innerHits) { public static class Fields { static final String _INDEX = "_index"; - static final String _TYPE = "_type"; static final String _ID = "_id"; static final String _VERSION = "_version"; static final String _SEQ_NO = "_seq_no"; @@ -641,9 +633,6 @@ public XContentBuilder toInnerXContent(XContentBuilder builder, Params params) t if (index != null) { builder.field(Fields._INDEX, RemoteClusterAware.buildRemoteIndexName(clusterAlias, index)); } - if (type != null) { - builder.field(Fields._TYPE, type); - } if (id != null) { builder.field(Fields._ID, id); } @@ -762,7 +751,6 @@ public static SearchHit fromXContent(XContentParser parser) { } public static void declareInnerHitsParseFields(ObjectParser, Void> parser) { - parser.declareString((map, value) -> map.put(Fields._TYPE, new Text(value)), new ParseField(Fields._TYPE)); parser.declareString((map, value) -> map.put(Fields._INDEX, value), new ParseField(Fields._INDEX)); parser.declareString((map, value) -> map.put(Fields._ID, value), new ParseField(Fields._ID)); parser.declareString((map, value) -> map.put(Fields._NODE, value), new ParseField(Fields._NODE)); @@ -822,12 +810,11 @@ public static void declareInnerHitsParseFields(ObjectParser, public static SearchHit createFromMap(Map values) { String id = get(Fields._ID, values, null); - Text type = get(Fields._TYPE, values, null); NestedIdentity nestedIdentity = get(NestedIdentity._NESTED, values, null); Map metaFields = get(METADATA_FIELDS, values, Collections.emptyMap()); Map documentFields = get(DOCUMENT_FIELDS, values, Collections.emptyMap()); - SearchHit searchHit = new SearchHit(-1, id, type, nestedIdentity, documentFields, metaFields); + SearchHit searchHit = new SearchHit(-1, id, nestedIdentity, documentFields, metaFields); String index = get(Fields._INDEX, values, null); String clusterAlias = null; if (index != null) { @@ -972,7 +959,6 @@ public boolean equals(Object obj) { } SearchHit other = (SearchHit) obj; return Objects.equals(id, other.id) - && Objects.equals(type, other.type) && Objects.equals(nestedIdentity, other.nestedIdentity) && Objects.equals(version, other.version) && Objects.equals(seqNo, other.seqNo) diff --git a/server/src/main/java/org/opensearch/search/SearchModule.java b/server/src/main/java/org/opensearch/search/SearchModule.java index b18035cd49303..cdc2509bbcb00 100644 --- a/server/src/main/java/org/opensearch/search/SearchModule.java +++ b/server/src/main/java/org/opensearch/search/SearchModule.java @@ -112,6 +112,7 @@ import org.opensearch.plugins.SearchPlugin.SearchExtSpec; import org.opensearch.plugins.SearchPlugin.SearchExtensionSpec; import org.opensearch.plugins.SearchPlugin.SignificanceHeuristicSpec; +import org.opensearch.plugins.SearchPlugin.SortSpec; import org.opensearch.plugins.SearchPlugin.SuggesterSpec; import org.opensearch.search.aggregations.AggregationBuilder; import org.opensearch.search.aggregations.BaseAggregationBuilder; @@ -345,7 +346,7 @@ public SearchModule(Settings settings, List plugins) { registerScoreFunctions(plugins); registerQueryParsers(plugins); registerRescorers(plugins); - registerSorts(); + registerSortParsers(plugins); registerValueFormats(); registerSignificanceHeuristics(plugins); this.valuesSourceRegistry = registerAggregations(plugins); @@ -882,13 +883,6 @@ private void registerRescorer(RescorerSpec spec) { namedWriteables.add(new NamedWriteableRegistry.Entry(RescorerBuilder.class, spec.getName().getPreferredName(), spec.getReader())); } - private void 
registerSorts() { - namedWriteables.add(new NamedWriteableRegistry.Entry(SortBuilder.class, GeoDistanceSortBuilder.NAME, GeoDistanceSortBuilder::new)); - namedWriteables.add(new NamedWriteableRegistry.Entry(SortBuilder.class, ScoreSortBuilder.NAME, ScoreSortBuilder::new)); - namedWriteables.add(new NamedWriteableRegistry.Entry(SortBuilder.class, ScriptSortBuilder.NAME, ScriptSortBuilder::new)); - namedWriteables.add(new NamedWriteableRegistry.Entry(SortBuilder.class, FieldSortBuilder.NAME, FieldSortBuilder::new)); - } - private void registerFromPlugin(List plugins, Function> producer, Consumer consumer) { for (SearchPlugin plugin : plugins) { for (T t : producer.apply(plugin)) { @@ -1214,6 +1208,20 @@ private void registerQueryParsers(List plugins) { registerFromPlugin(plugins, SearchPlugin::getQueries, this::registerQuery); } + private void registerSortParsers(List plugins) { + registerSort(new SortSpec<>(FieldSortBuilder.NAME, FieldSortBuilder::new, FieldSortBuilder::fromXContentObject)); + registerSort(new SortSpec<>(ScriptSortBuilder.NAME, ScriptSortBuilder::new, ScriptSortBuilder::fromXContent)); + registerSort( + new SortSpec<>( + new ParseField(GeoDistanceSortBuilder.NAME, GeoDistanceSortBuilder.ALTERNATIVE_NAME), + GeoDistanceSortBuilder::new, + GeoDistanceSortBuilder::fromXContent + ) + ); + registerSort(new SortSpec<>(ScoreSortBuilder.NAME, ScoreSortBuilder::new, ScoreSortBuilder::fromXContent)); + registerFromPlugin(plugins, SearchPlugin::getSorts, this::registerSort); + } + private void registerIntervalsSourceProviders() { namedWriteables.addAll(getIntervalsSourceProviderNamedWritables()); } @@ -1246,6 +1254,11 @@ public static List getIntervalsSourceProviderNamed IntervalsSourceProvider.Wildcard.NAME, IntervalsSourceProvider.Wildcard::new ), + new NamedWriteableRegistry.Entry( + IntervalsSourceProvider.class, + IntervalsSourceProvider.Regexp.NAME, + IntervalsSourceProvider.Regexp::new + ), new NamedWriteableRegistry.Entry( IntervalsSourceProvider.class, IntervalsSourceProvider.Fuzzy.NAME, @@ -1260,6 +1273,17 @@ private void registerQuery(QuerySpec spec) { namedXContents.add(new NamedXContentRegistry.Entry(QueryBuilder.class, spec.getName(), (p, c) -> spec.getParser().fromXContent(p))); } + private void registerSort(SortSpec spec) { + namedWriteables.add(new NamedWriteableRegistry.Entry(SortBuilder.class, spec.getName().getPreferredName(), spec.getReader())); + namedXContents.add( + new NamedXContentRegistry.Entry( + SortBuilder.class, + spec.getName(), + (p, c) -> spec.getParser().fromXContent(p, spec.getName().getPreferredName()) + ) + ); + } + public FetchPhase getFetchPhase() { return new FetchPhase(fetchSubPhases); } diff --git a/server/src/main/java/org/opensearch/search/SearchService.java b/server/src/main/java/org/opensearch/search/SearchService.java index 7dc993f4f1cd9..eda9153381046 100644 --- a/server/src/main/java/org/opensearch/search/SearchService.java +++ b/server/src/main/java/org/opensearch/search/SearchService.java @@ -448,8 +448,8 @@ public void executeQueryPhase( SearchShardTask task, ActionListener listener ) { - assert request.canReturnNullResponseIfMatchNoDocs() == false - || request.numberOfShards() > 1 : "empty responses require more than one shard"; + assert request.canReturnNullResponseIfMatchNoDocs() == false || request.numberOfShards() > 1 + : "empty responses require more than one shard"; final IndexShard shard = getShard(request); rewriteAndFetchShardRequest(shard, request, new ActionListener() { @Override diff --git 
a/server/src/main/java/org/opensearch/search/aggregations/InternalAggregation.java b/server/src/main/java/org/opensearch/search/aggregations/InternalAggregation.java index 2c2421a249549..7c403bcb9dbbf 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/InternalAggregation.java +++ b/server/src/main/java/org/opensearch/search/aggregations/InternalAggregation.java @@ -226,7 +226,8 @@ public final void writeTo(StreamOutput out) throws IOException { out.writeString(name); out.writeGenericValue(metadata); if (out.getVersion().before(LegacyESVersion.V_7_8_0)) { - assert pipelineAggregatorsForBwcSerialization != null : "serializing to pre-7.8.0 versions should have called mergePipelineTreeForBWCSerialization"; + assert pipelineAggregatorsForBwcSerialization != null + : "serializing to pre-7.8.0 versions should have called mergePipelineTreeForBWCSerialization"; out.writeNamedWriteableList(pipelineAggregatorsForBwcSerialization); } doWriteTo(out); diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/BinaryValuesSource.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/BinaryValuesSource.java index 3477c203d4887..44a29e2251b13 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/BinaryValuesSource.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/BinaryValuesSource.java @@ -47,8 +47,10 @@ import org.opensearch.index.mapper.StringFieldType; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.LeafBucketCollector; +import org.opensearch.search.aggregations.bucket.missing.MissingOrder; import java.io.IOException; +import java.util.Objects; import java.util.function.LongConsumer; /** @@ -68,10 +70,11 @@ class BinaryValuesSource extends SingleDimensionValuesSource { CheckedFunction docValuesFunc, DocValueFormat format, boolean missingBucket, + MissingOrder missingOrder, int size, int reverseMul ) { - super(bigArrays, format, fieldType, missingBucket, size, reverseMul); + super(bigArrays, format, fieldType, missingBucket, missingOrder, size, reverseMul); this.breakerConsumer = breakerConsumer; this.docValuesFunc = docValuesFunc; this.values = bigArrays.newObjectArray(Math.min(size, 100)); @@ -101,10 +104,9 @@ void copyCurrent(int slot) { @Override int compare(int from, int to) { if (missingBucket) { - if (values.get(from) == null) { - return values.get(to) == null ? 0 : -1 * reverseMul; - } else if (values.get(to) == null) { - return reverseMul; + int result = missingOrder.compare(() -> Objects.isNull(values.get(from)), () -> Objects.isNull(values.get(to)), reverseMul); + if (MissingOrder.unknownOrder(result) == false) { + return result; } } return compareValues(values.get(from), values.get(to)); @@ -113,10 +115,9 @@ int compare(int from, int to) { @Override int compareCurrent(int slot) { if (missingBucket) { - if (currentValue == null) { - return values.get(slot) == null ? 0 : -1 * reverseMul; - } else if (values.get(slot) == null) { - return reverseMul; + int result = missingOrder.compare(() -> Objects.isNull(currentValue), () -> Objects.isNull(values.get(slot)), reverseMul); + if (MissingOrder.unknownOrder(result) == false) { + return result; } } return compareValues(currentValue, values.get(slot)); @@ -125,10 +126,9 @@ int compareCurrent(int slot) { @Override int compareCurrentWithAfter() { if (missingBucket) { - if (currentValue == null) { - return afterValue == null ? 
0 : -1 * reverseMul; - } else if (afterValue == null) { - return reverseMul; + int result = missingOrder.compare(() -> Objects.isNull(currentValue), () -> Objects.isNull(afterValue), reverseMul); + if (MissingOrder.unknownOrder(result) == false) { + return result; } } return compareValues(currentValue, afterValue); diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java index dd399e406177d..1d48850bee122 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java @@ -70,6 +70,7 @@ import org.opensearch.search.aggregations.MultiBucketCollector; import org.opensearch.search.aggregations.MultiBucketConsumerService; import org.opensearch.search.aggregations.bucket.BucketsAggregator; +import org.opensearch.search.aggregations.bucket.missing.MissingOrder; import org.opensearch.search.internal.SearchContext; import org.opensearch.search.searchafter.SearchAfterBuilder; import org.opensearch.search.sort.SortAndFormats; @@ -89,6 +90,7 @@ final class CompositeAggregator extends BucketsAggregator { private final int size; private final List sourceNames; private final int[] reverseMuls; + private final MissingOrder[] missingOrders; private final List formats; private final CompositeKey rawAfterKey; @@ -117,6 +119,7 @@ final class CompositeAggregator extends BucketsAggregator { this.size = size; this.sourceNames = Arrays.stream(sourceConfigs).map(CompositeValuesSourceConfig::name).collect(Collectors.toList()); this.reverseMuls = Arrays.stream(sourceConfigs).mapToInt(CompositeValuesSourceConfig::reverseMul).toArray(); + this.missingOrders = Arrays.stream(sourceConfigs).map(CompositeValuesSourceConfig::missingOrder).toArray(MissingOrder[]::new); this.formats = Arrays.stream(sourceConfigs).map(CompositeValuesSourceConfig::format).collect(Collectors.toList()); this.sources = new SingleDimensionValuesSource[sourceConfigs.length]; // check that the provided size is not greater than the search.max_buckets setting @@ -189,7 +192,15 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I CompositeKey key = queue.toCompositeKey(slot); InternalAggregations aggs = subAggsForBuckets[slot]; int docCount = queue.getDocCount(slot); - buckets[queue.size()] = new InternalComposite.InternalBucket(sourceNames, formats, key, reverseMuls, docCount, aggs); + buckets[queue.size()] = new InternalComposite.InternalBucket( + sourceNames, + formats, + key, + reverseMuls, + missingOrders, + docCount, + aggs + ); } CompositeKey lastBucket = num > 0 ? 
buckets[num - 1].getRawKey() : null; return new InternalAggregation[] { @@ -201,6 +212,7 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I Arrays.asList(buckets), lastBucket, reverseMuls, + missingOrders, earlyTerminated, metadata() ) }; @@ -208,7 +220,18 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I @Override public InternalAggregation buildEmptyAggregation() { - return new InternalComposite(name, size, sourceNames, formats, Collections.emptyList(), null, reverseMuls, false, metadata()); + return new InternalComposite( + name, + size, + sourceNames, + formats, + Collections.emptyList(), + null, + reverseMuls, + missingOrders, + false, + metadata() + ); } private void finishLeaf() { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java index 7fc90f6c6d373..9ac43c812e41c 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java @@ -32,6 +32,7 @@ package org.opensearch.search.aggregations.bucket.composite; +import org.opensearch.Version; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.io.stream.Writeable; @@ -39,6 +40,7 @@ import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.index.query.QueryShardContext; import org.opensearch.script.Script; +import org.opensearch.search.aggregations.bucket.missing.MissingOrder; import org.opensearch.search.aggregations.support.ValueType; import org.opensearch.search.aggregations.support.ValuesSource; import org.opensearch.search.aggregations.support.ValuesSourceConfig; @@ -49,6 +51,8 @@ import java.time.ZoneId; import java.util.Objects; +import static org.opensearch.search.aggregations.bucket.missing.MissingOrder.fromString; + /** * A {@link ValuesSource} builder for {@link CompositeAggregationBuilder} */ @@ -59,6 +63,7 @@ public abstract class CompositeValuesSourceBuilder createValuesSource( private final DocValueFormat format; private final int reverseMul; private final boolean missingBucket; + private final MissingOrder missingOrder; private final boolean hasScript; private final SingleDimensionValuesSourceProvider singleDimensionValuesSourceProvider; @@ -83,6 +85,7 @@ SingleDimensionValuesSource createValuesSource( DocValueFormat format, SortOrder order, boolean missingBucket, + MissingOrder missingOrder, boolean hasScript, SingleDimensionValuesSourceProvider singleDimensionValuesSourceProvider ) { @@ -94,6 +97,7 @@ SingleDimensionValuesSource createValuesSource( this.missingBucket = missingBucket; this.hasScript = hasScript; this.singleDimensionValuesSourceProvider = singleDimensionValuesSourceProvider; + this.missingOrder = missingOrder; } /** @@ -132,6 +136,13 @@ boolean missingBucket() { return missingBucket; } + /** + * Return the {@link MissingOrder} for the config. + */ + MissingOrder missingOrder() { + return missingOrder; + } + /** * Returns true if the source contains a script that can change the value. 
*/ diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceParserHelper.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceParserHelper.java index 888b16b348a14..c1e64f57c10cc 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceParserHelper.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceParserHelper.java @@ -43,6 +43,7 @@ import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentParser; import org.opensearch.script.Script; +import org.opensearch.search.aggregations.bucket.missing.MissingOrder; import org.opensearch.search.aggregations.support.ValueType; import java.io.IOException; @@ -54,6 +55,7 @@ public class CompositeValuesSourceParserHelper { static , T> void declareValuesSourceFields(AbstractObjectParser objectParser) { objectParser.declareField(VB::field, XContentParser::text, new ParseField("field"), ObjectParser.ValueType.STRING); objectParser.declareBoolean(VB::missingBucket, new ParseField("missing_bucket")); + objectParser.declareString(VB::missingOrder, new ParseField(MissingOrder.NAME)); objectParser.declareField(VB::userValuetypeHint, p -> { ValueType valueType = ValueType.lenientParse(p.text()); diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java index 51c014cf71ec1..acab1790bfa21 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java @@ -52,6 +52,7 @@ import org.opensearch.search.aggregations.bucket.histogram.DateIntervalConsumer; import org.opensearch.search.aggregations.bucket.histogram.DateIntervalWrapper; import org.opensearch.search.aggregations.bucket.histogram.Histogram; +import org.opensearch.search.aggregations.bucket.missing.MissingOrder; import org.opensearch.search.aggregations.support.CoreValuesSourceType; import org.opensearch.search.aggregations.support.ValuesSource; import org.opensearch.search.aggregations.support.ValuesSourceConfig; @@ -81,6 +82,7 @@ CompositeValuesSourceConfig apply( boolean hasScript, // probably redundant with the config, but currently we check this two different ways... 
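            // (Editorial, hedged note: the new MissingOrder parameter below threads the
            // request-level missing-value ordering through to the values source; it is
            // parsed via the ParseField(MissingOrder.NAME) declared in
            // CompositeValuesSourceParserHelper above, so an assumed request key would
            // look like "missing_order": "first" alongside "missing_bucket": true.)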
String format, boolean missingBucket, + MissingOrder missingOrder, SortOrder order ); } @@ -288,7 +290,7 @@ public static void register(ValuesSourceRegistry.Builder builder) { builder.register( REGISTRY_KEY, org.opensearch.common.collect.List.of(CoreValuesSourceType.DATE, CoreValuesSourceType.NUMERIC), - (valuesSourceConfig, rounding, name, hasScript, format, missingBucket, order) -> { + (valuesSourceConfig, rounding, name, hasScript, format, missingBucket, missingOrder, order) -> { ValuesSource.Numeric numeric = (ValuesSource.Numeric) valuesSourceConfig.getValuesSource(); // TODO once composite is plugged in to the values source registry or at least understands Date values source types use it // here @@ -304,6 +306,7 @@ public static void register(ValuesSourceRegistry.Builder builder) { docValueFormat, order, missingBucket, + missingOrder, hasScript, ( BigArrays bigArrays, @@ -319,6 +322,7 @@ public static void register(ValuesSourceRegistry.Builder builder) { roundingValuesSource::round, compositeValuesSourceConfig.format(), compositeValuesSourceConfig.missingBucket(), + compositeValuesSourceConfig.missingOrder(), size, compositeValuesSourceConfig.reverseMul() ); @@ -339,6 +343,6 @@ protected CompositeValuesSourceConfig innerBuild(QueryShardContext queryShardCon Rounding rounding = dateHistogramInterval.createRounding(timeZone(), offset); return queryShardContext.getValuesSourceRegistry() .getAggregator(REGISTRY_KEY, config) - .apply(config, rounding, name, config.script() != null, format(), missingBucket(), order()); + .apply(config, rounding, name, config.script() != null, format(), missingBucket(), missingOrder(), order()); } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DoubleValuesSource.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DoubleValuesSource.java index 0af5c2a901093..fb94ff194b950 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DoubleValuesSource.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DoubleValuesSource.java @@ -44,6 +44,7 @@ import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.LeafBucketCollector; +import org.opensearch.search.aggregations.bucket.missing.MissingOrder; import java.io.IOException; @@ -63,10 +64,11 @@ class DoubleValuesSource extends SingleDimensionValuesSource { CheckedFunction docValuesFunc, DocValueFormat format, boolean missingBucket, + MissingOrder missingOrder, int size, int reverseMul ) { - super(bigArrays, format, fieldType, missingBucket, size, reverseMul); + super(bigArrays, format, fieldType, missingBucket, missingOrder, size, reverseMul); this.docValuesFunc = docValuesFunc; this.bits = missingBucket ? new BitArray(100, bigArrays) : null; this.values = bigArrays.newDoubleArray(Math.min(size, 100), false); @@ -89,10 +91,9 @@ void copyCurrent(int slot) { @Override int compare(int from, int to) { if (missingBucket) { - if (bits.get(from) == false) { - return bits.get(to) ? 
-1 * reverseMul : 0; - } else if (bits.get(to) == false) { - return reverseMul; + int result = missingOrder.compare(() -> bits.get(from) == false, () -> bits.get(to) == false, reverseMul); + if (MissingOrder.unknownOrder(result) == false) { + return result; } } return compareValues(values.get(from), values.get(to)); @@ -101,10 +102,9 @@ int compare(int from, int to) { @Override int compareCurrent(int slot) { if (missingBucket) { - if (missingCurrentValue) { - return bits.get(slot) ? -1 * reverseMul : 0; - } else if (bits.get(slot) == false) { - return reverseMul; + int result = missingOrder.compare(() -> missingCurrentValue, () -> bits.get(slot) == false, reverseMul); + if (MissingOrder.unknownOrder(result) == false) { + return result; } } return compareValues(currentValue, values.get(slot)); @@ -113,10 +113,9 @@ int compareCurrent(int slot) { @Override int compareCurrentWithAfter() { if (missingBucket) { - if (missingCurrentValue) { - return afterValue != null ? -1 * reverseMul : 0; - } else if (afterValue == null) { - return reverseMul; + int result = missingOrder.compare(() -> missingCurrentValue, () -> afterValue == null, reverseMul); + if (MissingOrder.unknownOrder(result) == false) { + return result; } } return compareValues(currentValue, afterValue); diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilder.java index b4049568af4a8..5d47a44a26222 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilder.java @@ -49,6 +49,7 @@ import org.opensearch.search.aggregations.bucket.geogrid.CellIdSource; import org.opensearch.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; import org.opensearch.search.aggregations.bucket.geogrid.GeoTileUtils; +import org.opensearch.search.aggregations.bucket.missing.MissingOrder; import org.opensearch.search.aggregations.support.CoreValuesSourceType; import org.opensearch.search.aggregations.support.ValuesSource; import org.opensearch.search.aggregations.support.ValuesSourceConfig; @@ -72,6 +73,7 @@ CompositeValuesSourceConfig apply( boolean hasScript, // probably redundant with the config, but currently we check this two different ways... String format, boolean missingBucket, + MissingOrder missingOrder, SortOrder order ); } @@ -103,7 +105,7 @@ static void register(ValuesSourceRegistry.Builder builder) { builder.register( REGISTRY_KEY, CoreValuesSourceType.GEOPOINT, - (valuesSourceConfig, precision, boundingBox, name, hasScript, format, missingBucket, order) -> { + (valuesSourceConfig, precision, boundingBox, name, hasScript, format, missingBucket, missingOrder, order) -> { ValuesSource.GeoPoint geoPoint = (ValuesSource.GeoPoint) valuesSourceConfig.getValuesSource(); // is specified in the builder. 
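Every comparator rewritten in this change (here in DoubleValuesSource, and below in LongValuesSource, GlobalOrdinalValuesSource and InternalComposite.InternalBucket) follows the same shape. A distilled, self-contained sketch of the pattern; the fields and the Long-based comparison are stand-ins, not code from the diff:

```java
import org.opensearch.search.aggregations.bucket.missing.MissingOrder;

final class MissingAwareComparatorSketch {
    // Placeholder state standing in for the per-slot bookkeeping of the real sources.
    private final boolean missingBucket = true;
    private final MissingOrder missingOrder = MissingOrder.DEFAULT;
    private final int reverseMul = 1; // +1 for ASC, -1 for DESC

    int compare(Long left, Long right) {
        if (missingBucket) {
            // Let MissingOrder rule as soon as at least one side is missing ...
            int result = missingOrder.compare(() -> left == null, () -> right == null, reverseMul);
            if (MissingOrder.unknownOrder(result) == false) {
                return result;
            }
        }
        // ... otherwise fall through to the ordinary value comparison.
        return Long.compare(left, right) * reverseMul;
    }
}
```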
final MappedFieldType fieldType = valuesSourceConfig.fieldType(); @@ -115,6 +117,7 @@ static void register(ValuesSourceRegistry.Builder builder) { DocValueFormat.GEOTILE, order, missingBucket, + missingOrder, hasScript, ( BigArrays bigArrays, @@ -132,6 +135,7 @@ static void register(ValuesSourceRegistry.Builder builder) { LongUnaryOperator.identity(), compositeValuesSourceConfig.format(), compositeValuesSourceConfig.missingBucket(), + compositeValuesSourceConfig.missingOrder(), size, compositeValuesSourceConfig.reverseMul() ); @@ -220,7 +224,7 @@ protected ValuesSourceType getDefaultValuesSourceType() { protected CompositeValuesSourceConfig innerBuild(QueryShardContext queryShardContext, ValuesSourceConfig config) throws IOException { return queryShardContext.getValuesSourceRegistry() .getAggregator(REGISTRY_KEY, config) - .apply(config, precision, geoBoundingBox(), name, script() != null, format(), missingBucket(), order()); + .apply(config, precision, geoBoundingBox(), name, script() != null, format(), missingBucket(), missingOrder(), order()); } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/GeoTileValuesSource.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/GeoTileValuesSource.java index 7ad38ec1a3b38..2daddc9647f44 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/GeoTileValuesSource.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/GeoTileValuesSource.java @@ -39,6 +39,7 @@ import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.bucket.geogrid.GeoTileUtils; +import org.opensearch.search.aggregations.bucket.missing.MissingOrder; import java.io.IOException; import java.util.function.LongUnaryOperator; @@ -57,10 +58,11 @@ class GeoTileValuesSource extends LongValuesSource { LongUnaryOperator rounding, DocValueFormat format, boolean missingBucket, + MissingOrder missingOrder, int size, int reverseMul ) { - super(bigArrays, fieldType, docValuesFunc, rounding, format, missingBucket, size, reverseMul); + super(bigArrays, fieldType, docValuesFunc, rounding, format, missingBucket, missingOrder, size, reverseMul); } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/GlobalOrdinalValuesSource.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/GlobalOrdinalValuesSource.java index 8c000f7a6190c..b4db4d8dd2a36 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/GlobalOrdinalValuesSource.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/GlobalOrdinalValuesSource.java @@ -46,6 +46,7 @@ import org.opensearch.index.mapper.StringFieldType; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.LeafBucketCollector; +import org.opensearch.search.aggregations.bucket.missing.MissingOrder; import java.io.IOException; @@ -71,10 +72,11 @@ class GlobalOrdinalValuesSource extends SingleDimensionValuesSource { CheckedFunction docValuesFunc, DocValueFormat format, boolean missingBucket, + MissingOrder missingOrder, int size, int reverseMul ) { - super(bigArrays, format, type, missingBucket, size, reverseMul); + super(bigArrays, format, type, missingBucket, missingOrder, size, reverseMul); this.docValuesFunc = docValuesFunc; this.values = bigArrays.newLongArray(Math.min(size, 100), false); } @@ -87,16 +89,34 @@ void 
copyCurrent(int slot) { @Override int compare(int from, int to) { + if (missingBucket) { + int result = missingOrder.compare(() -> values.get(from) == -1, () -> values.get(to) == -1, reverseMul); + if (MissingOrder.unknownOrder(result) == false) { + return result; + } + } return Long.compare(values.get(from), values.get(to)) * reverseMul; } @Override int compareCurrent(int slot) { + if (missingBucket) { + int result = missingOrder.compare(() -> currentValue == -1, () -> values.get(slot) == -1, reverseMul); + if (MissingOrder.unknownOrder(result) == false) { + return result; + } + } return Long.compare(currentValue, values.get(slot)) * reverseMul; } @Override int compareCurrentWithAfter() { + if (missingBucket) { + int result = missingOrder.compare(() -> currentValue == -1, () -> afterValueGlobalOrd == -1, reverseMul); + if (MissingOrder.unknownOrder(result) == false) { + return result; + } + } int cmp = Long.compare(currentValue, afterValueGlobalOrd); if (cmp == 0 && isTopValueInsertionPoint) { // the top value is missing in this shard, the comparison is against diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java index f30fde164f9c4..1f1ed3b6254ba 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java @@ -42,6 +42,7 @@ import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.aggregations.bucket.histogram.Histogram; +import org.opensearch.search.aggregations.bucket.missing.MissingOrder; import org.opensearch.search.aggregations.support.CoreValuesSourceType; import org.opensearch.search.aggregations.support.ValuesSource; import org.opensearch.search.aggregations.support.ValuesSourceConfig; @@ -67,6 +68,7 @@ CompositeValuesSourceConfig apply( boolean hasScript, // probably redundant with the config, but currently we check this two different ways... 
String format, boolean missingBucket, + MissingOrder missingOrder, SortOrder order ); } @@ -92,7 +94,7 @@ public static void register(ValuesSourceRegistry.Builder builder) { builder.register( REGISTRY_KEY, org.opensearch.common.collect.List.of(CoreValuesSourceType.DATE, CoreValuesSourceType.NUMERIC), - (valuesSourceConfig, interval, name, hasScript, format, missingBucket, order) -> { + (valuesSourceConfig, interval, name, hasScript, format, missingBucket, missingOrder, order) -> { ValuesSource.Numeric numeric = (ValuesSource.Numeric) valuesSourceConfig.getValuesSource(); final HistogramValuesSource vs = new HistogramValuesSource(numeric, interval); final MappedFieldType fieldType = valuesSourceConfig.fieldType(); @@ -103,6 +105,7 @@ public static void register(ValuesSourceRegistry.Builder builder) { valuesSourceConfig.format(), order, missingBucket, + missingOrder, hasScript, ( BigArrays bigArrays, @@ -117,6 +120,7 @@ public static void register(ValuesSourceRegistry.Builder builder) { numericValuesSource::doubleValues, compositeValuesSourceConfig.format(), compositeValuesSourceConfig.missingBucket(), + compositeValuesSourceConfig.missingOrder(), size, compositeValuesSourceConfig.reverseMul() ); @@ -194,6 +198,6 @@ protected ValuesSourceType getDefaultValuesSourceType() { protected CompositeValuesSourceConfig innerBuild(QueryShardContext queryShardContext, ValuesSourceConfig config) throws IOException { return queryShardContext.getValuesSourceRegistry() .getAggregator(REGISTRY_KEY, config) - .apply(config, interval, name, script() != null, format(), missingBucket(), order()); + .apply(config, interval, name, script() != null, format(), missingBucket(), missingOrder(), order()); } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/InternalComposite.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/InternalComposite.java index 64eb437147510..c102095d93184 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/InternalComposite.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/InternalComposite.java @@ -34,6 +34,7 @@ import org.apache.lucene.util.BytesRef; import org.opensearch.LegacyESVersion; +import org.opensearch.Version; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.xcontent.XContentBuilder; @@ -43,6 +44,7 @@ import org.opensearch.search.aggregations.InternalAggregations; import org.opensearch.search.aggregations.InternalMultiBucketAggregation; import org.opensearch.search.aggregations.KeyComparable; +import org.opensearch.search.aggregations.bucket.missing.MissingOrder; import java.io.IOException; import java.util.AbstractMap; @@ -64,6 +66,7 @@ public class InternalComposite extends InternalMultiBucketAggregation buckets; private final CompositeKey afterKey; private final int[] reverseMuls; + private final MissingOrder[] missingOrders; private final List sourceNames; private final List formats; @@ -77,6 +80,7 @@ public class InternalComposite extends InternalMultiBucketAggregation buckets, CompositeKey afterKey, int[] reverseMuls, + MissingOrder[] missingOrders, boolean earlyTerminated, Map metadata ) { @@ -87,6 +91,7 @@ public class InternalComposite extends InternalMultiBucketAggregation new InternalBucket(input, sourceNames, formats, reverseMuls)); + if (in.getVersion().onOrAfter(Version.V_1_3_0)) { + this.missingOrders = in.readArray(MissingOrder::readFromStream, 
MissingOrder[]::new); + } else { + this.missingOrders = new MissingOrder[reverseMuls.length]; + Arrays.fill(this.missingOrders, MissingOrder.DEFAULT); + } + this.buckets = in.readList((input) -> new InternalBucket(input, sourceNames, formats, reverseMuls, missingOrders)); this.afterKey = in.readBoolean() ? new CompositeKey(in) : null; this.earlyTerminated = in.getVersion().onOrAfter(LegacyESVersion.V_7_6_0) ? in.readBoolean() : false; } @@ -112,6 +123,9 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeNamedWriteable(format); } out.writeIntArray(reverseMuls); + if (out.getVersion().onOrAfter(Version.V_1_3_0)) { + out.writeArray((output, order) -> order.writeTo(output), missingOrders); + } out.writeList(buckets); out.writeBoolean(afterKey != null); if (afterKey != null) { @@ -140,7 +154,18 @@ public InternalComposite create(List newBuckets) { * keep the afterKey of the original aggregation in order * to be able to retrieve the next page even if all buckets have been filtered. */ - return new InternalComposite(name, size, sourceNames, formats, newBuckets, afterKey, reverseMuls, earlyTerminated, getMetadata()); + return new InternalComposite( + name, + size, + sourceNames, + formats, + newBuckets, + afterKey, + reverseMuls, + missingOrders, + earlyTerminated, + getMetadata() + ); } @Override @@ -150,6 +175,7 @@ public InternalBucket createBucket(InternalAggregations aggregations, InternalBu prototype.formats, prototype.key, prototype.reverseMuls, + prototype.missingOrders, prototype.docCount, aggregations ); @@ -235,7 +261,18 @@ public InternalAggregation reduce(List aggregations, Reduce lastKey = lastBucket.getRawKey(); } reduceContext.consumeBucketsAndMaybeBreak(result.size()); - return new InternalComposite(name, size, sourceNames, reducedFormats, result, lastKey, reverseMuls, earlyTerminated, metadata); + return new InternalComposite( + name, + size, + sourceNames, + reducedFormats, + result, + lastKey, + reverseMuls, + missingOrders, + earlyTerminated, + metadata + ); } @Override @@ -253,7 +290,7 @@ protected InternalBucket reduceBucket(List buckets, ReduceContex * just whatever formats make sense for *its* index. This can be real * trouble when the index doing the reducing is unmapped. 
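Wire compatibility: the missingOrders array is only (de)serialized from 1.3.0 onward, and streams from older nodes are backfilled with MissingOrder.DEFAULT, which encodes the pre-change behavior, so mixed-version reduces keep ordering buckets exactly as before. A quick sanity sketch of why DEFAULT is the right backfill, with semantics taken from the MissingOrder enum added later in this diff:

```java
import org.opensearch.search.aggregations.bucket.missing.MissingOrder;

final class DefaultBackfillSketch {
    static void demo() {
        // reverseMul is +1 for ASC and -1 for DESC; the left side is "missing".
        int asc = MissingOrder.DEFAULT.compare(() -> true, () -> false, 1);   // -1: missing first
        int desc = MissingOrder.DEFAULT.compare(() -> true, () -> false, -1); // +1: missing last
        assert asc == -1 && desc == 1; // matches the old hard-coded -1 * reverseMul logic
    }
}
```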
*/ List<DocValueFormat> reducedFormats = buckets.get(0).formats; - return new InternalBucket(sourceNames, reducedFormats, buckets.get(0).key, reverseMuls, docCount, aggs); + return new InternalBucket(sourceNames, reducedFormats, buckets.get(0).key, reverseMuls, missingOrders, docCount, aggs); } @Override @@ -266,12 +303,13 @@ public boolean equals(Object obj) { return Objects.equals(size, that.size) && Objects.equals(buckets, that.buckets) && Objects.equals(afterKey, that.afterKey) - && Arrays.equals(reverseMuls, that.reverseMuls); + && Arrays.equals(reverseMuls, that.reverseMuls) + && Arrays.equals(missingOrders, that.missingOrders); } @Override public int hashCode() { - return Objects.hash(super.hashCode(), size, buckets, afterKey, Arrays.hashCode(reverseMuls)); + return Objects.hash(super.hashCode(), size, buckets, afterKey, Arrays.hashCode(reverseMuls), Arrays.hashCode(missingOrders)); } private static class BucketIterator implements Comparable<BucketIterator> { @@ -301,6 +339,7 @@ public static class InternalBucket extends InternalMultiBucketAggregation.Intern private final long docCount; private final InternalAggregations aggregations; private final transient int[] reverseMuls; + private final transient MissingOrder[] missingOrders; private final transient List<String> sourceNames; private final transient List<DocValueFormat> formats; @@ -309,6 +348,7 @@ public static class InternalBucket extends InternalMultiBucketAggregation.Intern List<DocValueFormat> formats, CompositeKey key, int[] reverseMuls, + MissingOrder[] missingOrders, long docCount, InternalAggregations aggregations ) { @@ -316,15 +356,23 @@ public static class InternalBucket extends InternalMultiBucketAggregation.Intern this.docCount = docCount; this.aggregations = aggregations; this.reverseMuls = reverseMuls; + this.missingOrders = missingOrders; this.sourceNames = sourceNames; this.formats = formats; } - InternalBucket(StreamInput in, List<String> sourceNames, List<DocValueFormat> formats, int[] reverseMuls) throws IOException { + InternalBucket( + StreamInput in, + List<String> sourceNames, + List<DocValueFormat> formats, + int[] reverseMuls, + MissingOrder[] missingOrders + ) throws IOException { this.key = new CompositeKey(in); this.docCount = in.readVLong(); this.aggregations = InternalAggregations.readFrom(in); this.reverseMuls = reverseMuls; + this.missingOrders = missingOrders; this.sourceNames = sourceNames; this.formats = formats; } @@ -400,13 +448,15 @@ List<DocValueFormat> getFormats() { @Override public int compareKey(InternalBucket other) { for (int i = 0; i < key.size(); i++) { - if (key.get(i) == null) { - if (other.key.get(i) == null) { + // lambda expressions require an effectively final loop variable.
+ final int index = i; + int result = missingOrders[i].compare(() -> key.get(index) == null, () -> other.key.get(index) == null, reverseMuls[i]); + if (MissingOrder.unknownOrder(result) == false) { + if (result == 0) { continue; + } else { + return result; } - return -1 * reverseMuls[i]; - } else if (other.key.get(i) == null) { - return reverseMuls[i]; } assert key.get(i).getClass() == other.key.get(i).getClass(); @SuppressWarnings("unchecked") diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/LongValuesSource.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/LongValuesSource.java index e964283e2e362..0ce147a138a96 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/LongValuesSource.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/LongValuesSource.java @@ -54,8 +54,10 @@ import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.LeafBucketCollector; +import org.opensearch.search.aggregations.bucket.missing.MissingOrder; import java.io.IOException; +import java.util.Objects; import java.util.function.LongUnaryOperator; import java.util.function.ToLongFunction; @@ -79,10 +81,11 @@ class LongValuesSource extends SingleDimensionValuesSource { LongUnaryOperator rounding, DocValueFormat format, boolean missingBucket, + MissingOrder missingOrder, int size, int reverseMul ) { - super(bigArrays, format, fieldType, missingBucket, size, reverseMul); + super(bigArrays, format, fieldType, missingBucket, missingOrder, size, reverseMul); this.bigArrays = bigArrays; this.docValuesFunc = docValuesFunc; this.rounding = rounding; @@ -107,10 +110,9 @@ void copyCurrent(int slot) { @Override int compare(int from, int to) { if (missingBucket) { - if (bits.get(from) == false) { - return bits.get(to) ? -1 * reverseMul : 0; - } else if (bits.get(to) == false) { - return reverseMul; + int result = missingOrder.compare(() -> bits.get(from) == false, () -> bits.get(to) == false, reverseMul); + if (MissingOrder.unknownOrder(result) == false) { + return result; } } return compareValues(values.get(from), values.get(to)); @@ -119,10 +121,9 @@ int compare(int from, int to) { @Override int compareCurrent(int slot) { if (missingBucket) { - if (missingCurrentValue) { - return bits.get(slot) ? -1 * reverseMul : 0; - } else if (bits.get(slot) == false) { - return reverseMul; + int result = missingOrder.compare(() -> missingCurrentValue, () -> bits.get(slot) == false, reverseMul); + if (MissingOrder.unknownOrder(result) == false) { + return result; } } return compareValues(currentValue, values.get(slot)); @@ -131,10 +132,9 @@ int compareCurrent(int slot) { @Override int compareCurrentWithAfter() { if (missingBucket) { - if (missingCurrentValue) { - return afterValue != null ? 
-1 * reverseMul : 0; - } else if (afterValue == null) { - return reverseMul; + int result = missingOrder.compare(() -> missingCurrentValue, () -> Objects.isNull(afterValue), reverseMul); + if (MissingOrder.unknownOrder(result) == false) { + return result; + } } return compareValues(currentValue, afterValue); diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/SingleDimensionValuesSource.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/SingleDimensionValuesSource.java index faa94963ae5c9..94b108e863e3d 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/SingleDimensionValuesSource.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/SingleDimensionValuesSource.java @@ -41,10 +41,13 @@ import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.LeafBucketCollector; +import org.opensearch.search.aggregations.bucket.missing.MissingOrder; import org.opensearch.search.sort.SortOrder; import java.io.IOException; +import static org.opensearch.search.aggregations.bucket.missing.MissingOrder.LAST; + /** * A source that can record and compare values of similar type. */ @@ -54,6 +57,7 @@ abstract class SingleDimensionValuesSource<T extends Comparable<T>> implements R @Nullable protected final MappedFieldType fieldType; protected final boolean missingBucket; + protected final MissingOrder missingOrder; protected final int size; protected final int reverseMul; @@ -67,6 +71,7 @@ abstract class SingleDimensionValuesSource<T extends Comparable<T>> implements R * @param format The format of the source. * @param fieldType The field type or null if the source is a script. * @param missingBucket If true, an explicit `null` bucket represents documents with missing values. + * @param missingOrder The `null` bucket's position. * @param size The number of values to record. * @param reverseMul -1 if the natural order ({@link SortOrder#ASC}) should be reversed. */ @@ -75,6 +80,7 @@ abstract class SingleDimensionValuesSource<T extends Comparable<T>> implements R DocValueFormat format, @Nullable MappedFieldType fieldType, boolean missingBucket, + MissingOrder missingOrder, int size, int reverseMul ) { @@ -82,6 +88,7 @@ abstract class SingleDimensionValuesSource<T extends Comparable<T>> implements R this.format = format; this.fieldType = fieldType; this.missingBucket = missingBucket; + this.missingOrder = missingOrder; this.size = size; this.reverseMul = reverseMul; this.afterValue = null; @@ -167,8 +174,11 @@ T getAfter() { * Returns true if a {@link SortedDocsProducer} should be used to optimize the execution.
*/ protected boolean checkIfSortedDocsIsApplicable(IndexReader reader, MappedFieldType fieldType) { - if (fieldType == null || (missingBucket && afterValue == null) || fieldType.isSearchable() == false || - // inverse of the natural order + if (fieldType == null + || (missingBucket && (afterValue == null || (reverseMul == 1 && missingOrder == LAST))) + || fieldType.isSearchable() == false + || + // inverse of the natural order reverseMul == -1) { return false; } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java index d70b9dece82c0..c871e8d0c7d5b 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java @@ -43,6 +43,7 @@ import org.opensearch.index.query.QueryShardContext; import org.opensearch.script.Script; import org.opensearch.search.DocValueFormat; +import org.opensearch.search.aggregations.bucket.missing.MissingOrder; import org.opensearch.search.aggregations.support.CoreValuesSourceType; import org.opensearch.search.aggregations.support.ValuesSource; import org.opensearch.search.aggregations.support.ValuesSourceConfig; @@ -68,6 +69,7 @@ CompositeValuesSourceConfig apply( boolean hasScript, // probably redundant with the config, but currently we check this two different ways... String format, boolean missingBucket, + MissingOrder missingOrder, SortOrder order ); } @@ -111,7 +113,7 @@ static void register(ValuesSourceRegistry.Builder builder) { builder.register( REGISTRY_KEY, org.opensearch.common.collect.List.of(CoreValuesSourceType.DATE, CoreValuesSourceType.NUMERIC, CoreValuesSourceType.BOOLEAN), - (valuesSourceConfig, name, hasScript, format, missingBucket, order) -> { + (valuesSourceConfig, name, hasScript, format, missingBucket, missingOrder, order) -> { final DocValueFormat docValueFormat; if (format == null && valuesSourceConfig.valueSourceType() == CoreValuesSourceType.DATE) { // defaults to the raw format on date fields (preserve timestamp as longs).
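The widened guard in checkIfSortedDocsIsApplicable above is the one behavioral interaction with the sorted-docs optimization: a SortedDocsProducer visits real terms in index order, so it would presumably surface the explicit null bucket in the wrong position when an ascending source asks for missing values last; that appears to be why ASC plus LAST now disqualifies the shortcut even when an after key is present. Condensed into a sketch, with names as in the diff:

```java
import org.opensearch.search.aggregations.bucket.missing.MissingOrder;

final class SortedDocsGuardSketch {
    // Mirrors the new disqualification clause (not the full method): the shortcut
    // stays off when missing buckets are on and either no after-key was given,
    // or an ascending source pins missing values to the end.
    static boolean sortedDocsUnsafe(boolean missingBucket, Object afterValue, int reverseMul, MissingOrder missingOrder) {
        return missingBucket && (afterValue == null || (reverseMul == 1 && missingOrder == MissingOrder.LAST));
    }
}
```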
@@ -126,6 +128,7 @@ static void register(ValuesSourceRegistry.Builder builder) { docValueFormat, order, missingBucket, + missingOrder, hasScript, ( BigArrays bigArrays, @@ -142,6 +145,7 @@ static void register(ValuesSourceRegistry.Builder builder) { vs::doubleValues, compositeValuesSourceConfig.format(), compositeValuesSourceConfig.missingBucket(), + compositeValuesSourceConfig.missingOrder(), size, compositeValuesSourceConfig.reverseMul() ); @@ -156,6 +160,7 @@ static void register(ValuesSourceRegistry.Builder builder) { rounding, compositeValuesSourceConfig.format(), compositeValuesSourceConfig.missingBucket(), + compositeValuesSourceConfig.missingOrder(), size, compositeValuesSourceConfig.reverseMul() ); @@ -170,13 +175,14 @@ static void register(ValuesSourceRegistry.Builder builder) { builder.register( REGISTRY_KEY, org.opensearch.common.collect.List.of(CoreValuesSourceType.BYTES, CoreValuesSourceType.IP), - (valuesSourceConfig, name, hasScript, format, missingBucket, order) -> new CompositeValuesSourceConfig( + (valuesSourceConfig, name, hasScript, format, missingBucket, missingOrder, order) -> new CompositeValuesSourceConfig( name, valuesSourceConfig.fieldType(), valuesSourceConfig.getValuesSource(), valuesSourceConfig.format(), order, missingBucket, + missingOrder, hasScript, ( BigArrays bigArrays, @@ -193,6 +199,7 @@ static void register(ValuesSourceRegistry.Builder builder) { vs::globalOrdinalsValues, compositeValuesSourceConfig.format(), compositeValuesSourceConfig.missingBucket(), + compositeValuesSourceConfig.missingOrder(), size, compositeValuesSourceConfig.reverseMul() ); @@ -205,6 +212,7 @@ static void register(ValuesSourceRegistry.Builder builder) { vs::bytesValues, compositeValuesSourceConfig.format(), compositeValuesSourceConfig.missingBucket(), + compositeValuesSourceConfig.missingOrder(), size, compositeValuesSourceConfig.reverseMul() ); @@ -224,6 +232,6 @@ protected ValuesSourceType getDefaultValuesSourceType() { protected CompositeValuesSourceConfig innerBuild(QueryShardContext queryShardContext, ValuesSourceConfig config) throws IOException { return queryShardContext.getValuesSourceRegistry() .getAggregator(REGISTRY_KEY, config) - .apply(config, name, script() != null, format(), missingBucket(), order()); + .apply(config, name, script() != null, format(), missingBucket(), missingOrder(), order()); } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/missing/MissingOrder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/missing/MissingOrder.java new file mode 100644 index 0000000000000..06086f507f665 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/missing/MissingOrder.java @@ -0,0 +1,109 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.aggregations.bucket.missing; + +import org.opensearch.common.inject.Provider; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.io.stream.Writeable; + +import java.io.IOException; +import java.util.Locale; + +/** + * Composite Aggregation Missing bucket order. + */ +public enum MissingOrder implements Writeable { + /** + * missing first. 
+ */ + FIRST { + @Override + public int compare(Provider<Boolean> leftIsMissing, Provider<Boolean> rightIsMissing, int reverseMul) { + if (leftIsMissing.get()) { + return rightIsMissing.get() ? 0 : -1; + } else if (rightIsMissing.get()) { + return 1; + } + return MISSING_ORDER_UNKNOWN; + } + + @Override + public String toString() { + return "first"; + } + }, + + /** + * missing last. + */ + LAST { + @Override + public int compare(Provider<Boolean> leftIsMissing, Provider<Boolean> rightIsMissing, int reverseMul) { + if (leftIsMissing.get()) { + return rightIsMissing.get() ? 0 : 1; + } else if (rightIsMissing.get()) { + return -1; + } + return MISSING_ORDER_UNKNOWN; + } + + @Override + public String toString() { + return "last"; + } + }, + + /** + * Default: ASC missing first / DESC missing last + */ + DEFAULT { + @Override + public int compare(Provider<Boolean> leftIsMissing, Provider<Boolean> rightIsMissing, int reverseMul) { + if (leftIsMissing.get()) { + return rightIsMissing.get() ? 0 : -1 * reverseMul; + } else if (rightIsMissing.get()) { + return reverseMul; + } + return MISSING_ORDER_UNKNOWN; + } + + @Override + public String toString() { + return "default"; + } + }; + + public static final String NAME = "missing_order"; + + private static final int MISSING_ORDER_UNKNOWN = Integer.MIN_VALUE; + + public static MissingOrder readFromStream(StreamInput in) throws IOException { + return in.readEnum(MissingOrder.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeEnum(this); + } + + public static boolean isDefault(MissingOrder order) { + return order == DEFAULT; + } + + public static MissingOrder fromString(String order) { + return valueOf(order.toUpperCase(Locale.ROOT)); + } + + public static boolean unknownOrder(int v) { + return v == MISSING_ORDER_UNKNOWN; + } + + public abstract int compare(Provider<Boolean> leftIsMissing, Provider<Boolean> rightIsMissing, int reverseMul); +} diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/AggregationPath.java b/server/src/main/java/org/opensearch/search/aggregations/support/AggregationPath.java index 8731bb3afea5a..98cd9e66d9e7e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/AggregationPath.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/AggregationPath.java @@ -228,8 +228,8 @@ public Aggregator resolveTopmostAggregator(Aggregator root) { AggregationPath.PathElement token = pathElements.get(0); // TODO both unwrap and subAggregator are only used here!
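The compare contract of the new enum, as relied on by every call site earlier in this diff: it only rules on pairs where at least one side is missing, and answers with the MISSING_ORDER_UNKNOWN sentinel otherwise. A few illustrative values in a sketch (reverseMul is +1 for ASC, -1 for DESC):

```java
import org.opensearch.search.aggregations.bucket.missing.MissingOrder;

final class MissingOrderContractSketch {
    static void demo() {
        // Left side missing, right side present:
        assert MissingOrder.FIRST.compare(() -> true, () -> false, 1) == -1;   // missing sorts first, either direction
        assert MissingOrder.LAST.compare(() -> true, () -> false, 1) == 1;     // missing sorts last, either direction
        assert MissingOrder.DEFAULT.compare(() -> true, () -> false, -1) == 1; // DESC: default puts missing last

        // Both sides present: the enum has no opinion; the caller compares values itself.
        int both = MissingOrder.DEFAULT.compare(() -> false, () -> false, 1);
        assert MissingOrder.unknownOrder(both);
    }
}
```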
Aggregator aggregator = ProfilingAggregator.unwrap(root.subAggregator(token.name)); - assert (aggregator instanceof SingleBucketAggregator) - || (aggregator instanceof NumericMetricsAggregator) : "this should be picked up before aggregation execution - on validate"; + assert (aggregator instanceof SingleBucketAggregator) || (aggregator instanceof NumericMetricsAggregator) + : "this should be picked up before aggregation execution - on validate"; return aggregator; } diff --git a/server/src/main/java/org/opensearch/search/fetch/FetchContext.java b/server/src/main/java/org/opensearch/search/fetch/FetchContext.java index 24bb8f6715744..d26dbac33afea 100644 --- a/server/src/main/java/org/opensearch/search/fetch/FetchContext.java +++ b/server/src/main/java/org/opensearch/search/fetch/FetchContext.java @@ -36,6 +36,7 @@ import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.ParsedQuery; +import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.SearchExtBuilder; import org.opensearch.search.fetch.subphase.FetchDocValuesContext; import org.opensearch.search.fetch.subphase.FetchFieldsContext; @@ -220,6 +221,10 @@ public SearchExtBuilder getSearchExt(String name) { return searchContext.getSearchExt(name); } + public QueryShardContext getQueryShardContext() { + return searchContext.getQueryShardContext(); + } + /** * For a hit document that's being processed, return the source lookup representing the * root document. This method is used to pass down the root source when processing this diff --git a/server/src/main/java/org/opensearch/search/fetch/FetchPhase.java b/server/src/main/java/org/opensearch/search/fetch/FetchPhase.java index e1537b6aa1934..7b1beaed07c4f 100644 --- a/server/src/main/java/org/opensearch/search/fetch/FetchPhase.java +++ b/server/src/main/java/org/opensearch/search/fetch/FetchPhase.java @@ -330,7 +330,7 @@ private HitContext prepareNonNestedHitContext( Text typeText = documentMapper.typeText(); if (fieldsVisitor == null) { - SearchHit hit = new SearchHit(docId, null, typeText, null, null); + SearchHit hit = new SearchHit(docId, null, null, null); return new HitContext(hit, subReaderContext, subDocId, lookup.source()); } else { SearchHit hit; @@ -340,9 +340,9 @@ private HitContext prepareNonNestedHitContext( Map docFields = new HashMap<>(); Map metaFields = new HashMap<>(); fillDocAndMetaFields(context, fieldsVisitor, storedToRequestedFields, docFields, metaFields); - hit = new SearchHit(docId, uid.id(), typeText, docFields, metaFields); + hit = new SearchHit(docId, uid.id(), docFields, metaFields); } else { - hit = new SearchHit(docId, uid.id(), typeText, emptyMap(), emptyMap()); + hit = new SearchHit(docId, uid.id(), emptyMap(), emptyMap()); } HitContext hitContext = new HitContext(hit, subReaderContext, subDocId, lookup.source()); @@ -420,7 +420,6 @@ private HitContext prepareNestedHitContext( } DocumentMapper documentMapper = context.mapperService().documentMapper(); - Text typeText = documentMapper.typeText(); ObjectMapper nestedObjectMapper = documentMapper.findNestedObjectMapper(nestedDocId, context, subReaderContext); assert nestedObjectMapper != null; @@ -432,7 +431,7 @@ private HitContext prepareNestedHitContext( nestedObjectMapper ); - SearchHit hit = new SearchHit(nestedTopDocId, rootId.id(), typeText, nestedIdentity, docFields, metaFields); + SearchHit hit = new SearchHit(nestedTopDocId, rootId.id(), nestedIdentity, docFields, metaFields); HitContext hitContext = new 
HitContext(hit, subReaderContext, nestedDocId, new SourceLookup()); // Use a clean, fresh SourceLookup // for the nested context diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/FetchFieldsPhase.java b/server/src/main/java/org/opensearch/search/fetch/subphase/FetchFieldsPhase.java index 0f7af12d90873..353ec3f8080d2 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/FetchFieldsPhase.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/FetchFieldsPhase.java @@ -35,7 +35,6 @@ import org.apache.lucene.index.LeafReaderContext; import org.opensearch.common.document.DocumentField; import org.opensearch.index.mapper.IgnoredFieldMapper; -import org.opensearch.index.mapper.MapperService; import org.opensearch.search.SearchHit; import org.opensearch.search.fetch.FetchContext; import org.opensearch.search.fetch.FetchSubPhase; @@ -61,7 +60,6 @@ public FetchSubPhaseProcessor getProcessor(FetchContext fetchContext) { return null; } - MapperService mapperService = fetchContext.mapperService(); SearchLookup searchLookup = fetchContext.searchLookup(); if (fetchContext.mapperService().documentMapper().sourceMapper().enabled() == false) { throw new IllegalArgumentException( @@ -72,7 +70,7 @@ public FetchSubPhaseProcessor getProcessor(FetchContext fetchContext) { ); } - FieldFetcher fieldFetcher = FieldFetcher.create(mapperService, searchLookup, fetchFieldsContext.fields()); + FieldFetcher fieldFetcher = FieldFetcher.create(fetchContext.getQueryShardContext(), searchLookup, fetchFieldsContext.fields()); return new FetchSubPhaseProcessor() { @Override public void setNextReader(LeafReaderContext readerContext) { diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/FieldFetcher.java b/server/src/main/java/org/opensearch/search/fetch/subphase/FieldFetcher.java index 718c7971caf6f..6790a1a79d634 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/FieldFetcher.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/FieldFetcher.java @@ -35,8 +35,8 @@ import org.apache.lucene.index.LeafReaderContext; import org.opensearch.common.document.DocumentField; import org.opensearch.index.mapper.MappedFieldType; -import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.ValueFetcher; +import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.lookup.SearchLookup; import org.opensearch.search.lookup.SourceLookup; @@ -53,7 +53,7 @@ * Then given a specific document, it can retrieve the corresponding fields from the document's source. 
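The fetch-side hunks above and below share one theme: field resolution moves off MapperService and onto QueryShardContext (FetchContext.getQueryShardContext() is added precisely so FieldFetcher and the highlighters can reach it). A condensed sketch of the new resolution loop in FieldFetcher.create below, using only calls visible in this diff:

```java
// For each requested pattern: expand to concrete field names, skip unmapped and
// metadata fields, then build a ValueFetcher from the resolved field type.
for (String field : context.simpleMatchToIndexNames(fieldPattern)) {
    MappedFieldType ft = context.getFieldType(field);
    if (ft == null || context.isMetadataField(field)) {
        continue;
    }
    fieldContexts.add(new FieldContext(field, ft.valueFetcher(context, searchLookup, format)));
}
```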
*/ public class FieldFetcher { - public static FieldFetcher create(MapperService mapperService, SearchLookup searchLookup, Collection fieldAndFormats) { + public static FieldFetcher create(QueryShardContext context, SearchLookup searchLookup, Collection fieldAndFormats) { List fieldContexts = new ArrayList<>(); @@ -61,13 +61,13 @@ public static FieldFetcher create(MapperService mapperService, SearchLookup sear String fieldPattern = fieldAndFormat.field; String format = fieldAndFormat.format; - Collection concreteFields = mapperService.simpleMatchToFullName(fieldPattern); + Collection concreteFields = context.simpleMatchToIndexNames(fieldPattern); for (String field : concreteFields) { - MappedFieldType ft = mapperService.fieldType(field); - if (ft == null || mapperService.isMetadataField(field)) { + MappedFieldType ft = context.getFieldType(field); + if (ft == null || context.isMetadataField(field)) { continue; } - ValueFetcher valueFetcher = ft.valueFetcher(mapperService, searchLookup, format); + ValueFetcher valueFetcher = ft.valueFetcher(context, searchLookup, format); fieldContexts.add(new FieldContext(field, valueFetcher)); } } diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/InnerHitsPhase.java b/server/src/main/java/org/opensearch/search/fetch/subphase/InnerHitsPhase.java index d9986fc90c9ce..2bb610f49215c 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/InnerHitsPhase.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/InnerHitsPhase.java @@ -36,6 +36,7 @@ import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.ScoreDoc; import org.opensearch.common.lucene.search.TopDocsAndMaxScore; +import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.Uid; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; @@ -95,7 +96,7 @@ private void hitExecute(Map innerHi docIdsToLoad[j] = topDoc.topDocs.scoreDocs[j].doc; } innerHitsContext.docIdsToLoad(docIdsToLoad, 0, docIdsToLoad.length); - innerHitsContext.setRootId(new Uid(hit.getType(), hit.getId())); + innerHitsContext.setRootId(new Uid(MapperService.SINGLE_MAPPING_NAME, hit.getId())); innerHitsContext.setRootLookup(rootLookup); fetchPhase.execute(innerHitsContext); diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/HighlightUtils.java b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/HighlightUtils.java index 080a869bb4825..3265a339595f5 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/HighlightUtils.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/HighlightUtils.java @@ -36,8 +36,9 @@ import org.apache.lucene.search.highlight.SimpleHTMLEncoder; import org.opensearch.index.fieldvisitor.CustomFieldsVisitor; import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.index.mapper.ValueFetcher; +import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.fetch.FetchSubPhase; -import org.opensearch.search.lookup.SourceLookup; import java.io.IOException; import java.util.Collections; @@ -58,24 +59,20 @@ private HighlightUtils() { /** * Load field values for highlighting. 
*/ - public static List loadFieldValues(MappedFieldType fieldType, FetchSubPhase.HitContext hitContext, boolean forceSource) - throws IOException { - // percolator needs to always load from source, thus it sets the global force source to true - List textsToHighlight; + public static List loadFieldValues( + MappedFieldType fieldType, + QueryShardContext context, + FetchSubPhase.HitContext hitContext, + boolean forceSource + ) throws IOException { if (forceSource == false && fieldType.isStored()) { CustomFieldsVisitor fieldVisitor = new CustomFieldsVisitor(singleton(fieldType.name()), false); hitContext.reader().document(hitContext.docId(), fieldVisitor); - textsToHighlight = fieldVisitor.fields().get(fieldType.name()); - if (textsToHighlight == null) { - // Can happen if the document doesn't have the field to highlight - textsToHighlight = Collections.emptyList(); - } - } else { - SourceLookup sourceLookup = hitContext.sourceLookup(); - textsToHighlight = sourceLookup.extractRawValues(fieldType.name()); + List textsToHighlight = fieldVisitor.fields().get(fieldType.name()); + return textsToHighlight != null ? textsToHighlight : Collections.emptyList(); } - assert textsToHighlight != null; - return textsToHighlight; + ValueFetcher fetcher = fieldType.valueFetcher(context, null, null); + return fetcher.fetchValues(hitContext.sourceLookup()); } public static class Encoders { diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/PlainHighlighter.java b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/PlainHighlighter.java index 94ca1745bf271..d2699c650d887 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/PlainHighlighter.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/PlainHighlighter.java @@ -49,7 +49,6 @@ import org.apache.lucene.util.CollectionUtil; import org.opensearch.common.text.Text; import org.opensearch.index.IndexSettings; -import org.opensearch.index.mapper.KeywordFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.search.fetch.FetchContext; import org.opensearch.search.fetch.FetchSubPhase; @@ -117,26 +116,14 @@ public HighlightField highlight(FieldHighlightContext fieldContext) throws IOExc int numberOfFragments = field.fieldOptions().numberOfFragments() == 0 ? 
1 : field.fieldOptions().numberOfFragments(); ArrayList fragsList = new ArrayList<>(); List textsToHighlight; - Analyzer analyzer = context.mapperService().documentMapper(hitContext.hit().getType()).mappers().indexAnalyzer(); - Integer keywordIgnoreAbove = null; - if (fieldType instanceof KeywordFieldMapper.KeywordFieldType) { - KeywordFieldMapper mapper = (KeywordFieldMapper) context.mapperService() - .documentMapper() - .mappers() - .getMapper(fieldContext.fieldName); - keywordIgnoreAbove = mapper.ignoreAbove(); - } - ; + Analyzer analyzer = context.mapperService().documentMapper().mappers().indexAnalyzer(); final int maxAnalyzedOffset = context.getIndexSettings().getHighlightMaxAnalyzedOffset(); - textsToHighlight = HighlightUtils.loadFieldValues(fieldType, hitContext, fieldContext.forceSource); + textsToHighlight = HighlightUtils.loadFieldValues(fieldType, context.getQueryShardContext(), hitContext, fieldContext.forceSource); for (Object textToHighlight : textsToHighlight) { String text = convertFieldValue(fieldType, textToHighlight); int textLength = text.length(); - if (keywordIgnoreAbove != null && textLength > keywordIgnoreAbove) { - continue; // skip highlighting keyword terms that were ignored during indexing - } if (textLength > maxAnalyzedOffset) { throw new IllegalArgumentException( "The length of [" diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/UnifiedHighlighter.java b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/UnifiedHighlighter.java index 9fe500868cd6f..8f0c434674feb 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/UnifiedHighlighter.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/UnifiedHighlighter.java @@ -51,6 +51,7 @@ import org.opensearch.index.mapper.KeywordFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.TextSearchInfo; +import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.fetch.FetchSubPhase; import org.opensearch.search.fetch.FetchSubPhase.HitContext; @@ -88,7 +89,13 @@ public HighlightField highlight(FieldHighlightContext fieldContext) throws IOExc FetchSubPhase.HitContext hitContext = fieldContext.hitContext; CheckedSupplier loadFieldValues = () -> { - List fieldValues = loadFieldValues(highlighter, fieldType, field, hitContext, fieldContext.forceSource); + List fieldValues = loadFieldValues( + highlighter, + fieldContext.context.getQueryShardContext(), + fieldType, + hitContext, + fieldContext.forceSource + ); if (fieldValues.size() == 0) { return null; } @@ -186,12 +193,12 @@ protected Analyzer getAnalyzer(DocumentMapper docMapper) { protected List loadFieldValues( CustomUnifiedHighlighter highlighter, + QueryShardContext context, MappedFieldType fieldType, - SearchHighlightContext.Field field, FetchSubPhase.HitContext hitContext, boolean forceSource ) throws IOException { - List fieldValues = HighlightUtils.loadFieldValues(fieldType, hitContext, forceSource); + List fieldValues = HighlightUtils.loadFieldValues(fieldType, context, hitContext, forceSource); fieldValues = fieldValues.stream().map((s) -> convertFieldValue(fieldType, s)).collect(Collectors.toList()); return fieldValues; } diff --git a/server/src/main/java/org/opensearch/search/internal/ShardSearchRequest.java b/server/src/main/java/org/opensearch/search/internal/ShardSearchRequest.java index 42690bc88c9e2..f41f7fae8b786 100644 --- 
a/server/src/main/java/org/opensearch/search/internal/ShardSearchRequest.java +++ b/server/src/main/java/org/opensearch/search/internal/ShardSearchRequest.java @@ -71,6 +71,7 @@ import org.opensearch.transport.TransportRequest; import java.io.IOException; +import java.util.Arrays; import java.util.Map; import java.util.function.Function; @@ -87,7 +88,6 @@ public class ShardSearchRequest extends TransportRequest implements IndicesReque private final int numberOfShards; private final SearchType searchType; private final Scroll scroll; - private final String[] types; private final float indexBoost; private final Boolean requestCache; private final long nowInMillis; @@ -152,7 +152,6 @@ public ShardSearchRequest( numberOfShards, searchRequest.searchType(), searchRequest.source(), - searchRequest.types(), searchRequest.requestCache(), aliasFilter, indexBoost, @@ -170,14 +169,13 @@ public ShardSearchRequest( assert searchRequest.allowPartialSearchResults() != null; } - public ShardSearchRequest(ShardId shardId, String[] types, long nowInMillis, AliasFilter aliasFilter) { + public ShardSearchRequest(ShardId shardId, long nowInMillis, AliasFilter aliasFilter) { this( OriginalIndices.NONE, shardId, -1, SearchType.QUERY_THEN_FETCH, null, - types, null, aliasFilter, 1.0f, @@ -198,7 +196,6 @@ private ShardSearchRequest( int numberOfShards, SearchType searchType, SearchSourceBuilder source, - String[] types, Boolean requestCache, AliasFilter aliasFilter, float indexBoost, @@ -215,7 +212,6 @@ private ShardSearchRequest( this.numberOfShards = numberOfShards; this.searchType = searchType; this.source = source; - this.types = types; this.requestCache = requestCache; this.aliasFilter = aliasFilter; this.indexBoost = indexBoost; @@ -240,7 +236,13 @@ public ShardSearchRequest(StreamInput in) throws IOException { numberOfShards = in.readVInt(); scroll = in.readOptionalWriteable(Scroll::new); source = in.readOptionalWriteable(SearchSourceBuilder::new); - types = in.readStringArray(); + if (in.getVersion().before(Version.V_2_0_0)) { + // types are no longer relevant, so ignore them + String[] types = in.readStringArray(); + if (types.length > 0) { + throw new IllegalStateException("types are no longer supported in search requests but found [" + Arrays.toString(types) + "]"); + } + } aliasFilter = new AliasFilter(in); indexBoost = in.readFloat(); nowInMillis = in.readVLong(); @@ -281,7 +283,6 @@ public ShardSearchRequest(ShardSearchRequest clone) { this.numberOfShards = clone.numberOfShards; this.scroll = clone.scroll; this.source = clone.source; - this.types = clone.types; this.aliasFilter = clone.aliasFilter; this.indexBoost = clone.indexBoost; this.nowInMillis = clone.nowInMillis; @@ -314,7 +315,10 @@ protected final void innerWriteTo(StreamOutput out, boolean asKey) throws IOExce } out.writeOptionalWriteable(scroll); out.writeOptionalWriteable(source); - out.writeStringArray(types); + if (out.getVersion().before(Version.V_2_0_0)) { + // types are not supported anymore, so send an empty array to previous versions + out.writeStringArray(Strings.EMPTY_ARRAY); + } aliasFilter.writeTo(out); out.writeFloat(indexBoost); if (asKey == false) { @@ -363,10 +367,6 @@ public ShardId shardId() { return shardId; } - public String[] types() { - return types; - } - public SearchSourceBuilder source() { return source; } diff --git a/server/src/main/java/org/opensearch/search/lookup/DocLookup.java b/server/src/main/java/org/opensearch/search/lookup/DocLookup.java index be85a59a5db53..13e86c235db57 ---
a/server/src/main/java/org/opensearch/search/lookup/DocLookup.java +++ b/server/src/main/java/org/opensearch/search/lookup/DocLookup.java @@ -32,7 +32,6 @@ package org.opensearch.search.lookup; import org.apache.lucene.index.LeafReaderContext; -import org.opensearch.common.Nullable; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; @@ -43,13 +42,10 @@ public class DocLookup { private final MapperService mapperService; private final Function> fieldDataLookup; - @Nullable - private final String[] types; - DocLookup(MapperService mapperService, Function> fieldDataLookup, @Nullable String[] types) { + DocLookup(MapperService mapperService, Function> fieldDataLookup) { this.mapperService = mapperService; this.fieldDataLookup = fieldDataLookup; - this.types = types; } public MapperService mapperService() { @@ -61,10 +57,6 @@ public IndexFieldData getForField(MappedFieldType fieldType) { } public LeafDocLookup getLeafDocLookup(LeafReaderContext context) { - return new LeafDocLookup(mapperService, fieldDataLookup, types, context); - } - - public String[] getTypes() { - return types; + return new LeafDocLookup(mapperService, fieldDataLookup, context); } } diff --git a/server/src/main/java/org/opensearch/search/lookup/FieldsLookup.java b/server/src/main/java/org/opensearch/search/lookup/FieldsLookup.java index 9fd8268ad4b67..9af22c65aba28 100644 --- a/server/src/main/java/org/opensearch/search/lookup/FieldsLookup.java +++ b/server/src/main/java/org/opensearch/search/lookup/FieldsLookup.java @@ -32,22 +32,18 @@ package org.opensearch.search.lookup; import org.apache.lucene.index.LeafReaderContext; -import org.opensearch.common.Nullable; import org.opensearch.index.mapper.MapperService; public class FieldsLookup { private final MapperService mapperService; - @Nullable - private final String[] types; - FieldsLookup(MapperService mapperService, @Nullable String[] types) { + FieldsLookup(MapperService mapperService) { this.mapperService = mapperService; - this.types = types; } public LeafFieldsLookup getLeafFieldsLookup(LeafReaderContext context) { - return new LeafFieldsLookup(mapperService, types, context.reader()); + return new LeafFieldsLookup(mapperService, context.reader()); } } diff --git a/server/src/main/java/org/opensearch/search/lookup/LeafDocLookup.java b/server/src/main/java/org/opensearch/search/lookup/LeafDocLookup.java index 4174ac2c55058..82daa94d92146 100644 --- a/server/src/main/java/org/opensearch/search/lookup/LeafDocLookup.java +++ b/server/src/main/java/org/opensearch/search/lookup/LeafDocLookup.java @@ -33,7 +33,6 @@ import org.apache.lucene.index.LeafReaderContext; import org.opensearch.ExceptionsHelper; -import org.opensearch.common.Nullable; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.ScriptDocValues; @@ -43,7 +42,6 @@ import java.io.IOException; import java.security.AccessController; import java.security.PrivilegedAction; -import java.util.Arrays; import java.util.Collection; import java.util.HashMap; import java.util.Map; @@ -60,23 +58,13 @@ public class LeafDocLookup implements Map> { private final MapperService mapperService; private final Function> fieldDataLookup; - - @Nullable - private final String[] types; - private final LeafReaderContext reader; private int docId = -1; - LeafDocLookup( - MapperService mapperService, - Function> fieldDataLookup, - @Nullable 
String[] types, - LeafReaderContext reader - ) { + LeafDocLookup(MapperService mapperService, Function> fieldDataLookup, LeafReaderContext reader) { this.mapperService = mapperService; this.fieldDataLookup = fieldDataLookup; - this.types = types; this.reader = reader; } @@ -100,9 +88,7 @@ public ScriptDocValues get(Object key) { if (scriptValues == null) { final MappedFieldType fieldType = mapperService.fieldType(fieldName); if (fieldType == null) { - throw new IllegalArgumentException( - "No field found for [" + fieldName + "] in mapping with types " + Arrays.toString(types) - ); + throw new IllegalArgumentException("No field found for [" + fieldName + "] in mapping"); } // load fielddata on behalf of the script: otherwise it would need additional permissions // to deal with pagedbytes/ramusagestimator/etc diff --git a/server/src/main/java/org/opensearch/search/lookup/LeafFieldsLookup.java b/server/src/main/java/org/opensearch/search/lookup/LeafFieldsLookup.java index 6b56a5bf14acf..14c5dade52c87 100644 --- a/server/src/main/java/org/opensearch/search/lookup/LeafFieldsLookup.java +++ b/server/src/main/java/org/opensearch/search/lookup/LeafFieldsLookup.java @@ -33,7 +33,6 @@ import org.apache.lucene.index.LeafReader; import org.opensearch.OpenSearchParseException; -import org.opensearch.common.Nullable; import org.opensearch.index.fieldvisitor.SingleFieldsVisitor; import org.opensearch.index.mapper.DocumentMapper; import org.opensearch.index.mapper.MappedFieldType; @@ -42,7 +41,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.HashMap; import java.util.List; @@ -54,19 +52,14 @@ public class LeafFieldsLookup implements Map { private final MapperService mapperService; - - @Nullable - private final String[] types; - private final LeafReader reader; private int docId = -1; private final Map cachedFieldData = new HashMap<>(); - LeafFieldsLookup(MapperService mapperService, @Nullable String[] types, LeafReader reader) { + LeafFieldsLookup(MapperService mapperService, LeafReader reader) { this.mapperService = mapperService; - this.types = types; this.reader = reader; } @@ -148,7 +141,7 @@ private FieldLookup loadFieldData(String name) { if (data == null) { MappedFieldType fieldType = mapperService.fieldType(name); if (fieldType == null) { - throw new IllegalArgumentException("No field found for [" + name + "] in mapping with types " + Arrays.toString(types)); + throw new IllegalArgumentException("No field found for [" + name + "] in mapping"); } data = new FieldLookup(fieldType); cachedFieldData.put(name, data); diff --git a/server/src/main/java/org/opensearch/search/lookup/SearchLookup.java b/server/src/main/java/org/opensearch/search/lookup/SearchLookup.java index 269052f895066..11feb3f2f9e57 100644 --- a/server/src/main/java/org/opensearch/search/lookup/SearchLookup.java +++ b/server/src/main/java/org/opensearch/search/lookup/SearchLookup.java @@ -33,7 +33,6 @@ package org.opensearch.search.lookup; import org.apache.lucene.index.LeafReaderContext; -import org.opensearch.common.Nullable; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; @@ -75,17 +74,15 @@ public class SearchLookup { */ public SearchLookup( MapperService mapperService, - BiFunction, IndexFieldData> fieldDataLookup, - @Nullable String[] types + BiFunction, IndexFieldData> fieldDataLookup ) { this.fieldChain = Collections.emptySet(); docMap = 
new DocLookup( mapperService, - fieldType -> fieldDataLookup.apply(fieldType, () -> forkAndTrackFieldReferences(fieldType.name())), - types + fieldType -> fieldDataLookup.apply(fieldType, () -> forkAndTrackFieldReferences(fieldType.name())) ); sourceLookup = new SourceLookup(); - fieldsLookup = new FieldsLookup(mapperService, types); + fieldsLookup = new FieldsLookup(mapperService); this.fieldDataLookup = fieldDataLookup; } @@ -100,8 +97,7 @@ private SearchLookup(SearchLookup searchLookup, Set fieldChain) { this.fieldChain = Collections.unmodifiableSet(fieldChain); this.docMap = new DocLookup( searchLookup.docMap.mapperService(), - fieldType -> searchLookup.fieldDataLookup.apply(fieldType, () -> forkAndTrackFieldReferences(fieldType.name())), - searchLookup.docMap.getTypes() + fieldType -> searchLookup.fieldDataLookup.apply(fieldType, () -> forkAndTrackFieldReferences(fieldType.name())) ); this.sourceLookup = searchLookup.sourceLookup; this.fieldsLookup = searchLookup.fieldsLookup; diff --git a/server/src/main/java/org/opensearch/search/query/QueryPhase.java b/server/src/main/java/org/opensearch/search/query/QueryPhase.java index b133769e2818f..e78741f48a223 100644 --- a/server/src/main/java/org/opensearch/search/query/QueryPhase.java +++ b/server/src/main/java/org/opensearch/search/query/QueryPhase.java @@ -34,34 +34,24 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.lucene.document.LongPoint; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.PointValues; import org.apache.lucene.queries.MinDocQuery; import org.apache.lucene.queries.SearchAfterSortedDocQuery; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Collector; -import org.apache.lucene.search.CollectorManager; import org.apache.lucene.search.ConstantScoreQuery; -import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreDoc; -import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TopDocs; -import org.apache.lucene.search.TopFieldCollector; -import org.apache.lucene.search.TopFieldDocs; import org.apache.lucene.search.TotalHits; -import org.apache.lucene.search.Weight; -import org.apache.lucene.util.FutureArrays; import org.opensearch.action.search.SearchShardTask; import org.opensearch.common.Booleans; -import org.opensearch.common.CheckedConsumer; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.lucene.search.TopDocsAndMaxScore; import org.opensearch.common.util.concurrent.QueueResizingOpenSearchThreadPoolExecutor; @@ -85,12 +75,7 @@ import org.opensearch.threadpool.ThreadPool; import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.Comparator; import java.util.LinkedList; -import java.util.List; import java.util.concurrent.ExecutorService; import static org.opensearch.search.query.QueryCollectorContext.createEarlyTerminationCollectorContext; @@ -98,7 +83,6 @@ import static org.opensearch.search.query.QueryCollectorContext.createMinScoreCollectorContext; import static org.opensearch.search.query.QueryCollectorContext.createMultiCollectorContext; import static 
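With the mapping-types parameter gone from DocLookup, FieldsLookup and SearchLookup, the consumer-side call chain can be exercised as in the minimal sketch below, assuming the two-argument SearchLookup constructor introduced above; the wrapper class, the "price" field and the method arguments are illustrative only.

import org.apache.lucene.index.LeafReaderContext;
import org.opensearch.index.fielddata.ScriptDocValues;
import org.opensearch.search.lookup.LeafDocLookup;
import org.opensearch.search.lookup.SearchLookup;

final class LookupSketch {
    // per-segment doc-value access no longer carries a types filter:
    // field resolution goes straight through MapperService.fieldType(name)
    static ScriptDocValues<?> docValues(SearchLookup lookup, LeafReaderContext ctx, int docId) {
        LeafDocLookup leafDoc = lookup.doc().getLeafDocLookup(ctx);
        leafDoc.setDocument(docId); // position the lookup on one document
        return leafDoc.get("price");
    }
}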
org.opensearch.search.query.TopDocsCollectorContext.createTopDocsCollectorContext; -import static org.opensearch.search.query.TopDocsCollectorContext.shortcutTotalHitCount; /** * Query phase of a search request, used to run the query and get back from each shard information about the matching documents @@ -183,7 +167,6 @@ public void execute(SearchContext searchContext) throws QueryPhaseExecutionExcep */ static boolean executeInternal(SearchContext searchContext) throws QueryPhaseExecutionException { final ContextIndexSearcher searcher = searchContext.searcher(); - SortAndFormats sortAndFormatsForRewrittenNumericSort = null; final IndexReader reader = searcher.getIndexReader(); QuerySearchResult queryResult = searchContext.queryResult(); queryResult.searchTimedOut(false); @@ -252,26 +235,9 @@ static boolean executeInternal(SearchContext searchContext) throws QueryPhaseExe // this collector can filter documents during the collection hasFilterCollector = true; } - - CheckedConsumer, IOException> leafSorter = l -> {}; - // try to rewrite numeric or date sort to the optimized distanceFeatureQuery + // optimizing sort on Numerics (long and date) if ((searchContext.sort() != null) && SYS_PROP_REWRITE_SORT) { - Query rewrittenQuery = tryRewriteLongSort(searchContext, searcher.getIndexReader(), query, hasFilterCollector); - if (rewrittenQuery != null) { - query = rewrittenQuery; - // modify sorts: add sort on _score as 1st sort, and move the sort on the original field as the 2nd sort - SortField[] oldSortFields = searchContext.sort().sort.getSort(); - DocValueFormat[] oldFormats = searchContext.sort().formats; - SortField[] newSortFields = new SortField[oldSortFields.length + 1]; - DocValueFormat[] newFormats = new DocValueFormat[oldSortFields.length + 1]; - newSortFields[0] = SortField.FIELD_SCORE; - newFormats[0] = DocValueFormat.RAW; - System.arraycopy(oldSortFields, 0, newSortFields, 1, oldSortFields.length); - System.arraycopy(oldFormats, 0, newFormats, 1, oldFormats.length); - sortAndFormatsForRewrittenNumericSort = searchContext.sort(); // stash SortAndFormats to restore it later - searchContext.sort(new SortAndFormats(new Sort(newSortFields), newFormats)); - leafSorter = createLeafSorter(oldSortFields[0]); - } + enhanceSortOnNumeric(searchContext, searcher.getIndexReader()); } boolean timeoutSet = scrollContext == null @@ -303,20 +269,7 @@ static boolean executeInternal(SearchContext searchContext) throws QueryPhaseExe } try { - boolean shouldRescore; - // if we are optimizing sort and there are no other collectors - if (sortAndFormatsForRewrittenNumericSort != null && collectors.size() == 0 && searchContext.getProfilers() == null) { - shouldRescore = searchWithCollectorManager(searchContext, searcher, query, leafSorter, timeoutSet); - } else { - shouldRescore = searchWithCollector(searchContext, searcher, query, collectors, hasFilterCollector, timeoutSet); - } - - // if we rewrote numeric long or date sort, restore fieldDocs based on the original sort - if (sortAndFormatsForRewrittenNumericSort != null) { - searchContext.sort(sortAndFormatsForRewrittenNumericSort); // restore SortAndFormats - restoreTopFieldDocs(queryResult, sortAndFormatsForRewrittenNumericSort); - } - + boolean shouldRescore = searchWithCollector(searchContext, searcher, query, collectors, hasFilterCollector, timeoutSet); ExecutorService executor = searchContext.indexShard().getThreadPool().executor(ThreadPool.Names.SEARCH); if (executor instanceof QueueResizingOpenSearchThreadPoolExecutor) { 
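The request shape this fast path targets is unchanged by the refactor: a top-N sort on an indexed long or date field with doc values. A hedged client-side sketch follows; the field name "timestamp" and the page size are illustrative.

import org.opensearch.search.builder.SearchSourceBuilder;
import org.opensearch.search.sort.SortBuilders;
import org.opensearch.search.sort.SortOrder;

final class NumericSortSketch {
    // a search like this is what enhanceSortOnNumeric() can accelerate,
    // now by hinting the SortField instead of rewriting the query
    static SearchSourceBuilder latestFirst() {
        return new SearchSourceBuilder().size(10).sort(SortBuilders.fieldSort("timestamp").order(SortOrder.DESC));
    }
}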
QueueResizingOpenSearchThreadPoolExecutor rExecutor = (QueueResizingOpenSearchThreadPoolExecutor) executor; @@ -379,182 +332,25 @@ private static boolean searchWithCollector( return topDocsFactory.shouldRescore(); } - /* - * We use collectorManager during sort optimization, where - * we have already checked that there are no other collectors, no filters, - * no search after, no scroll, no collapse, no track scores. - * Absence of all other collectors and parameters allows us to use TopFieldCollector directly. - */ - private static boolean searchWithCollectorManager( - SearchContext searchContext, - ContextIndexSearcher searcher, - Query query, - CheckedConsumer, IOException> leafSorter, - boolean timeoutSet - ) throws IOException { - final IndexReader reader = searchContext.searcher().getIndexReader(); - final int numHits = Math.min(searchContext.from() + searchContext.size(), Math.max(1, reader.numDocs())); - final SortAndFormats sortAndFormats = searchContext.sort(); - - int totalHitsThreshold; - TotalHits totalHits; - if (searchContext.trackTotalHitsUpTo() == SearchContext.TRACK_TOTAL_HITS_DISABLED) { - totalHitsThreshold = 1; - totalHits = new TotalHits(0, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO); - } else { - int hitCount = shortcutTotalHitCount(reader, query); - if (hitCount == -1) { - totalHitsThreshold = searchContext.trackTotalHitsUpTo(); - totalHits = null; // will be computed via the collector - } else { - totalHitsThreshold = 1; - totalHits = new TotalHits(hitCount, TotalHits.Relation.EQUAL_TO); // don't compute hit counts via the collector - } - } - - CollectorManager sharedManager = TopFieldCollector.createSharedManager( - sortAndFormats.sort, - numHits, - null, - totalHitsThreshold - ); - - List leaves = new ArrayList<>(searcher.getIndexReader().leaves()); - leafSorter.accept(leaves); - try { - Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.TOP_SCORES, 1f); - searcher.search(leaves, weight, sharedManager, searchContext.queryResult(), sortAndFormats.formats, totalHits); - } catch (TimeExceededException e) { - assert timeoutSet : "TimeExceededException thrown even though timeout wasn't set"; - if (searchContext.request().allowPartialSearchResults() == false) { - // Can't rethrow TimeExceededException because not serializable - throw new QueryPhaseExecutionException(searchContext.shardTarget(), "Time exceeded"); - } - searchContext.queryResult().searchTimedOut(true); - } - return false; // no rescoring when sorting by field - } - - private static Query tryRewriteLongSort(SearchContext searchContext, IndexReader reader, Query query, boolean hasFilterCollector) - throws IOException { - if ((searchContext.from() + searchContext.size()) <= 0) return null; - if (searchContext.searchAfter() != null) return null; // TODO: handle sort optimization with search after - if (searchContext.scrollContext() != null) return null; - if (searchContext.collapse() != null) return null; - if (searchContext.trackScores()) return null; - if (searchContext.aggregations() != null) return null; + private static void enhanceSortOnNumeric(SearchContext searchContext, IndexReader reader) { if (canEarlyTerminate(reader, searchContext.sort())) { // disable this optimization if index sorting matches the query sort since it's already optimized by index searcher - return null; + return; } Sort sort = searchContext.sort().sort; SortField sortField = sort.getSort()[0]; - if (SortField.Type.LONG.equals(IndexSortConfig.getSortFieldType(sortField)) == false) return null; + if 
(SortField.Type.LONG.equals(IndexSortConfig.getSortFieldType(sortField)) == false) return; // check if this is a field of type Long or Date, that is indexed and has doc values String fieldName = sortField.getField(); - if (fieldName == null) return null; // happens when _score or _doc is the 1st sort field - if (searchContext.mapperService() == null) return null; // mapperService can be null in tests + if (fieldName == null) return; // happens when _score or _doc is the 1st sort field + if (searchContext.mapperService() == null) return; // mapperService can be null in tests final MappedFieldType fieldType = searchContext.mapperService().fieldType(fieldName); - if (fieldType == null) return null; // for unmapped fields, default behaviour depending on "unmapped_type" flag - if ((fieldType.typeName().equals("long") == false) && (fieldType instanceof DateFieldType == false)) return null; - if (fieldType.isSearchable() == false) return null; - if (fieldType.hasDocValues() == false) return null; - - // check that all sorts are actual document fields or _doc - for (int i = 1; i < sort.getSort().length; i++) { - SortField sField = sort.getSort()[i]; - String sFieldName = sField.getField(); - if (sFieldName == null) { - if (SortField.FIELD_DOC.equals(sField) == false) return null; - } else { - // TODO: find out how to cover _script sort that don't use _score - if (searchContext.mapperService().fieldType(sFieldName) == null) return null; // could be _script sort that uses _score - } - } - - // check that setting of missing values allows optimization - if (sortField.getMissingValue() == null) return null; - Long missingValue = (Long) sortField.getMissingValue(); - boolean missingValuesAccordingToSort = (sortField.getReverse() && (missingValue == Long.MIN_VALUE)) - || ((sortField.getReverse() == false) && (missingValue == Long.MAX_VALUE)); - if (missingValuesAccordingToSort == false) return null; - - int docCount = PointValues.getDocCount(reader, fieldName); - // is not worth to run optimization on small index - if (docCount <= 512) return null; - - // check for multiple values - if (PointValues.size(reader, fieldName) != docCount) return null; // TODO: handle multiple values - - // check if the optimization makes sense with the track_total_hits setting - if (searchContext.trackTotalHitsUpTo() == Integer.MAX_VALUE) { - // with filter, we can't pre-calculate hitsCount, we need to explicitly calculate them => optimization does't make sense - if (hasFilterCollector) return null; - // if we can't pre-calculate hitsCount based on the query type, optimization does't make sense - if (shortcutTotalHitCount(reader, query) == -1) return null; - } - - byte[] minValueBytes = PointValues.getMinPackedValue(reader, fieldName); - byte[] maxValueBytes = PointValues.getMaxPackedValue(reader, fieldName); - if ((maxValueBytes == null) || (minValueBytes == null)) return null; - long minValue = LongPoint.decodeDimension(minValueBytes, 0); - long maxValue = LongPoint.decodeDimension(maxValueBytes, 0); - - Query rewrittenQuery; - if (minValue == maxValue) { - rewrittenQuery = new DocValuesFieldExistsQuery(fieldName); - } else { - if (indexFieldHasDuplicateData(reader, fieldName)) return null; - long origin = (sortField.getReverse()) ? 
maxValue : minValue; - long pivotDistance = (maxValue - minValue) >>> 1; // division by 2 on the unsigned representation to avoid overflow - if (pivotDistance == 0) { // 0 if maxValue = (minValue + 1) - pivotDistance = 1; - } - rewrittenQuery = LongPoint.newDistanceFeatureQuery(sortField.getField(), 1, origin, pivotDistance); - } - rewrittenQuery = new BooleanQuery.Builder().add(query, BooleanClause.Occur.FILTER) // filter for original query - .add(rewrittenQuery, BooleanClause.Occur.SHOULD) // should for rewrittenQuery - .build(); - return rewrittenQuery; - } - - /** - * Creates a sorter of {@link LeafReaderContext} that orders leaves depending on the minimum - * value and the sort order of the provided sortField. - */ - static CheckedConsumer, IOException> createLeafSorter(SortField sortField) { - return leaves -> { - long[] sortValues = new long[leaves.size()]; - long missingValue = (long) sortField.getMissingValue(); - for (LeafReaderContext ctx : leaves) { - PointValues values = ctx.reader().getPointValues(sortField.getField()); - if (values == null) { - sortValues[ctx.ord] = missingValue; - } else { - byte[] sortValue = sortField.getReverse() ? values.getMaxPackedValue() : values.getMinPackedValue(); - sortValues[ctx.ord] = sortValue == null ? missingValue : LongPoint.decodeDimension(sortValue, 0); - } - } - Comparator comparator = Comparator.comparingLong(l -> sortValues[l.ord]); - if (sortField.getReverse()) { - comparator = comparator.reversed(); - } - Collections.sort(leaves, comparator); - }; - } - - /** - * Restore fieldsDocs to remove the first _score - */ - private static void restoreTopFieldDocs(QuerySearchResult result, SortAndFormats originalSortAndFormats) { - TopDocs topDocs = result.topDocs().topDocs; - for (ScoreDoc scoreDoc : topDocs.scoreDocs) { - FieldDoc fieldDoc = (FieldDoc) scoreDoc; - fieldDoc.fields = Arrays.copyOfRange(fieldDoc.fields, 1, fieldDoc.fields.length); - } - TopFieldDocs newTopDocs = new TopFieldDocs(topDocs.totalHits, topDocs.scoreDocs, originalSortAndFormats.sort.getSort()); - result.topDocs(new TopDocsAndMaxScore(newTopDocs, Float.NaN), originalSortAndFormats.formats); + if (fieldType == null) return; // for unmapped fields, default behaviour depending on "unmapped_type" flag + if ((fieldType.typeName().equals("long") == false) && (fieldType instanceof DateFieldType == false)) return; + if (fieldType.isSearchable() == false) return; + if (fieldType.hasDocValues() == false) return; + sortField.setCanUsePoints(); } /** @@ -591,81 +387,5 @@ private static boolean canEarlyTerminate(IndexReader reader, SortAndFormats sort return true; } - /** - * Returns true if more than 50% of data in the index have the same value - * The evaluation is approximation based on finding the median value and estimating its count - */ - private static boolean indexFieldHasDuplicateData(IndexReader reader, String field) throws IOException { - long docsNoDupl = 0; // number of docs in segments with NO duplicate data that would benefit optimization - long docsDupl = 0; // number of docs in segments with duplicate data that would NOT benefit optimization - for (LeafReaderContext lrc : reader.leaves()) { - PointValues pointValues = lrc.reader().getPointValues(field); - if (pointValues == null) continue; - int docCount = pointValues.getDocCount(); - if (docCount <= 512) { // skipping small segments as estimateMedianCount doesn't work well on them - continue; - } - assert (pointValues.size() == docCount); // TODO: modify the code to handle multiple values - int duplDocCount = 
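enhanceSortOnNumeric() steps aside when canEarlyTerminate() reports that index sorting already covers the query sort. The sketch below spells out that prefix rule as assumed here; it mirrors the contract of the private helper rather than copying its body.

import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;

final class IndexSortPrefixSketch {
    // assumed contract: early termination applies only when the query sort
    // is a field-by-field prefix of the index sort
    static boolean querySortIsIndexSortPrefix(Sort indexSort, Sort querySort) {
        if (indexSort == null) {
            return false; // the index is not sorted at all
        }
        SortField[] indexFields = indexSort.getSort();
        SortField[] queryFields = querySort.getSort();
        if (queryFields.length > indexFields.length) {
            return false;
        }
        for (int i = 0; i < queryFields.length; i++) {
            if (queryFields[i].equals(indexFields[i]) == false) {
                return false;
            }
        }
        return true;
    }
}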
docCount / 2; // expected doc count of duplicate data - if (pointsHaveDuplicateData(pointValues, duplDocCount)) { - docsDupl += docCount; - } else { - docsNoDupl += docCount; - } - } - return (docsDupl > docsNoDupl); - } - - static boolean pointsHaveDuplicateData(PointValues pointValues, int duplDocCount) throws IOException { - long minValue = LongPoint.decodeDimension(pointValues.getMinPackedValue(), 0); - long maxValue = LongPoint.decodeDimension(pointValues.getMaxPackedValue(), 0); - boolean hasDuplicateData = true; - while ((minValue < maxValue) && hasDuplicateData) { - long midValue = Math.floorDiv(minValue, 2) + Math.floorDiv(maxValue, 2); // to avoid overflow first divide each value by 2 - long countLeft = estimatePointCount(pointValues, minValue, midValue); - long countRight = estimatePointCount(pointValues, midValue + 1, maxValue); - if ((countLeft >= countRight) && (countLeft > duplDocCount)) { - maxValue = midValue; - } else if ((countRight > countLeft) && (countRight > duplDocCount)) { - minValue = midValue + 1; - } else { - hasDuplicateData = false; - } - } - return hasDuplicateData; - } - - private static long estimatePointCount(PointValues pointValues, long minValue, long maxValue) { - final byte[] minValueAsBytes = new byte[Long.BYTES]; - LongPoint.encodeDimension(minValue, minValueAsBytes, 0); - final byte[] maxValueAsBytes = new byte[Long.BYTES]; - LongPoint.encodeDimension(maxValue, maxValueAsBytes, 0); - - PointValues.IntersectVisitor visitor = new PointValues.IntersectVisitor() { - @Override - public void grow(int count) {} - - @Override - public void visit(int docID) {} - - @Override - public void visit(int docID, byte[] packedValue) {} - - @Override - public PointValues.Relation compare(byte[] minPackedValue, byte[] maxPackedValue) { - if (FutureArrays.compareUnsigned(minPackedValue, 0, Long.BYTES, maxValueAsBytes, 0, Long.BYTES) > 0 - || FutureArrays.compareUnsigned(maxPackedValue, 0, Long.BYTES, minValueAsBytes, 0, Long.BYTES) < 0) { - return PointValues.Relation.CELL_OUTSIDE_QUERY; - } - if (FutureArrays.compareUnsigned(minPackedValue, 0, Long.BYTES, minValueAsBytes, 0, Long.BYTES) < 0 - || FutureArrays.compareUnsigned(maxPackedValue, 0, Long.BYTES, maxValueAsBytes, 0, Long.BYTES) > 0) { - return PointValues.Relation.CELL_CROSSES_QUERY; - } - return PointValues.Relation.CELL_INSIDE_QUERY; - } - }; - return pointValues.estimatePointCount(visitor); - } - private static class TimeExceededException extends RuntimeException {} } diff --git a/server/src/main/java/org/opensearch/search/sort/FieldSortBuilder.java b/server/src/main/java/org/opensearch/search/sort/FieldSortBuilder.java index 17d778eb358b3..d4a3b8651e7a1 100644 --- a/server/src/main/java/org/opensearch/search/sort/FieldSortBuilder.java +++ b/server/src/main/java/org/opensearch/search/sort/FieldSortBuilder.java @@ -41,6 +41,7 @@ import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; import org.opensearch.common.ParseField; +import org.opensearch.common.ParsingException; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.logging.DeprecationLogger; @@ -752,6 +753,27 @@ public static FieldSortBuilder fromXContent(XContentParser parser, String fieldN return PARSER.parse(parser, new FieldSortBuilder(fieldName), null); } + public static FieldSortBuilder fromXContentObject(XContentParser parser, String fieldName) throws IOException { + FieldSortBuilder builder = null; + String currentFieldName = null; + 
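The deleted pointsHaveDuplicateData() heuristic deserves a worked illustration, since its bisection is easy to misread: each round halves the value range, keeps whichever half holds the larger estimated count, and succeeds only if that count stays above half the documents all the way down to a single value. A toy re-implementation over an in-memory sorted array, under that reading of the deleted code; countInRange stands in for PointValues.estimatePointCount.

import java.util.Arrays;

final class DuplicateDataSketch {
    // toy stand-in for PointValues.estimatePointCount over [min, max]
    static long countInRange(long[] values, long min, long max) {
        return Arrays.stream(values).filter(v -> v >= min && v <= max).count();
    }

    static boolean hasDuplicateMajority(long[] sortedValues) {
        long min = sortedValues[0];
        long max = sortedValues[sortedValues.length - 1];
        long half = sortedValues.length / 2; // the duplDocCount threshold above
        while (min < max) {
            long mid = Math.floorDiv(min, 2) + Math.floorDiv(max, 2); // overflow-safe midpoint
            long left = countInRange(sortedValues, min, mid);
            long right = countInRange(sortedValues, mid + 1, max);
            if (left >= right && left > half) {
                max = mid;      // a dominant value can only be in the left half
            } else if (right > left && right > half) {
                min = mid + 1;  // a dominant value can only be in the right half
            } else {
                return false;   // neither half holds a majority: no dominant value
            }
        }
        return true;
    }

    public static void main(String[] args) {
        System.out.println(hasDuplicateMajority(new long[] { 1, 7, 7, 7, 7, 9 })); // true
        System.out.println(hasDuplicateMajority(new long[] { 1, 2, 3, 4, 5, 6 })); // false
    }
}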
XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.START_OBJECT) { + builder = fromXContent(parser, currentFieldName); + } else { + throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] does not support [" + currentFieldName + "]"); + } + } + + if (builder == null) { + throw new ParsingException(parser.getTokenLocation(), "Invalid " + NAME); + } + + return builder; + } + private static final ObjectParser PARSER = new ObjectParser<>(NAME); static { diff --git a/server/src/main/java/org/opensearch/search/sort/SortBuilder.java b/server/src/main/java/org/opensearch/search/sort/SortBuilder.java index 5452556329361..09470f0b2cb45 100644 --- a/server/src/main/java/org/opensearch/search/sort/SortBuilder.java +++ b/server/src/main/java/org/opensearch/search/sort/SortBuilder.java @@ -41,6 +41,7 @@ import org.opensearch.common.Strings; import org.opensearch.common.io.stream.NamedWriteable; import org.opensearch.common.lucene.search.Queries; +import org.opensearch.common.xcontent.NamedObjectNotFoundException; import org.opensearch.common.xcontent.ToXContentObject; import org.opensearch.common.xcontent.XContentParser; import org.opensearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; @@ -53,13 +54,10 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.HashMap; import java.util.List; -import java.util.Map; import java.util.Objects; import java.util.Optional; -import static java.util.Collections.unmodifiableMap; import static org.opensearch.index.query.AbstractQueryBuilder.parseInnerQueryBuilder; public abstract class SortBuilder> implements NamedWriteable, ToXContentObject, Rewriteable> { @@ -71,17 +69,6 @@ public abstract class SortBuilder> implements NamedWrit public static final ParseField NESTED_FILTER_FIELD = new ParseField("nested_filter"); public static final ParseField NESTED_PATH_FIELD = new ParseField("nested_path"); - private static final Map> PARSERS; - static { - Map> parsers = new HashMap<>(); - parsers.put(ScriptSortBuilder.NAME, ScriptSortBuilder::fromXContent); - parsers.put(GeoDistanceSortBuilder.NAME, GeoDistanceSortBuilder::fromXContent); - parsers.put(GeoDistanceSortBuilder.ALTERNATIVE_NAME, GeoDistanceSortBuilder::fromXContent); - parsers.put(ScoreSortBuilder.NAME, ScoreSortBuilder::fromXContent); - // FieldSortBuilder gets involved if the user specifies a name that isn't one of these. - PARSERS = unmodifiableMap(parsers); - } - /** * Create a {@linkplain SortFieldAndFormat} from this builder. 
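A hedged usage sketch for the new fromXContentObject() helper: it expects the current token to be the outer START_OBJECT and consumes one { "field": { ... } } wrapper. The JSON body is illustrative, and the second argument is unused by the helper as written.

import java.io.IOException;

import org.opensearch.common.xcontent.DeprecationHandler;
import org.opensearch.common.xcontent.NamedXContentRegistry;
import org.opensearch.common.xcontent.XContentParser;
import org.opensearch.common.xcontent.json.JsonXContent;
import org.opensearch.search.sort.FieldSortBuilder;

final class SortParsingSketch {
    static FieldSortBuilder parse() throws IOException {
        String json = "{ \"price\": { \"order\": \"asc\" } }";
        try (XContentParser parser = JsonXContent.jsonXContent.createParser(
                NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
            parser.nextToken(); // step onto the outer START_OBJECT before delegating
            return FieldSortBuilder.fromXContentObject(parser, null);
        }
    }
}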
*/ @@ -155,9 +142,10 @@ private static void parseCompoundSortField(XContentParser parser, List sort = parser.namedObject(SortBuilder.class, fieldName, null); + sortFields.add(sort); + } catch (NamedObjectNotFoundException err) { sortFields.add(FieldSortBuilder.fromXContent(parser, fieldName)); } } @@ -290,11 +278,6 @@ protected static QueryBuilder parseNestedFilter(XContentParser parser) { } } - @FunctionalInterface - private interface Parser> { - T fromXContent(XContentParser parser, String elementName) throws IOException; - } - @Override public String toString() { return Strings.toString(this, true, true); diff --git a/server/src/main/java/org/opensearch/search/sort/SortParser.java b/server/src/main/java/org/opensearch/search/sort/SortParser.java new file mode 100644 index 0000000000000..870981c47382c --- /dev/null +++ b/server/src/main/java/org/opensearch/search/sort/SortParser.java @@ -0,0 +1,23 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.sort; + +import org.opensearch.common.xcontent.XContentParser; + +import java.io.IOException; + +@FunctionalInterface +public interface SortParser> { + /** + * Creates a new {@link SortBuilder} from the sort held by the + * {@link XContentParser}. The state on the parser contained in this context + * will be changed as a side effect of this method call + */ + SB fromXContent(XContentParser parser, String elementName) throws IOException; +} diff --git a/server/src/main/java/org/opensearch/snapshots/InFlightShardSnapshotStates.java b/server/src/main/java/org/opensearch/snapshots/InFlightShardSnapshotStates.java index 81749b274bd96..469b57562284f 100644 --- a/server/src/main/java/org/opensearch/snapshots/InFlightShardSnapshotStates.java +++ b/server/src/main/java/org/opensearch/snapshots/InFlightShardSnapshotStates.java @@ -96,8 +96,8 @@ private static void addStateInformation( busyIds.computeIfAbsent(indexName, k -> new HashSet<>()).add(shardId); assert assertGenerationConsistency(generations, indexName, shardId, shardState.generation()); } else if (shardState.state() == SnapshotsInProgress.ShardState.SUCCESS) { - assert busyIds.getOrDefault(indexName, Collections.emptySet()) - .contains(shardId) == false : "Can't have a successful operation queued after an in-progress operation"; + assert busyIds.getOrDefault(indexName, Collections.emptySet()).contains(shardId) == false + : "Can't have a successful operation queued after an in-progress operation"; generations.computeIfAbsent(indexName, k -> new HashMap<>()).put(shardId, shardState.generation()); } } diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java index 1c357ca79202f..3b765cf179821 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java @@ -274,9 +274,8 @@ private void startNewShards(SnapshotsInProgress.Entry entry, Map() { diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java index 3ab5cd64ff11f..122c13337fa70 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java @@ -1924,8 +1924,8 @@ 
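The new SortParser interface is a public replacement for the private Parser interface removed from SortBuilder above, and the existing fromXContent factories already match its single-method shape. A short sketch; no registration API is implied beyond what the diff shows.

import org.opensearch.search.sort.FieldSortBuilder;
import org.opensearch.search.sort.ScriptSortBuilder;
import org.opensearch.search.sort.SortParser;

final class SortParserSketch {
    // method references satisfy SB fromXContent(XContentParser, String)
    static final SortParser<FieldSortBuilder> FIELD = FieldSortBuilder::fromXContent;
    static final SortParser<ScriptSortBuilder> SCRIPT = ScriptSortBuilder::fromXContent;
}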
private void runReadyDeletions(RepositoryData repositoryData, String repository) @Override public ClusterState execute(ClusterState currentState) { - assert readyDeletions(currentState) - .v1() == currentState : "Deletes should have been set to ready by finished snapshot deletes and finalizations"; + assert readyDeletions(currentState).v1() == currentState + : "Deletes should have been set to ready by finished snapshot deletes and finalizations"; for (SnapshotDeletionsInProgress.Entry entry : currentState.custom( SnapshotDeletionsInProgress.TYPE, SnapshotDeletionsInProgress.EMPTY @@ -2667,8 +2667,8 @@ private void deleteSnapshotsFromRepository(SnapshotDeletionsInProgress.Entry del repositoriesService.getRepositoryData(deleteEntry.repository(), new ActionListener() { @Override public void onResponse(RepositoryData repositoryData) { - assert repositoryData - .getGenId() == expectedRepoGen : "Repository generation should not change as long as a ready delete is found in the cluster state but found [" + assert repositoryData.getGenId() == expectedRepoGen + : "Repository generation should not change as long as a ready delete is found in the cluster state but found [" + expectedRepoGen + "] in cluster state and [" + repositoryData.getGenId() @@ -2746,9 +2746,8 @@ protected SnapshotDeletionsInProgress filterDeletions(SnapshotDeletionsInProgres @Override protected void handleListeners(List> deleteListeners) { - assert repositoryData.getSnapshotIds() - .stream() - .noneMatch(deleteEntry.getSnapshots()::contains) : "Repository data contained snapshot ids " + assert repositoryData.getSnapshotIds().stream().noneMatch(deleteEntry.getSnapshots()::contains) + : "Repository data contained snapshot ids " + repositoryData.getSnapshotIds() + " that should should been deleted by [" + deleteEntry @@ -2866,12 +2865,8 @@ public final void clusterStateProcessed(String source, ClusterState oldState, Cl } } else { leaveRepoLoop(deleteEntry.repository()); - assert readyDeletions.stream() - .noneMatch(entry -> entry.repository().equals(deleteEntry.repository())) : "New finalizations " - + newFinalizations - + " added even though deletes " - + readyDeletions - + " are ready"; + assert readyDeletions.stream().noneMatch(entry -> entry.repository().equals(deleteEntry.repository())) + : "New finalizations " + newFinalizations + " added even though deletes " + readyDeletions + " are ready"; for (SnapshotsInProgress.Entry entry : newFinalizations) { endSnapshot(entry, newState.metadata(), repositoryData); } @@ -3837,8 +3832,8 @@ synchronized boolean assertNotQueued(Snapshot snapshot) { synchronized boolean assertConsistent() { assert (latestKnownMetaData == null && snapshotsToFinalize.isEmpty()) - || (latestKnownMetaData != null - && snapshotsToFinalize.isEmpty() == false) : "Should not hold on to metadata if there are no more queued snapshots"; + || (latestKnownMetaData != null && snapshotsToFinalize.isEmpty() == false) + : "Should not hold on to metadata if there are no more queued snapshots"; assert snapshotsToFinalize.values().stream().noneMatch(Collection::isEmpty) : "Found empty queue in " + snapshotsToFinalize; return true; } diff --git a/server/src/main/java/org/opensearch/transport/InboundHandler.java b/server/src/main/java/org/opensearch/transport/InboundHandler.java index bf4ada279001b..6aa319934b478 100644 --- a/server/src/main/java/org/opensearch/transport/InboundHandler.java +++ b/server/src/main/java/org/opensearch/transport/InboundHandler.java @@ -47,6 +47,7 @@ import 
org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.threadpool.ThreadPool; +import java.io.EOFException; import java.io.IOException; import java.net.InetSocketAddress; import java.nio.ByteBuffer; @@ -149,27 +150,13 @@ private void messageReceived(TcpChannel channel, InboundMessage message, long st streamInput = namedWriteableStream(message.openOrGetStreamInput()); assertRemoteVersion(streamInput, header.getVersion()); if (header.isError()) { - handlerResponseError(streamInput, handler); + handlerResponseError(requestId, streamInput, handler); } else { - handleResponse(remoteAddress, streamInput, handler); - } - // Check the entire message has been read - final int nextByte = streamInput.read(); - // calling read() is useful to make sure the message is fully read, even if there is an EOS marker - if (nextByte != -1) { - throw new IllegalStateException( - "Message not fully read (response) for requestId [" - + requestId - + "], handler [" - + handler - + "], error [" - + header.isError() - + "]; resetting" - ); + handleResponse(requestId, remoteAddress, streamInput, handler); } } else { assert header.isError() == false; - handleResponse(remoteAddress, EMPTY_STREAM_INPUT, handler); + handleResponse(requestId, remoteAddress, EMPTY_STREAM_INPUT, handler); } } } @@ -246,22 +233,11 @@ private void handleRequest(TcpChannel channel, Head assertRemoteVersion(stream, header.getVersion()); final RequestHandlerRegistry reg = requestHandlers.getHandler(action); assert reg != null; - final T request = reg.newRequest(stream); + + final T request = newRequest(requestId, action, stream, reg); request.remoteAddress(new TransportAddress(channel.getRemoteAddress())); - // in case we throw an exception, i.e. when the limit is hit, we don't want to verify - final int nextByte = stream.read(); - // calling read() is useful to make sure the message is fully read, even if there some kind of EOS marker - if (nextByte != -1) { - throw new IllegalStateException( - "Message not fully read (request) for requestId [" - + requestId - + "], action [" - + action - + "], available [" - + stream.available() - + "]; resetting" - ); - } + checkStreamIsFullyConsumed(requestId, action, stream); + final String executor = reg.getExecutor(); if (ThreadPool.Names.SAME.equals(executor)) { try { @@ -279,6 +255,97 @@ } } + /** + * Creates a new request instance out of the input stream. Throws IllegalStateException if the end of + * the stream was reached before the request is fully deserialized from the stream. + * @param <T> transport request type + * @param requestId request identifier + * @param action action name + * @param stream stream + * @param reg request handler registry + * @return new request instance + * @throws IOException IOException + * @throws IllegalStateException IllegalStateException + */ + private T newRequest( + final long requestId, + final String action, + final StreamInput stream, + final RequestHandlerRegistry reg + ) throws IOException { + try { + return reg.newRequest(stream); + } catch (final EOFException e) { + // Another flavor of (de)serialization issue is when the stream contains fewer bytes than + // the request handler needs to deserialize the payload. + throw new IllegalStateException( + "Message fully read (request) but more data is expected for requestId [" + + requestId + + "], action [" + + action + + "]; resetting", + e + ); + } + } + + /** + * Checks if the stream is fully consumed and throws an exception if that is not the case.
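newRequest() folds an EOFException into the same IllegalStateException family that checkStreamIsFullyConsumed() uses for trailing bytes. Below is a self-contained toy of the trailing-bytes case, built on the stream primitives this codebase already uses; the payload is made up.

import org.opensearch.common.io.stream.BytesStreamOutput;
import org.opensearch.common.io.stream.StreamInput;

public final class StreamConsumptionDemo {
    public static void main(String[] args) throws Exception {
        BytesStreamOutput out = new BytesStreamOutput();
        out.writeInt(42);
        out.writeLong(7L); // trailing bytes a short-reading deserializer leaves behind

        try (StreamInput in = out.bytes().streamInput()) {
            in.readInt();          // the "deserializer" stops early
            if (in.read() != -1) { // the check InboundHandler now applies
                System.out.println("message not fully read; resetting");
            }
        }
    }
}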
+ * @param requestId request identifier + * @param action action name + * @param stream stream + * @throws IOException IOException + */ + private void checkStreamIsFullyConsumed(final long requestId, final String action, final StreamInput stream) throws IOException { + // in case we throw an exception, i.e. when the limit is hit, we don't want to verify + final int nextByte = stream.read(); + + // calling read() is useful to make sure the message is fully read, even if there is some kind of EOS marker + if (nextByte != -1) { + throw new IllegalStateException( + "Message not fully read (request) for requestId [" + + requestId + + "], action [" + + action + + "], available [" + + stream.available() + + "]; resetting" + ); + } + } + + /** + * Checks if the stream is fully consumed and throws an exception if that is not the case. + * @param requestId request identifier + * @param handler response handler + * @param stream stream + * @param error "true" if the response represents an error, "false" otherwise + * @throws IOException IOException + */ + private void checkStreamIsFullyConsumed( + final long requestId, + final TransportResponseHandler handler, + final StreamInput stream, + final boolean error + ) throws IOException { + if (stream != EMPTY_STREAM_INPUT) { + // Check the entire message has been read + final int nextByte = stream.read(); + // calling read() is useful to make sure the message is fully read, even if there is an EOS marker + if (nextByte != -1) { + throw new IllegalStateException( + "Message not fully read (response) for requestId [" + + requestId + + "], handler [" + + handler + + "], error [" + + error + + "]; resetting" + ); + } + } + } + private static void sendErrorResponse(String actionName, TransportChannel transportChannel, Exception e) { try { transportChannel.sendResponse(e); @@ -289,6 +356,7 @@ } private void handleResponse( + final long requestId, InetSocketAddress remoteAddress, final StreamInput stream, final TransportResponseHandler handler @@ -297,6 +365,7 @@ try { response = handler.read(stream); response.remoteAddress(new TransportAddress(remoteAddress)); + checkStreamIsFullyConsumed(requestId, handler, stream, false); } catch (Exception e) { final Exception serializationException = new TransportSerializationException( "Failed to deserialize response from handler [" + handler + "]", @@ -322,10 +391,11 @@ } } - private void handlerResponseError(StreamInput stream, final TransportResponseHandler handler) { + private void handlerResponseError(final long requestId, StreamInput stream, final TransportResponseHandler handler) { Exception error; try { error = stream.readException(); + checkStreamIsFullyConsumed(requestId, handler, stream, true); } catch (Exception e) { error = new TransportSerializationException( "Failed to deserialize exception response from stream for handler [" + handler + "]", diff --git a/server/src/main/java/org/opensearch/transport/TransportHandshaker.java b/server/src/main/java/org/opensearch/transport/TransportHandshaker.java index 5cf910e936bf9..c85a8eebd8fbd 100644 --- a/server/src/main/java/org/opensearch/transport/TransportHandshaker.java +++ b/server/src/main/java/org/opensearch/transport/TransportHandshaker.java @@ -63,6 +63,9 @@ final class TransportHandshaker { private final ThreadPool threadPool; private final HandshakeRequestSender handshakeRequestSender; + // @todo remove in
3.0.0 + static final Version V_3_0_0 = Version.fromId(3000099 ^ Version.MASK); + TransportHandshaker(Version version, ThreadPool threadPool, HandshakeRequestSender handshakeRequestSender) { this.version = version; this.threadPool = threadPool; @@ -131,8 +134,9 @@ void handleHandshake(TransportChannel channel, long requestId, StreamInput strea // 1. if remote node is 7.x, then StreamInput version would be 6.8.0 // 2. if remote node is 6.8 then it would be 5.6.0 // 3. if remote node is OpenSearch 1.x then it would be 6.7.99 - if ((this.version.onOrAfter(Version.V_1_0_0) && this.version.before(Version.V_2_0_0)) + if ((this.version.onOrAfter(Version.V_1_0_0) && this.version.before(V_3_0_0)) && (stream.getVersion().equals(LegacyESVersion.fromId(6080099)) || stream.getVersion().equals(Version.fromId(5060099)))) { + // send 7.10.2 in response to ensure compatibility w/ Legacy 7.10.x nodes for rolling upgrade support channel.sendResponse(new HandshakeResponse(LegacyESVersion.V_7_10_2)); } else { channel.sendResponse(new HandshakeResponse(this.version)); diff --git a/server/src/main/java/org/opensearch/transport/Transports.java b/server/src/main/java/org/opensearch/transport/Transports.java index bce428221fb75..7d9f013db5338 100644 --- a/server/src/main/java/org/opensearch/transport/Transports.java +++ b/server/src/main/java/org/opensearch/transport/Transports.java @@ -77,11 +77,8 @@ public static boolean assertNotTransportThread(String reason) { public static boolean assertDefaultThreadContext(ThreadContext threadContext) { assert threadContext.getRequestHeadersOnly().isEmpty() - || threadContext.getRequestHeadersOnly().size() == 1 - && threadContext.getRequestHeadersOnly().containsKey(Task.X_OPAQUE_ID) : "expected empty context but was " - + threadContext.getRequestHeadersOnly() - + " on " - + Thread.currentThread().getName(); + || threadContext.getRequestHeadersOnly().size() == 1 && threadContext.getRequestHeadersOnly().containsKey(Task.X_OPAQUE_ID) + : "expected empty context but was " + threadContext.getRequestHeadersOnly() + " on " + Thread.currentThread().getName(); return true; } } diff --git a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java index a1455a715e461..b5859e1fb18a9 100644 --- a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java @@ -465,11 +465,10 @@ public void testSearchPhaseExecutionException() throws IOException { } public void testRoutingMissingException() throws IOException { - RoutingMissingException ex = serialize(new RoutingMissingException("idx", "type", "id")); + RoutingMissingException ex = serialize(new RoutingMissingException("idx", "id")); assertEquals("idx", ex.getIndex().getName()); - assertEquals("type", ex.getType()); assertEquals("id", ex.getId()); - assertEquals("routing is required for [idx]/[type]/[id]", ex.getMessage()); + assertEquals("routing is required for [idx]/[id]", ex.getMessage()); } public void testRepositoryException() throws IOException { diff --git a/server/src/test/java/org/opensearch/OpenSearchExceptionTests.java b/server/src/test/java/org/opensearch/OpenSearchExceptionTests.java index 3d0b334622cd5..9f32af143ee2d 100644 --- a/server/src/test/java/org/opensearch/OpenSearchExceptionTests.java +++ b/server/src/test/java/org/opensearch/OpenSearchExceptionTests.java @@ -566,7 +566,7 @@ public void testFromXContent() throws IOException { public void 
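The V_3_0_0 constant above is produced by XOR-ing a raw version id with Version.MASK. A worked sketch of that arithmetic, assuming the conventional major*1000000 id layout with a +99 suffix and a mask value of 0x08000000; both are assumptions about Version internals, stated only to make the XOR readable.

public final class VersionIdDemo {
    // assumption: the bit OpenSearch uses to keep its ids disjoint from legacy ones
    static final int MASK = 0x08000000;

    public static void main(String[] args) {
        int rawId = 3000099;                     // 3.0.0 with the +99 suffix
        int maskedId = rawId ^ MASK;             // what Version.fromId(...) receives
        int major = (maskedId ^ MASK) / 1000000; // un-mask, then decode the major
        System.out.println(major);               // prints 3
    }
}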
testFromXContentWithCause() throws IOException { OpenSearchException e = new OpenSearchException( "foo", - new OpenSearchException("bar", new OpenSearchException("baz", new RoutingMissingException("_test", "_type", "_id"))) + new OpenSearchException("bar", new OpenSearchException("baz", new RoutingMissingException("_test", "_id"))) ); final XContent xContent = randomFrom(XContentType.values()).xContent(); @@ -594,7 +594,7 @@ public void testFromXContentWithCause() throws IOException { cause = (OpenSearchException) cause.getCause(); assertEquals( cause.getMessage(), - "OpenSearch exception [type=routing_missing_exception, reason=routing is required for [_test]/[_type]/[_id]]" + "OpenSearch exception [type=routing_missing_exception, reason=routing is required for [_test]/[_id]]" ); assertThat(cause.getHeaderKeys(), hasSize(0)); assertThat(cause.getMetadataKeys(), hasSize(2)); @@ -603,7 +603,7 @@ public void testFromXContentWithCause() throws IOException { } public void testFromXContentWithHeadersAndMetadata() throws IOException { - RoutingMissingException routing = new RoutingMissingException("_test", "_type", "_id"); + RoutingMissingException routing = new RoutingMissingException("_test", "_id"); OpenSearchException baz = new OpenSearchException("baz", routing); baz.addHeader("baz_0", "baz0"); baz.addMetadata("opensearch.baz_1", "baz1"); @@ -656,7 +656,7 @@ public void testFromXContentWithHeadersAndMetadata() throws IOException { cause = (OpenSearchException) cause.getCause(); assertEquals( cause.getMessage(), - "OpenSearch exception [type=routing_missing_exception, reason=routing is required for [_test]/[_type]/[_id]]" + "OpenSearch exception [type=routing_missing_exception, reason=routing is required for [_test]/[_id]]" ); assertThat(cause.getHeaderKeys(), hasSize(0)); assertThat(cause.getMetadataKeys(), hasSize(2)); @@ -878,11 +878,11 @@ public void testFailureToAndFromXContentWithDetails() throws IOException { break; case 4: // JDK exception with cause - failureCause = new RoutingMissingException("idx", "type", "id"); + failureCause = new RoutingMissingException("idx", "id"); failure = new RuntimeException("E", failureCause); expectedCause = new OpenSearchException( - "OpenSearch exception [type=routing_missing_exception, " + "reason=routing is required for [idx]/[type]/[id]]" + "OpenSearch exception [type=routing_missing_exception, " + "reason=routing is required for [idx]/[id]]" ); expectedCause.addMetadata("opensearch.index", "idx"); expectedCause.addMetadata("opensearch.index_uuid", "_na_"); diff --git a/server/src/test/java/org/opensearch/action/DocWriteResponseTests.java b/server/src/test/java/org/opensearch/action/DocWriteResponseTests.java index e80d5b1c70bd1..30dcaf8d9c1c1 100644 --- a/server/src/test/java/org/opensearch/action/DocWriteResponseTests.java +++ b/server/src/test/java/org/opensearch/action/DocWriteResponseTests.java @@ -39,6 +39,7 @@ import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentParser; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.index.mapper.MapperService; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.ShardId; import org.opensearch.test.OpenSearchTestCase; @@ -53,7 +54,6 @@ public class DocWriteResponseTests extends OpenSearchTestCase { public void testGetLocation() { final DocWriteResponse response = new DocWriteResponse( new ShardId("index", "uuid", 0), - "type", "id", SequenceNumbers.UNASSIGNED_SEQ_NO, 17, @@ -61,14 +61,13 @@ public 
void testGetLocation() { Result.CREATED ) { }; - assertEquals("/index/type/id", response.getLocation(null)); - assertEquals("/index/type/id?routing=test_routing", response.getLocation("test_routing")); + assertEquals("/index/" + MapperService.SINGLE_MAPPING_NAME + "/id", response.getLocation(null)); + assertEquals("/index/" + MapperService.SINGLE_MAPPING_NAME + "/id?routing=test_routing", response.getLocation("test_routing")); } public void testGetLocationNonAscii() { final DocWriteResponse response = new DocWriteResponse( new ShardId("index", "uuid", 0), - "type", "❤", SequenceNumbers.UNASSIGNED_SEQ_NO, 17, @@ -76,14 +75,13 @@ public void testGetLocationNonAscii() { Result.CREATED ) { }; - assertEquals("/index/type/%E2%9D%A4", response.getLocation(null)); - assertEquals("/index/type/%E2%9D%A4?routing=%C3%A4", response.getLocation("ä")); + assertEquals("/index/" + MapperService.SINGLE_MAPPING_NAME + "/%E2%9D%A4", response.getLocation(null)); + assertEquals("/index/" + MapperService.SINGLE_MAPPING_NAME + "/%E2%9D%A4?routing=%C3%A4", response.getLocation("ä")); } public void testGetLocationWithSpaces() { final DocWriteResponse response = new DocWriteResponse( new ShardId("index", "uuid", 0), - "type", "a b", SequenceNumbers.UNASSIGNED_SEQ_NO, 17, @@ -91,8 +89,8 @@ public void testGetLocationWithSpaces() { Result.CREATED ) { }; - assertEquals("/index/type/a+b", response.getLocation(null)); - assertEquals("/index/type/a+b?routing=c+d", response.getLocation("c d")); + assertEquals("/index/" + MapperService.SINGLE_MAPPING_NAME + "/a+b", response.getLocation(null)); + assertEquals("/index/" + MapperService.SINGLE_MAPPING_NAME + "/a+b?routing=c+d", response.getLocation("c d")); } /** @@ -102,7 +100,6 @@ public void testGetLocationWithSpaces() { public void testToXContentDoesntIncludeForcedRefreshUnlessForced() throws IOException { DocWriteResponse response = new DocWriteResponse( new ShardId("index", "uuid", 0), - "type", "id", SequenceNumbers.UNASSIGNED_SEQ_NO, 17, diff --git a/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java index 43d5a85094a36..72c7b5168fe15 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java @@ -53,7 +53,6 @@ import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Settings; -import org.opensearch.index.engine.Engine; import org.opensearch.common.unit.TimeValue; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.ReplicationGroup; @@ -78,6 +77,7 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; +import static org.mockito.Mockito.doNothing; import static org.opensearch.action.support.replication.ClusterStateCreationUtils.state; import static org.opensearch.test.ClusterServiceUtils.createClusterService; import static org.opensearch.test.ClusterServiceUtils.setState; @@ -194,8 +194,7 @@ private void executeOnPrimaryOrReplica(boolean phase1) throws Throwable { public void testShardIsFlushed() throws Throwable { final ArgumentCaptor flushRequest = ArgumentCaptor.forClass(FlushRequest.class); - when(indexShard.flush(flushRequest.capture())).thenReturn(new 
Engine.CommitId(new byte[0])); - + doNothing().when(indexShard).flush(flushRequest.capture()); executeOnPrimaryOrReplica(); verify(indexShard, times(1)).flush(any(FlushRequest.class)); assertThat(flushRequest.getValue().force(), is(true)); diff --git a/server/src/test/java/org/opensearch/action/admin/indices/flush/SyncedFlushUnitTests.java b/server/src/test/java/org/opensearch/action/admin/indices/flush/SyncedFlushUnitTests.java deleted file mode 100644 index a2f85228024ea..0000000000000 --- a/server/src/test/java/org/opensearch/action/admin/indices/flush/SyncedFlushUnitTests.java +++ /dev/null @@ -1,208 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.action.admin.indices.flush; - -import com.carrotsearch.hppc.ObjectIntHashMap; -import com.carrotsearch.hppc.ObjectIntMap; -import org.opensearch.action.admin.indices.flush.SyncedFlushResponse.ShardCounts; -import org.opensearch.cluster.routing.ShardRouting; -import org.opensearch.cluster.routing.ShardRoutingState; -import org.opensearch.cluster.routing.TestShardRouting; -import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.index.shard.ShardId; -import org.opensearch.indices.flush.ShardsSyncedFlushResult; -import org.opensearch.indices.flush.SyncedFlushService; -import org.opensearch.rest.RestStatus; -import org.opensearch.test.OpenSearchTestCase; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static org.opensearch.test.XContentTestUtils.convertToMap; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasSize; - -public class SyncedFlushUnitTests extends OpenSearchTestCase { - - private static class TestPlan { - public SyncedFlushResponse.ShardCounts totalCounts; - public Map countsPerIndex = new HashMap<>(); - public ObjectIntMap expectedFailuresPerIndex = new ObjectIntHashMap<>(); - public SyncedFlushResponse result; - } - - public void testIndicesSyncedFlushResult() throws IOException { - final TestPlan testPlan = createTestPlan(); - assertThat(testPlan.result.totalShards(), equalTo(testPlan.totalCounts.total)); - assertThat(testPlan.result.successfulShards(), equalTo(testPlan.totalCounts.successful)); - assertThat(testPlan.result.failedShards(), equalTo(testPlan.totalCounts.failed)); - assertThat(testPlan.result.restStatus(), 
equalTo(testPlan.totalCounts.failed > 0 ? RestStatus.CONFLICT : RestStatus.OK)); - Map asMap = convertToMap(testPlan.result); - assertShardCount("_shards header", (Map) asMap.get("_shards"), testPlan.totalCounts); - - assertThat("unexpected number of indices", asMap.size(), equalTo(1 + testPlan.countsPerIndex.size())); // +1 for the shards header - for (String index : testPlan.countsPerIndex.keySet()) { - Map indexMap = (Map) asMap.get(index); - assertShardCount(index, indexMap, testPlan.countsPerIndex.get(index)); - List> failureList = (List>) indexMap.get("failures"); - final int expectedFailures = testPlan.expectedFailuresPerIndex.get(index); - if (expectedFailures == 0) { - assertNull(index + " has unexpected failures", failureList); - } else { - assertNotNull(index + " should have failures", failureList); - assertThat(failureList, hasSize(expectedFailures)); - } - } - } - - public void testResponseStreaming() throws IOException { - final TestPlan testPlan = createTestPlan(); - assertThat(testPlan.result.totalShards(), equalTo(testPlan.totalCounts.total)); - assertThat(testPlan.result.successfulShards(), equalTo(testPlan.totalCounts.successful)); - assertThat(testPlan.result.failedShards(), equalTo(testPlan.totalCounts.failed)); - assertThat(testPlan.result.restStatus(), equalTo(testPlan.totalCounts.failed > 0 ? RestStatus.CONFLICT : RestStatus.OK)); - BytesStreamOutput out = new BytesStreamOutput(); - testPlan.result.writeTo(out); - StreamInput in = out.bytes().streamInput(); - SyncedFlushResponse readResponse = new SyncedFlushResponse(in); - assertThat(readResponse.totalShards(), equalTo(testPlan.totalCounts.total)); - assertThat(readResponse.successfulShards(), equalTo(testPlan.totalCounts.successful)); - assertThat(readResponse.failedShards(), equalTo(testPlan.totalCounts.failed)); - assertThat(readResponse.restStatus(), equalTo(testPlan.totalCounts.failed > 0 ? 
RestStatus.CONFLICT : RestStatus.OK)); - assertThat(readResponse.getShardsResultPerIndex().size(), equalTo(testPlan.result.getShardsResultPerIndex().size())); - for (Map.Entry> entry : readResponse.getShardsResultPerIndex().entrySet()) { - List originalShardsResults = testPlan.result.getShardsResultPerIndex().get(entry.getKey()); - assertNotNull(originalShardsResults); - List readShardsResults = entry.getValue(); - assertThat(readShardsResults.size(), equalTo(originalShardsResults.size())); - for (int i = 0; i < readShardsResults.size(); i++) { - ShardsSyncedFlushResult originalShardResult = originalShardsResults.get(i); - ShardsSyncedFlushResult readShardResult = readShardsResults.get(i); - assertThat(originalShardResult.failureReason(), equalTo(readShardResult.failureReason())); - assertThat(originalShardResult.failed(), equalTo(readShardResult.failed())); - assertThat(originalShardResult.getShardId(), equalTo(readShardResult.getShardId())); - assertThat(originalShardResult.successfulShards(), equalTo(readShardResult.successfulShards())); - assertThat(originalShardResult.syncId(), equalTo(readShardResult.syncId())); - assertThat(originalShardResult.totalShards(), equalTo(readShardResult.totalShards())); - assertThat(originalShardResult.failedShards().size(), equalTo(readShardResult.failedShards().size())); - for (Map.Entry shardEntry : originalShardResult.failedShards() - .entrySet()) { - SyncedFlushService.ShardSyncedFlushResponse readShardResponse = readShardResult.failedShards().get(shardEntry.getKey()); - assertNotNull(readShardResponse); - SyncedFlushService.ShardSyncedFlushResponse originalShardResponse = shardEntry.getValue(); - assertThat(originalShardResponse.failureReason(), equalTo(readShardResponse.failureReason())); - assertThat(originalShardResponse.success(), equalTo(readShardResponse.success())); - } - assertThat(originalShardResult.shardResponses().size(), equalTo(readShardResult.shardResponses().size())); - for (Map.Entry shardEntry : originalShardResult.shardResponses() - .entrySet()) { - SyncedFlushService.ShardSyncedFlushResponse readShardResponse = readShardResult.shardResponses() - .get(shardEntry.getKey()); - assertNotNull(readShardResponse); - SyncedFlushService.ShardSyncedFlushResponse originalShardResponse = shardEntry.getValue(); - assertThat(originalShardResponse.failureReason(), equalTo(readShardResponse.failureReason())); - assertThat(originalShardResponse.success(), equalTo(readShardResponse.success())); - } - } - } - } - - private void assertShardCount(String name, Map header, ShardCounts expectedCounts) { - assertThat(name + " has unexpected total count", (Integer) header.get("total"), equalTo(expectedCounts.total)); - assertThat(name + " has unexpected successful count", (Integer) header.get("successful"), equalTo(expectedCounts.successful)); - assertThat(name + " has unexpected failed count", (Integer) header.get("failed"), equalTo(expectedCounts.failed)); - } - - protected TestPlan createTestPlan() { - final TestPlan testPlan = new TestPlan(); - final Map> indicesResults = new HashMap<>(); - final int indexCount = randomIntBetween(1, 10); - int totalShards = 0; - int totalSuccesful = 0; - int totalFailed = 0; - for (int i = 0; i < indexCount; i++) { - final String index = "index_" + i; - int shards = randomIntBetween(1, 4); - int replicas = randomIntBetween(0, 2); - int successful = 0; - int failed = 0; - int failures = 0; - List shardsResults = new ArrayList<>(); - for (int shard = 0; shard < shards; shard++) { - final ShardId shardId = new 
ShardId(index, "_na_", shard); - if (randomInt(5) < 2) { - // total shard failure - failed += replicas + 1; - failures++; - shardsResults.add(new ShardsSyncedFlushResult(shardId, replicas + 1, "simulated total failure")); - } else { - Map shardResponses = new HashMap<>(); - for (int copy = 0; copy < replicas + 1; copy++) { - final ShardRouting shardRouting = TestShardRouting.newShardRouting( - index, - shard, - "node_" + shardId + "_" + copy, - null, - copy == 0, - ShardRoutingState.STARTED - ); - if (randomInt(5) < 2) { - // shard copy failure - failed++; - failures++; - shardResponses.put(shardRouting, new SyncedFlushService.ShardSyncedFlushResponse("copy failure " + shardId)); - } else { - successful++; - shardResponses.put(shardRouting, new SyncedFlushService.ShardSyncedFlushResponse((String) null)); - } - } - shardsResults.add(new ShardsSyncedFlushResult(shardId, "_sync_id_" + shard, replicas + 1, shardResponses)); - } - } - indicesResults.put(index, shardsResults); - testPlan.countsPerIndex.put(index, new SyncedFlushResponse.ShardCounts(shards * (replicas + 1), successful, failed)); - testPlan.expectedFailuresPerIndex.put(index, failures); - totalFailed += failed; - totalShards += shards * (replicas + 1); - totalSuccesful += successful; - } - testPlan.result = new SyncedFlushResponse(indicesResults); - testPlan.totalCounts = new SyncedFlushResponse.ShardCounts(totalShards, totalSuccesful, totalFailed); - return testPlan; - } - -} diff --git a/server/src/test/java/org/opensearch/action/admin/indices/stats/IndicesStatsTests.java b/server/src/test/java/org/opensearch/action/admin/indices/stats/IndicesStatsTests.java index 869928e216815..0cf9f9fe152d6 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/stats/IndicesStatsTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/stats/IndicesStatsTests.java @@ -60,12 +60,7 @@ public void testSegmentStatsEmptyIndex() { createIndex("test"); IndicesStatsResponse rsp = client().admin().indices().prepareStats("test").get(); SegmentsStats stats = rsp.getTotal().getSegments(); - assertEquals(0, stats.getTermsMemoryInBytes()); - assertEquals(0, stats.getStoredFieldsMemoryInBytes()); - assertEquals(0, stats.getTermVectorsMemoryInBytes()); - assertEquals(0, stats.getNormsMemoryInBytes()); - assertEquals(0, stats.getPointsMemoryInBytes()); - assertEquals(0, stats.getDocValuesMemoryInBytes()); + assertEquals(0, stats.getCount()); } public void testSegmentStats() throws Exception { @@ -102,16 +97,8 @@ public void testSegmentStats() throws Exception { IndicesStatsResponse rsp = client().admin().indices().prepareStats("test").get(); SegmentsStats stats = rsp.getIndex("test").getTotal().getSegments(); - assertThat(stats.getTermsMemoryInBytes(), greaterThan(0L)); - assertThat(stats.getStoredFieldsMemoryInBytes(), greaterThan(0L)); - assertThat(stats.getTermVectorsMemoryInBytes(), greaterThan(0L)); - assertThat(stats.getNormsMemoryInBytes(), greaterThan(0L)); - assertThat(stats.getDocValuesMemoryInBytes(), greaterThan(0L)); - if ((storeType == IndexModule.Type.MMAPFS) || (storeType == IndexModule.Type.HYBRIDFS)) { - assertEquals(0, stats.getPointsMemoryInBytes()); // bkd tree is stored off-heap - } else { - assertThat(stats.getPointsMemoryInBytes(), greaterThan(0L)); // bkd tree is stored on heap - } + // should be more than one segment since data was indexed + assertThat(stats.getCount(), greaterThan(0L)); // now check multiple segments stats are merged together client().prepareIndex("test", "doc", "2").setSource("foo", 
"bar", "bar", "baz", "baz", 43).get(); @@ -119,16 +106,8 @@ public void testSegmentStats() throws Exception { rsp = client().admin().indices().prepareStats("test").get(); SegmentsStats stats2 = rsp.getIndex("test").getTotal().getSegments(); - assertThat(stats2.getTermsMemoryInBytes(), greaterThan(stats.getTermsMemoryInBytes())); - assertThat(stats2.getStoredFieldsMemoryInBytes(), greaterThan(stats.getStoredFieldsMemoryInBytes())); - assertThat(stats2.getTermVectorsMemoryInBytes(), greaterThan(stats.getTermVectorsMemoryInBytes())); - assertThat(stats2.getNormsMemoryInBytes(), greaterThan(stats.getNormsMemoryInBytes())); - assertThat(stats2.getDocValuesMemoryInBytes(), greaterThan(stats.getDocValuesMemoryInBytes())); - if ((storeType == IndexModule.Type.MMAPFS) || (storeType == IndexModule.Type.HYBRIDFS)) { - assertEquals(0, stats2.getPointsMemoryInBytes()); // bkd tree is stored off-heap - } else { - assertThat(stats2.getPointsMemoryInBytes(), greaterThan(stats.getPointsMemoryInBytes())); // bkd tree is stored on heap - } + // stats2 should exceed stats since multiple segments stats were merged + assertThat(stats2.getCount(), greaterThan(stats.getCount())); } public void testCommitStats() throws Exception { diff --git a/server/src/test/java/org/opensearch/action/bulk/BulkItemResponseTests.java b/server/src/test/java/org/opensearch/action/bulk/BulkItemResponseTests.java index 4d33b389c314c..808872fee6f96 100644 --- a/server/src/test/java/org/opensearch/action/bulk/BulkItemResponseTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/BulkItemResponseTests.java @@ -57,7 +57,7 @@ public class BulkItemResponseTests extends OpenSearchTestCase { public void testFailureToString() { - Failure failure = new Failure("index", "type", "id", new RuntimeException("test")); + Failure failure = new Failure("index", "id", new RuntimeException("test")); String toString = failure.toString(); assertThat(toString, containsString("\"type\":\"runtime_exception\"")); assertThat(toString, containsString("\"reason\":\"test\"")); @@ -101,16 +101,15 @@ public void testFailureToAndFromXContent() throws IOException { int itemId = randomIntBetween(0, 100); String index = randomAlphaOfLength(5); - String type = randomAlphaOfLength(5); String id = randomAlphaOfLength(5); DocWriteRequest.OpType opType = randomFrom(DocWriteRequest.OpType.values()); final Tuple exceptions = randomExceptions(); Exception bulkItemCause = (Exception) exceptions.v1(); - Failure bulkItemFailure = new Failure(index, type, id, bulkItemCause); + Failure bulkItemFailure = new Failure(index, id, bulkItemCause); BulkItemResponse bulkItemResponse = new BulkItemResponse(itemId, opType, bulkItemFailure); - Failure expectedBulkItemFailure = new Failure(index, type, id, exceptions.v2(), ExceptionsHelper.status(bulkItemCause)); + Failure expectedBulkItemFailure = new Failure(index, id, exceptions.v2(), ExceptionsHelper.status(bulkItemCause)); BulkItemResponse expectedBulkItemResponse = new BulkItemResponse(itemId, opType, expectedBulkItemFailure); BytesReference originalBytes = toShuffledXContent(bulkItemResponse, xContentType, ToXContent.EMPTY_PARAMS, randomBoolean()); @@ -133,7 +132,6 @@ public void testFailureToAndFromXContent() throws IOException { public static void assertBulkItemResponse(BulkItemResponse expected, BulkItemResponse actual) { assertEquals(expected.getItemId(), actual.getItemId()); assertEquals(expected.getIndex(), actual.getIndex()); - assertEquals(expected.getType(), actual.getType()); assertEquals(expected.getId(), 
actual.getId()); assertEquals(expected.getOpType(), actual.getOpType()); assertEquals(expected.getVersion(), actual.getVersion()); @@ -144,7 +142,6 @@ public static void assertBulkItemResponse(BulkItemResponse expected, BulkItemRes BulkItemResponse.Failure actualFailure = actual.getFailure(); assertEquals(expectedFailure.getIndex(), actualFailure.getIndex()); - assertEquals(expectedFailure.getType(), actualFailure.getType()); assertEquals(expectedFailure.getId(), actualFailure.getId()); assertEquals(expectedFailure.getMessage(), actualFailure.getMessage()); assertEquals(expectedFailure.getStatus(), actualFailure.getStatus()); diff --git a/server/src/test/java/org/opensearch/action/bulk/BulkPrimaryExecutionContextTests.java b/server/src/test/java/org/opensearch/action/bulk/BulkPrimaryExecutionContextTests.java index b98bdb2e3e40d..5159135a22618 100644 --- a/server/src/test/java/org/opensearch/action/bulk/BulkPrimaryExecutionContextTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/BulkPrimaryExecutionContextTests.java @@ -85,16 +85,16 @@ private BulkShardRequest generateRandomRequest() { final DocWriteRequest request; switch (randomFrom(DocWriteRequest.OpType.values())) { case INDEX: - request = new IndexRequest("index", "_doc", "id_" + i); + request = new IndexRequest("index").id("id_" + i); break; case CREATE: - request = new IndexRequest("index", "_doc", "id_" + i).create(true); + request = new IndexRequest("index").id("id_" + i).create(true); break; case UPDATE: - request = new UpdateRequest("index", "_doc", "id_" + i); + request = new UpdateRequest("index", "id_" + i); break; case DELETE: - request = new DeleteRequest("index", "_doc", "id_" + i); + request = new DeleteRequest("index", "id_" + i); break; default: throw new AssertionError("unknown type"); @@ -139,7 +139,7 @@ public void testTranslogLocation() { } break; case UPDATE: - context.setRequestToExecute(new IndexRequest(current.index(), current.type(), current.id())); + context.setRequestToExecute(new IndexRequest(current.index()).id(current.id())); if (failure) { result = new Engine.IndexResult(new OpenSearchException("bla"), 1, 1, 1); } else { diff --git a/server/src/test/java/org/opensearch/action/bulk/BulkRequestModifierTests.java b/server/src/test/java/org/opensearch/action/bulk/BulkRequestModifierTests.java index ce35815a296e0..e7e1166eb57fa 100644 --- a/server/src/test/java/org/opensearch/action/bulk/BulkRequestModifierTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/BulkRequestModifierTests.java @@ -58,7 +58,7 @@ public void testBulkRequestModifier() { int numRequests = scaledRandomIntBetween(8, 64); BulkRequest bulkRequest = new BulkRequest(); for (int i = 0; i < numRequests; i++) { - bulkRequest.add(new IndexRequest("_index", "_type", String.valueOf(i)).source("{}", XContentType.JSON)); + bulkRequest.add(new IndexRequest("_index").id(String.valueOf(i)).source("{}", XContentType.JSON)); } CaptureActionListener actionListener = new CaptureActionListener(); TransportBulkAction.BulkRequestModifier bulkRequestModifier = new TransportBulkAction.BulkRequestModifier(bulkRequest); @@ -87,7 +87,6 @@ public void testBulkRequestModifier() { BulkItemResponse item = bulkResponse.getItems()[j]; assertThat(item.isFailed(), is(true)); assertThat(item.getFailure().getIndex(), equalTo("_index")); - assertThat(item.getFailure().getType(), equalTo("_type")); assertThat(item.getFailure().getId(), equalTo(String.valueOf(j))); assertThat(item.getFailure().getMessage(), equalTo("java.lang.RuntimeException")); } 
else { @@ -99,7 +98,7 @@ public void testBulkRequestModifier() { public void testPipelineFailures() { BulkRequest originalBulkRequest = new BulkRequest(); for (int i = 0; i < 32; i++) { - originalBulkRequest.add(new IndexRequest("index", "type", String.valueOf(i))); + originalBulkRequest.add(new IndexRequest("index").id(String.valueOf(i))); } TransportBulkAction.BulkRequestModifier modifier = new TransportBulkAction.BulkRequestModifier(originalBulkRequest); @@ -128,15 +127,7 @@ public void onFailure(Exception e) {} List originalResponses = new ArrayList<>(); for (DocWriteRequest actionRequest : bulkRequest.requests()) { IndexRequest indexRequest = (IndexRequest) actionRequest; - IndexResponse indexResponse = new IndexResponse( - new ShardId("index", "_na_", 0), - indexRequest.type(), - indexRequest.id(), - 1, - 17, - 1, - true - ); + IndexResponse indexResponse = new IndexResponse(new ShardId("index", "_na_", 0), indexRequest.id(), 1, 17, 1, true); originalResponses.add(new BulkItemResponse(Integer.parseInt(indexRequest.id()), indexRequest.opType(), indexResponse)); } bulkResponseListener.onResponse(new BulkResponse(originalResponses.toArray(new BulkItemResponse[originalResponses.size()]), 0)); @@ -150,7 +141,7 @@ public void onFailure(Exception e) {} public void testNoFailures() { BulkRequest originalBulkRequest = new BulkRequest(); for (int i = 0; i < 32; i++) { - originalBulkRequest.add(new IndexRequest("index", "type", String.valueOf(i))); + originalBulkRequest.add(new IndexRequest("index").id(String.valueOf(i))); } TransportBulkAction.BulkRequestModifier modifier = new TransportBulkAction.BulkRequestModifier(originalBulkRequest); diff --git a/server/src/test/java/org/opensearch/action/bulk/BulkRequestParserTests.java b/server/src/test/java/org/opensearch/action/bulk/BulkRequestParserTests.java index b7ba887a0f1e2..239bb19c5f6ad 100644 --- a/server/src/test/java/org/opensearch/action/bulk/BulkRequestParserTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/BulkRequestParserTests.java @@ -35,7 +35,6 @@ import org.opensearch.action.index.IndexRequest; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.xcontent.XContentType; -import org.opensearch.rest.action.document.RestBulkAction; import org.opensearch.test.OpenSearchTestCase; import org.hamcrest.Matchers; @@ -50,7 +49,7 @@ public void testIndexRequest() throws IOException { BytesArray request = new BytesArray("{ \"index\":{ \"_id\": \"bar\" } }\n{}\n"); BulkRequestParser parser = new BulkRequestParser(randomBoolean()); final AtomicBoolean parsed = new AtomicBoolean(); - parser.parse(request, "foo", null, null, null, null, false, XContentType.JSON, indexRequest -> { + parser.parse(request, "foo", null, null, null, null, false, XContentType.JSON, (indexRequest, type) -> { assertFalse(parsed.get()); assertEquals("foo", indexRequest.index()); assertEquals("bar", indexRequest.id()); @@ -68,7 +67,7 @@ public void testIndexRequest() throws IOException { true, false, XContentType.JSON, - indexRequest -> { assertTrue(indexRequest.isRequireAlias()); }, + (indexRequest, type) -> { assertTrue(indexRequest.isRequireAlias()); }, req -> fail(), req -> fail() ); @@ -83,7 +82,7 @@ public void testIndexRequest() throws IOException { null, false, XContentType.JSON, - indexRequest -> { assertTrue(indexRequest.isRequireAlias()); }, + (indexRequest, type) -> { assertTrue(indexRequest.isRequireAlias()); }, req -> fail(), req -> fail() ); @@ -98,7 +97,7 @@ public void testIndexRequest() throws IOException { true, false, 
XContentType.JSON, - indexRequest -> { assertFalse(indexRequest.isRequireAlias()); }, + (indexRequest, type) -> { assertFalse(indexRequest.isRequireAlias()); }, req -> fail(), req -> fail() ); @@ -108,12 +107,24 @@ public void testDeleteRequest() throws IOException { BytesArray request = new BytesArray("{ \"delete\":{ \"_id\": \"bar\" } }\n"); BulkRequestParser parser = new BulkRequestParser(randomBoolean()); final AtomicBoolean parsed = new AtomicBoolean(); - parser.parse(request, "foo", null, null, null, null, false, XContentType.JSON, req -> fail(), req -> fail(), deleteRequest -> { - assertFalse(parsed.get()); - assertEquals("foo", deleteRequest.index()); - assertEquals("bar", deleteRequest.id()); - parsed.set(true); - }); + parser.parse( + request, + "foo", + null, + null, + null, + null, + false, + XContentType.JSON, + (req, type) -> fail(), + req -> fail(), + deleteRequest -> { + assertFalse(parsed.get()); + assertEquals("foo", deleteRequest.index()); + assertEquals("bar", deleteRequest.id()); + parsed.set(true); + } + ); assertTrue(parsed.get()); } @@ -121,7 +132,7 @@ public void testUpdateRequest() throws IOException { BytesArray request = new BytesArray("{ \"update\":{ \"_id\": \"bar\" } }\n{}\n"); BulkRequestParser parser = new BulkRequestParser(randomBoolean()); final AtomicBoolean parsed = new AtomicBoolean(); - parser.parse(request, "foo", null, null, null, null, false, XContentType.JSON, req -> fail(), updateRequest -> { + parser.parse(request, "foo", null, null, null, null, false, XContentType.JSON, (req, type) -> fail(), updateRequest -> { assertFalse(parsed.get()); assertEquals("foo", updateRequest.index()); assertEquals("bar", updateRequest.id()); @@ -139,7 +150,7 @@ public void testUpdateRequest() throws IOException { true, false, XContentType.JSON, - req -> fail(), + (req, type) -> fail(), updateRequest -> { assertTrue(updateRequest.isRequireAlias()); }, req -> fail() ); @@ -154,7 +165,7 @@ public void testUpdateRequest() throws IOException { null, false, XContentType.JSON, - req -> fail(), + (req, type) -> fail(), updateRequest -> { assertTrue(updateRequest.isRequireAlias()); }, req -> fail() ); @@ -169,7 +180,7 @@ public void testUpdateRequest() throws IOException { true, false, XContentType.JSON, - req -> fail(), + (req, type) -> fail(), updateRequest -> { assertFalse(updateRequest.isRequireAlias()); }, req -> fail() ); @@ -189,7 +200,7 @@ public void testBarfOnLackOfTrailingNewline() { null, false, XContentType.JSON, - indexRequest -> fail(), + (indexRequest, type) -> fail(), req -> fail(), req -> fail() ) @@ -203,24 +214,34 @@ public void testFailOnExplicitIndex() { IllegalArgumentException ex = expectThrows( IllegalArgumentException.class, - () -> parser.parse(request, null, null, null, null, null, false, XContentType.JSON, req -> fail(), req -> fail(), req -> fail()) + () -> parser.parse( + request, + null, + null, + null, + null, + null, + false, + XContentType.JSON, + (req, type) -> fail(), + req -> fail(), + req -> fail() + ) ); assertEquals("explicit index in bulk is not allowed", ex.getMessage()); } - public void testTypeWarning() throws IOException { + public void testTypesStillParsedForBulkMonitoring() throws IOException { BytesArray request = new BytesArray("{ \"index\":{ \"_type\": \"quux\", \"_id\": \"bar\" } }\n{}\n"); - BulkRequestParser parser = new BulkRequestParser(true); + BulkRequestParser parser = new BulkRequestParser(false); final AtomicBoolean parsed = new AtomicBoolean(); - parser.parse(request, "foo", null, null, null, null, false, 
XContentType.JSON, indexRequest -> { + parser.parse(request, "foo", null, null, null, null, false, XContentType.JSON, (indexRequest, type) -> { assertFalse(parsed.get()); assertEquals("foo", indexRequest.index()); assertEquals("bar", indexRequest.id()); parsed.set(true); }, req -> fail(), req -> fail()); assertTrue(parsed.get()); - - assertWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE); } public void testParseDeduplicatesParameterStrings() throws IOException { @@ -230,7 +251,19 @@ public void testParseDeduplicatesParameterStrings() throws IOException { ); BulkRequestParser parser = new BulkRequestParser(randomBoolean()); final List indexRequests = new ArrayList<>(); - parser.parse(request, null, null, null, null, null, true, XContentType.JSON, indexRequests::add, req -> fail(), req -> fail()); + parser.parse( + request, + null, + null, + null, + null, + null, + true, + XContentType.JSON, + (indexRequest, type) -> indexRequests.add(indexRequest), + req -> fail(), + req -> fail() + ); assertThat(indexRequests, Matchers.hasSize(2)); final IndexRequest first = indexRequests.get(0); final IndexRequest second = indexRequests.get(1); diff --git a/server/src/test/java/org/opensearch/action/bulk/BulkRequestTests.java b/server/src/test/java/org/opensearch/action/bulk/BulkRequestTests.java index f58567b85be3b..9fd57a78d2097 100644 --- a/server/src/test/java/org/opensearch/action/bulk/BulkRequestTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/BulkRequestTests.java @@ -47,7 +47,6 @@ import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.common.xcontent.XContentType; -import org.opensearch.rest.action.document.RestBulkAction; import org.opensearch.script.Script; import org.opensearch.test.OpenSearchTestCase; @@ -76,12 +75,10 @@ public void testSimpleBulk1() throws Exception { assertThat(((IndexRequest) bulkRequest.requests().get(0)).source(), equalTo(new BytesArray("{ \"field1\" : \"value1\" }"))); assertThat(bulkRequest.requests().get(1), instanceOf(DeleteRequest.class)); assertThat(((IndexRequest) bulkRequest.requests().get(2)).source(), equalTo(new BytesArray("{ \"field1\" : \"value3\" }"))); - // This test's JSON contains outdated references to types - assertWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE); } public void testSimpleBulkWithCarriageReturn() throws Exception { - String bulkAction = "{ \"index\":{\"_index\":\"test\",\"_type\":\"type1\",\"_id\":\"1\"} }\r\n{ \"field1\" : \"value1\" }\r\n"; + String bulkAction = "{ \"index\":{\"_index\":\"test\",\"_id\":\"1\"} }\r\n{ \"field1\" : \"value1\" }\r\n"; BulkRequest bulkRequest = new BulkRequest(); bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, XContentType.JSON); assertThat(bulkRequest.numberOfActions(), equalTo(1)); @@ -92,8 +89,6 @@ public void testSimpleBulkWithCarriageReturn() throws Exception { XContentType.JSON ).v2(); assertEquals("value1", sourceMap.get("field1")); - // This test's JSON contains outdated references to types - assertWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE); } public void testSimpleBulk2() throws Exception { @@ -119,7 +114,6 @@ public void testSimpleBulk4() throws Exception { assertThat(((UpdateRequest) bulkRequest.requests().get(0)).retryOnConflict(), equalTo(2)); assertThat(((UpdateRequest) bulkRequest.requests().get(0)).doc().source().utf8ToString(), equalTo("{\"field\":\"value\"}")); assertThat(bulkRequest.requests().get(1).id(), equalTo("0")); - 
assertThat(bulkRequest.requests().get(1).type(), equalTo("type1")); assertThat(bulkRequest.requests().get(1).index(), equalTo("index1")); Script script = ((UpdateRequest) bulkRequest.requests().get(1)).script(); assertThat(script, notNullValue()); @@ -130,30 +124,26 @@ public void testSimpleBulk4() throws Exception { assertThat(scriptParams.size(), equalTo(1)); assertThat(scriptParams.get("param1"), equalTo(1)); assertThat(((UpdateRequest) bulkRequest.requests().get(1)).upsertRequest().source().utf8ToString(), equalTo("{\"counter\":1}")); - // This test's JSON contains outdated references to types - assertWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE); } public void testBulkAllowExplicitIndex() throws Exception { String bulkAction1 = copyToStringFromClasspath("/org/opensearch/action/bulk/simple-bulk.json"); Exception ex = expectThrows( Exception.class, - () -> new BulkRequest().add(new BytesArray(bulkAction1.getBytes(StandardCharsets.UTF_8)), null, null, false, XContentType.JSON) + () -> new BulkRequest().add(new BytesArray(bulkAction1.getBytes(StandardCharsets.UTF_8)), null, false, XContentType.JSON) ); assertEquals("explicit index in bulk is not allowed", ex.getMessage()); String bulkAction = copyToStringFromClasspath("/org/opensearch/action/bulk/simple-bulk5.json"); - new BulkRequest().add(new BytesArray(bulkAction.getBytes(StandardCharsets.UTF_8)), "test", null, false, XContentType.JSON); - // This test's JSON contains outdated references to types - assertWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE); + new BulkRequest().add(new BytesArray(bulkAction.getBytes(StandardCharsets.UTF_8)), "test", false, XContentType.JSON); } public void testBulkAddIterable() { BulkRequest bulkRequest = Requests.bulkRequest(); List> requests = new ArrayList<>(); - requests.add(new IndexRequest("test", "test", "id").source(Requests.INDEX_CONTENT_TYPE, "field", "value")); - requests.add(new UpdateRequest("test", "test", "id").doc(Requests.INDEX_CONTENT_TYPE, "field", "value")); - requests.add(new DeleteRequest("test", "test", "id")); + requests.add(new IndexRequest("test").id("id").source(Requests.INDEX_CONTENT_TYPE, "field", "value")); + requests.add(new UpdateRequest("test", "id").doc(Requests.INDEX_CONTENT_TYPE, "field", "value")); + requests.add(new DeleteRequest("test", "id")); bulkRequest.add(requests); assertThat(bulkRequest.requests().size(), equalTo(3)); assertThat(bulkRequest.requests().get(0), instanceOf(IndexRequest.class)); @@ -169,8 +159,6 @@ public void testSimpleBulk6() throws Exception { () -> bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, XContentType.JSON) ); assertThat(exc.getMessage(), containsString("Unknown key for a VALUE_STRING in [hello]")); - // This test's JSON contains outdated references to types - assertWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE); } public void testSimpleBulk7() throws Exception { @@ -184,8 +172,6 @@ public void testSimpleBulk7() throws Exception { exc.getMessage(), containsString("Malformed action/metadata line [5], expected a simple value for field [_unknown] but found [START_ARRAY]") ); - // This test's JSON contains outdated references to types - assertWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE); } public void testSimpleBulk8() throws Exception { @@ -196,8 +182,6 @@ public void testSimpleBulk8() throws Exception { () -> bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, XContentType.JSON) ); assertThat(exc.getMessage(), containsString("Action/metadata 
line [3] contains an unknown parameter [_foo]")); - // This test's JSON contains outdated references to types - assertWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE); } public void testSimpleBulk9() throws Exception { @@ -218,13 +202,10 @@ public void testSimpleBulk10() throws Exception { BulkRequest bulkRequest = new BulkRequest(); bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, XContentType.JSON); assertThat(bulkRequest.numberOfActions(), equalTo(9)); - // This test's JSON contains outdated references to types - assertWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE); } public void testBulkActionShouldNotContainArray() throws Exception { - String bulkAction = "{ \"index\":{\"_index\":[\"index1\", \"index2\"],\"_type\":\"type1\",\"_id\":\"1\"} }\r\n" - + "{ \"field1\" : \"value1\" }\r\n"; + String bulkAction = "{ \"index\":{\"_index\":[\"index1\", \"index2\"],\"_id\":\"1\"} }\r\n" + "{ \"field1\" : \"value1\" }\r\n"; BulkRequest bulkRequest = new BulkRequest(); IllegalArgumentException exc = expectThrows( IllegalArgumentException.class, @@ -270,12 +251,12 @@ public void testBulkEmptyObject() throws Exception { public void testBulkRequestWithRefresh() throws Exception { BulkRequest bulkRequest = new BulkRequest(); // We force here a "id is missing" validation error - bulkRequest.add(new DeleteRequest("index", "type", null).setRefreshPolicy(RefreshPolicy.IMMEDIATE)); + bulkRequest.add(new DeleteRequest("index", null).setRefreshPolicy(RefreshPolicy.IMMEDIATE)); - // We force here a "type is missing" validation error - bulkRequest.add(new DeleteRequest("index", "", "id")); - bulkRequest.add(new DeleteRequest("index", "type", "id").setRefreshPolicy(RefreshPolicy.IMMEDIATE)); - bulkRequest.add(new UpdateRequest("index", "type", "id").doc("{}", XContentType.JSON).setRefreshPolicy(RefreshPolicy.IMMEDIATE)); - bulkRequest.add(new IndexRequest("index", "type", "id").source("{}", XContentType.JSON).setRefreshPolicy(RefreshPolicy.IMMEDIATE)); + // This delete request is valid now that types are removed, so it adds no validation error + bulkRequest.add(new DeleteRequest("index", "id")); + bulkRequest.add(new DeleteRequest("index", "id").setRefreshPolicy(RefreshPolicy.IMMEDIATE)); + bulkRequest.add(new UpdateRequest("index", "id").doc("{}", XContentType.JSON).setRefreshPolicy(RefreshPolicy.IMMEDIATE)); + bulkRequest.add(new IndexRequest("index").id("id").source("{}", XContentType.JSON).setRefreshPolicy(RefreshPolicy.IMMEDIATE)); ActionRequestValidationException validate = bulkRequest.validate(); assertThat(validate, notNullValue()); assertThat(validate.validationErrors(), not(empty())); @@ -284,7 +265,6 @@ public void testBulkRequestWithRefresh() throws Exception { contains( "RefreshPolicy is not supported on an item request. Set it on the BulkRequest instead.", "id is missing", - "type is missing", "RefreshPolicy is not supported on an item request. Set it on the BulkRequest instead.", "RefreshPolicy is not supported on an item request. Set it on the BulkRequest instead.", "RefreshPolicy is not supported on an item request. Set it on the BulkRequest instead."
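The hunks above and below repeat one mechanical migration: every typed request constructor taking (index, type, id) is replaced by its typeless equivalent. A minimal sketch of the pattern, using only the typeless builders that already appear in this diff (index name plus document id, no mapping type):

    IndexRequest index = new IndexRequest("test").id("1").source("{}", XContentType.JSON); // was new IndexRequest("test", "type", "1")
    UpdateRequest update = new UpdateRequest("test", "1").doc("{}", XContentType.JSON);    // was new UpdateRequest("test", "type", "1")
    DeleteRequest delete = new DeleteRequest("test", "1");                                 // was new DeleteRequest("test", "type", "1")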
@@ -295,8 +275,8 @@ public void testBulkRequestWithRefresh() throws Exception { // issue 15120 public void testBulkNoSource() throws Exception { BulkRequest bulkRequest = new BulkRequest(); - bulkRequest.add(new UpdateRequest("index", "type", "id")); - bulkRequest.add(new IndexRequest("index", "type", "id")); + bulkRequest.add(new UpdateRequest("index", "id")); + bulkRequest.add(new IndexRequest("index").id("id")); ActionRequestValidationException validate = bulkRequest.validate(); assertThat(validate, notNullValue()); assertThat(validate.validationErrors(), not(empty())); @@ -318,7 +298,6 @@ public void testSmileIsSupported() throws IOException { builder.startObject(); builder.startObject("index"); builder.field("_index", "index"); - builder.field("_type", "type"); builder.field("_id", "test"); builder.endObject(); builder.endObject(); @@ -334,19 +313,16 @@ public void testSmileIsSupported() throws IOException { } BulkRequest bulkRequest = new BulkRequest(); - bulkRequest.add(data, null, null, xContentType); + bulkRequest.add(data, null, xContentType); assertEquals(1, bulkRequest.requests().size()); DocWriteRequest docWriteRequest = bulkRequest.requests().get(0); assertEquals(DocWriteRequest.OpType.INDEX, docWriteRequest.opType()); assertEquals("index", docWriteRequest.index()); - assertEquals("type", docWriteRequest.type()); assertEquals("test", docWriteRequest.id()); assertThat(docWriteRequest, instanceOf(IndexRequest.class)); IndexRequest request = (IndexRequest) docWriteRequest; assertEquals(1, request.sourceAsMap().size()); assertEquals("value", request.sourceAsMap().get("field")); - // This test's content contains outdated references to types - assertWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE); } public void testToValidateUpsertRequestAndCASInBulkRequest() throws IOException { @@ -357,7 +333,6 @@ public void testToValidateUpsertRequestAndCASInBulkRequest() throws IOException builder.startObject(); builder.startObject("update"); builder.field("_index", "index"); - builder.field("_type", "type"); builder.field("_id", "id"); builder.field("if_seq_no", 1L); builder.field("if_primary_term", 100L); @@ -372,7 +347,6 @@ public void testToValidateUpsertRequestAndCASInBulkRequest() throws IOException values.put("if_seq_no", 1L); values.put("if_primary_term", 100L); values.put("_index", "index"); - values.put("_type", "type"); builder.field("upsert", values); builder.endObject(); } @@ -380,10 +354,8 @@ public void testToValidateUpsertRequestAndCASInBulkRequest() throws IOException data = out.bytes(); } BulkRequest bulkRequest = new BulkRequest(); - bulkRequest.add(data, null, null, xContentType); + bulkRequest.add(data, null, xContentType); assertThat(bulkRequest.validate().validationErrors(), contains("upsert requests don't support `if_seq_no` and `if_primary_term`")); - // This test's JSON contains outdated references to types - assertWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE); } public void testBulkTerminatedByNewline() throws Exception { @@ -404,7 +376,5 @@ public void testBulkTerminatedByNewline() throws Exception { XContentType.JSON ); assertEquals(3, bulkRequestWithNewLine.numberOfActions()); - // This test's JSON contains outdated references to types - assertWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE); } } diff --git a/server/src/test/java/org/opensearch/action/bulk/BulkResponseTests.java b/server/src/test/java/org/opensearch/action/bulk/BulkResponseTests.java index 55fca8fc736db..e768d66ee04ce 100644 --- 
a/server/src/test/java/org/opensearch/action/bulk/BulkResponseTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/BulkResponseTests.java @@ -86,17 +86,16 @@ public void testToAndFromXContent() throws IOException { expectedBulkItems[i] = new BulkItemResponse(i, opType, randomDocWriteResponses.v2()); } else { String index = randomAlphaOfLength(5); - String type = randomAlphaOfLength(5); String id = randomAlphaOfLength(5); Tuple failures = randomExceptions(); Exception bulkItemCause = (Exception) failures.v1(); - bulkItems[i] = new BulkItemResponse(i, opType, new BulkItemResponse.Failure(index, type, id, bulkItemCause)); + bulkItems[i] = new BulkItemResponse(i, opType, new BulkItemResponse.Failure(index, id, bulkItemCause)); expectedBulkItems[i] = new BulkItemResponse( i, opType, - new BulkItemResponse.Failure(index, type, id, failures.v2(), ExceptionsHelper.status(bulkItemCause)) + new BulkItemResponse.Failure(index, id, failures.v2(), ExceptionsHelper.status(bulkItemCause)) ); } } diff --git a/server/src/test/java/org/opensearch/action/bulk/RetryTests.java b/server/src/test/java/org/opensearch/action/bulk/RetryTests.java index 8a66e0cf6f751..d3280ede6ce15 100644 --- a/server/src/test/java/org/opensearch/action/bulk/RetryTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/RetryTests.java @@ -87,11 +87,11 @@ public void tearDown() throws Exception { private BulkRequest createBulkRequest() { BulkRequest request = new BulkRequest(); - request.add(new UpdateRequest("shop", "products", "1")); - request.add(new UpdateRequest("shop", "products", "2")); - request.add(new UpdateRequest("shop", "products", "3")); - request.add(new UpdateRequest("shop", "products", "4")); - request.add(new UpdateRequest("shop", "products", "5")); + request.add(new UpdateRequest("shop", "1")); + request.add(new UpdateRequest("shop", "2")); + request.add(new UpdateRequest("shop", "3")); + request.add(new UpdateRequest("shop", "4")); + request.add(new UpdateRequest("shop", "5")); return request; } @@ -238,18 +238,14 @@ public void bulk(BulkRequest request, ActionListener listener) { } private BulkItemResponse successfulResponse() { - return new BulkItemResponse( - 1, - OpType.DELETE, - new DeleteResponse(new ShardId("test", "test", 0), "_doc", "test", 0, 0, 0, false) - ); + return new BulkItemResponse(1, OpType.DELETE, new DeleteResponse(new ShardId("test", "test", 0), "test", 0, 0, 0, false)); } private BulkItemResponse failedResponse() { return new BulkItemResponse( 1, OpType.INDEX, - new BulkItemResponse.Failure("test", "test", "1", new OpenSearchRejectedExecutionException("pool full")) + new BulkItemResponse.Failure("test", "1", new OpenSearchRejectedExecutionException("pool full")) ); } } diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java index b2f6ce885d242..32e9dd44008cd 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java @@ -79,7 +79,7 @@ public void testNonExceptional() { bulkRequest.add(new IndexRequest(randomAlphaOfLength(5))); bulkRequest.add(new IndexRequest(randomAlphaOfLength(5))); bulkRequest.add(new DeleteRequest(randomAlphaOfLength(5))); - bulkRequest.add(new UpdateRequest(randomAlphaOfLength(5), randomAlphaOfLength(5), 
randomAlphaOfLength(5))); + bulkRequest.add(new UpdateRequest(randomAlphaOfLength(5), randomAlphaOfLength(5))); // Test emulating auto_create_index=false indicesThatCannotBeCreatedTestCase(emptySet(), bulkRequest, null); // Test emulating auto_create_index=true @@ -95,7 +95,7 @@ public void testAllFail() { bulkRequest.add(new IndexRequest("no")); bulkRequest.add(new IndexRequest("can't")); bulkRequest.add(new DeleteRequest("do").version(0).versionType(VersionType.EXTERNAL)); - bulkRequest.add(new UpdateRequest("nothin", randomAlphaOfLength(5), randomAlphaOfLength(5))); + bulkRequest.add(new UpdateRequest("nothin", randomAlphaOfLength(5))); indicesThatCannotBeCreatedTestCase( new HashSet<>(Arrays.asList("no", "can't", "do", "nothin")), bulkRequest, diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java index 8a804c5d7519e..4b98870422ce8 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java @@ -290,7 +290,7 @@ public void setupAction() { public void testIngestSkipped() throws Exception { BulkRequest bulkRequest = new BulkRequest(); - IndexRequest indexRequest = new IndexRequest("index", "type", "id"); + IndexRequest indexRequest = new IndexRequest("index").id("id"); indexRequest.source(emptyMap()); bulkRequest.add(indexRequest); action.execute(null, bulkRequest, ActionListener.wrap(response -> {}, exception -> { throw new AssertionError(exception); })); @@ -299,7 +299,7 @@ public void testIngestSkipped() throws Exception { } public void testSingleItemBulkActionIngestSkipped() throws Exception { - IndexRequest indexRequest = new IndexRequest("index", "type", "id"); + IndexRequest indexRequest = new IndexRequest("index").id("id"); indexRequest.source(emptyMap()); singleItemBulkWriteAction.execute( null, @@ -313,10 +313,10 @@ public void testSingleItemBulkActionIngestSkipped() throws Exception { public void testIngestLocal() throws Exception { Exception exception = new Exception("fake exception"); BulkRequest bulkRequest = new BulkRequest(); - IndexRequest indexRequest1 = new IndexRequest("index", "type", "id"); + IndexRequest indexRequest1 = new IndexRequest("index").id("id"); indexRequest1.source(emptyMap()); indexRequest1.setPipeline("testpipeline"); - IndexRequest indexRequest2 = new IndexRequest("index", "type", "id"); + IndexRequest indexRequest2 = new IndexRequest("index").id("id"); indexRequest2.source(emptyMap()); indexRequest2.setPipeline("testpipeline"); bulkRequest.add(indexRequest1); @@ -360,7 +360,7 @@ public void testIngestLocal() throws Exception { public void testSingleItemBulkActionIngestLocal() throws Exception { Exception exception = new Exception("fake exception"); - IndexRequest indexRequest = new IndexRequest("index", "type", "id"); + IndexRequest indexRequest = new IndexRequest("index").id("id"); indexRequest.source(emptyMap()); indexRequest.setPipeline("testpipeline"); AtomicBoolean responseCalled = new AtomicBoolean(false); @@ -444,7 +444,7 @@ public void testIngestSystemLocal() throws Exception { public void testIngestForward() throws Exception { localIngest = false; BulkRequest bulkRequest = new BulkRequest(); - IndexRequest indexRequest = new IndexRequest("index", "type", "id"); + IndexRequest indexRequest = new IndexRequest("index").id("id"); indexRequest.source(emptyMap()); 
indexRequest.setPipeline("testpipeline"); bulkRequest.add(indexRequest); @@ -485,7 +485,7 @@ public void testIngestForward() throws Exception { public void testSingleItemBulkActionIngestForward() throws Exception { localIngest = false; - IndexRequest indexRequest = new IndexRequest("index", "type", "id"); + IndexRequest indexRequest = new IndexRequest("index").id("id"); indexRequest.source(emptyMap()); indexRequest.setPipeline("testpipeline"); IndexResponse indexResponse = mock(IndexResponse.class); @@ -527,11 +527,11 @@ public void testSingleItemBulkActionIngestForward() throws Exception { } public void testUseDefaultPipeline() throws Exception { - validateDefaultPipeline(new IndexRequest(WITH_DEFAULT_PIPELINE, "type", "id")); + validateDefaultPipeline(new IndexRequest(WITH_DEFAULT_PIPELINE).id("id")); } public void testUseDefaultPipelineWithAlias() throws Exception { - validateDefaultPipeline(new IndexRequest(WITH_DEFAULT_PIPELINE_ALIAS, "type", "id")); + validateDefaultPipeline(new IndexRequest(WITH_DEFAULT_PIPELINE_ALIAS).id("id")); } public void testUseDefaultPipelineWithBulkUpsert() throws Exception { @@ -547,15 +547,14 @@ public void testUseDefaultPipelineWithBulkUpsertWithAlias() throws Exception { private void validatePipelineWithBulkUpsert(@Nullable String indexRequestIndexName, String updateRequestIndexName) throws Exception { Exception exception = new Exception("fake exception"); BulkRequest bulkRequest = new BulkRequest(); - IndexRequest indexRequest1 = new IndexRequest(indexRequestIndexName, "type", "id1").source(emptyMap()); - IndexRequest indexRequest2 = new IndexRequest(indexRequestIndexName, "type", "id2").source(emptyMap()); - IndexRequest indexRequest3 = new IndexRequest(indexRequestIndexName, "type", "id3").source(emptyMap()); - UpdateRequest upsertRequest = new UpdateRequest(updateRequestIndexName, "type", "id1").upsert(indexRequest1) - .script(mockScript("1")); - UpdateRequest docAsUpsertRequest = new UpdateRequest(updateRequestIndexName, "type", "id2").doc(indexRequest2).docAsUpsert(true); + IndexRequest indexRequest1 = new IndexRequest(indexRequestIndexName).id("id1").source(emptyMap()); + IndexRequest indexRequest2 = new IndexRequest(indexRequestIndexName).id("id2").source(emptyMap()); + IndexRequest indexRequest3 = new IndexRequest(indexRequestIndexName).id("id3").source(emptyMap()); + UpdateRequest upsertRequest = new UpdateRequest(updateRequestIndexName, "id1").upsert(indexRequest1).script(mockScript("1")); + UpdateRequest docAsUpsertRequest = new UpdateRequest(updateRequestIndexName, "id2").doc(indexRequest2).docAsUpsert(true); // this test only covers the mechanics that scripted bulk upserts will execute a default pipeline. However, in practice scripted // bulk upserts with a default pipeline are a bit surprising since the script executes AFTER the pipeline. 
- UpdateRequest scriptedUpsert = new UpdateRequest(updateRequestIndexName, "type", "id2").upsert(indexRequest3) + UpdateRequest scriptedUpsert = new UpdateRequest(updateRequestIndexName, "id2").upsert(indexRequest3) .script(mockScript("1")) .scriptedUpsert(true); bulkRequest.add(upsertRequest).add(docAsUpsertRequest).add(scriptedUpsert); @@ -604,7 +603,7 @@ private void validatePipelineWithBulkUpsert(@Nullable String indexRequestIndexNa public void testDoExecuteCalledTwiceCorrectly() throws Exception { Exception exception = new Exception("fake exception"); - IndexRequest indexRequest = new IndexRequest("missing_index", "type", "id"); + IndexRequest indexRequest = new IndexRequest("missing_index").id("id"); indexRequest.setPipeline("testpipeline"); indexRequest.source(emptyMap()); AtomicBoolean responseCalled = new AtomicBoolean(false); @@ -644,7 +643,7 @@ public void testDoExecuteCalledTwiceCorrectly() throws Exception { public void testNotFindDefaultPipelineFromTemplateMatches() { Exception exception = new Exception("fake exception"); - IndexRequest indexRequest = new IndexRequest("missing_index", "type", "id"); + IndexRequest indexRequest = new IndexRequest("missing_index").id("id"); indexRequest.source(emptyMap()); AtomicBoolean responseCalled = new AtomicBoolean(false); AtomicBoolean failureCalled = new AtomicBoolean(false); @@ -698,7 +697,7 @@ public void testFindDefaultPipelineFromTemplateMatch() { when(metadata.getTemplates()).thenReturn(templateMetadataBuilder.build()); when(metadata.indices()).thenReturn(ImmutableOpenMap.of()); - IndexRequest indexRequest = new IndexRequest("missing_index", "type", "id"); + IndexRequest indexRequest = new IndexRequest("missing_index").id("id"); indexRequest.source(emptyMap()); AtomicBoolean responseCalled = new AtomicBoolean(false); AtomicBoolean failureCalled = new AtomicBoolean(false); diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTests.java index 0ce8a2fc1a2ed..5eb395cb05971 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTests.java @@ -168,7 +168,7 @@ public void tearDown() throws Exception { } public void testDeleteNonExistingDocDoesNotCreateIndex() throws Exception { - BulkRequest bulkRequest = new BulkRequest().add(new DeleteRequest("index", "type", "id")); + BulkRequest bulkRequest = new BulkRequest().add(new DeleteRequest("index", "id")); PlainActionFuture future = PlainActionFuture.newFuture(); ActionTestUtils.execute(bulkAction, null, bulkRequest, future); @@ -183,9 +183,7 @@ public void testDeleteNonExistingDocDoesNotCreateIndex() throws Exception { } public void testDeleteNonExistingDocExternalVersionCreatesIndex() throws Exception { - BulkRequest bulkRequest = new BulkRequest().add( - new DeleteRequest("index", "type", "id").versionType(VersionType.EXTERNAL).version(0) - ); + BulkRequest bulkRequest = new BulkRequest().add(new DeleteRequest("index", "id").versionType(VersionType.EXTERNAL).version(0)); PlainActionFuture future = PlainActionFuture.newFuture(); ActionTestUtils.execute(bulkAction, null, bulkRequest, future); @@ -194,9 +192,7 @@ public void testDeleteNonExistingDocExternalVersionCreatesIndex() throws Excepti } public void testDeleteNonExistingDocExternalGteVersionCreatesIndex() throws Exception { - BulkRequest bulkRequest = new BulkRequest().add( - new DeleteRequest("index2", "type", 
"id").versionType(VersionType.EXTERNAL_GTE).version(0) - ); + BulkRequest bulkRequest = new BulkRequest().add(new DeleteRequest("index2", "id").versionType(VersionType.EXTERNAL_GTE).version(0)); PlainActionFuture future = PlainActionFuture.newFuture(); ActionTestUtils.execute(bulkAction, null, bulkRequest, future); @@ -205,12 +201,10 @@ public void testDeleteNonExistingDocExternalGteVersionCreatesIndex() throws Exce } public void testGetIndexWriteRequest() throws Exception { - IndexRequest indexRequest = new IndexRequest("index", "type", "id1").source(emptyMap()); - UpdateRequest upsertRequest = new UpdateRequest("index", "type", "id1").upsert(indexRequest).script(mockScript("1")); - UpdateRequest docAsUpsertRequest = new UpdateRequest("index", "type", "id2").doc(indexRequest).docAsUpsert(true); - UpdateRequest scriptedUpsert = new UpdateRequest("index", "type", "id2").upsert(indexRequest) - .script(mockScript("1")) - .scriptedUpsert(true); + IndexRequest indexRequest = new IndexRequest("index").id("id1").source(emptyMap()); + UpdateRequest upsertRequest = new UpdateRequest("index", "id1").upsert(indexRequest).script(mockScript("1")); + UpdateRequest docAsUpsertRequest = new UpdateRequest("index", "id2").doc(indexRequest).docAsUpsert(true); + UpdateRequest scriptedUpsert = new UpdateRequest("index", "id2").upsert(indexRequest).script(mockScript("1")).scriptedUpsert(true); assertEquals(TransportBulkAction.getIndexWriteRequest(indexRequest), indexRequest); assertEquals(TransportBulkAction.getIndexWriteRequest(upsertRequest), indexRequest); @@ -220,7 +214,7 @@ public void testGetIndexWriteRequest() throws Exception { DeleteRequest deleteRequest = new DeleteRequest("index", "id"); assertNull(TransportBulkAction.getIndexWriteRequest(deleteRequest)); - UpdateRequest badUpsertRequest = new UpdateRequest("index", "type", "id1"); + UpdateRequest badUpsertRequest = new UpdateRequest("index", "id1"); assertNull(TransportBulkAction.getIndexWriteRequest(badUpsertRequest)); } diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTookTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTookTests.java index e2e4f4dd5daab..713b506f14299 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTookTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTookTests.java @@ -55,7 +55,6 @@ import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.IndexingPressureService; -import org.opensearch.rest.action.document.RestBulkAction; import org.opensearch.indices.SystemIndices; import org.opensearch.tasks.Task; import org.opensearch.test.OpenSearchTestCase; @@ -242,8 +241,6 @@ public void onFailure(Exception e) { } }); - // This test's JSON contains outdated references to types - assertWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE); } static class Resolver extends IndexNameExpressionResolver { diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java index 223ef3795d5e5..733d09126004b 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java @@ -122,8 +122,7 @@ public void testExecuteBulkIndexRequest() throws Exception { BulkItemRequest[] items = new BulkItemRequest[1]; boolean create = 
randomBoolean(); - DocWriteRequest writeRequest = new IndexRequest("index", "_doc", "id").source(Requests.INDEX_CONTENT_TYPE) - .create(create); + DocWriteRequest writeRequest = new IndexRequest("index").id("id").source(Requests.INDEX_CONTENT_TYPE).create(create); BulkItemRequest primaryRequest = new BulkItemRequest(0, writeRequest); items[0] = primaryRequest; BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items); @@ -154,7 +153,7 @@ public void testExecuteBulkIndexRequest() throws Exception { // Assert that the document actually made it there assertDocCount(shard, 1); - writeRequest = new IndexRequest("index", "_doc", "id").source(Requests.INDEX_CONTENT_TYPE).create(true); + writeRequest = new IndexRequest("index").id("id").source(Requests.INDEX_CONTENT_TYPE).create(true); primaryRequest = new BulkItemRequest(0, writeRequest); items[0] = primaryRequest; bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items); @@ -186,7 +185,6 @@ public void testExecuteBulkIndexRequest() throws Exception { BulkItemResponse.Failure failure = primaryResponse.getFailure(); assertThat(failure.getIndex(), equalTo("index")); - assertThat(failure.getType(), equalTo("_doc")); assertThat(failure.getId(), equalTo("id")); assertThat(failure.getCause().getClass(), equalTo(VersionConflictEngineException.class)); assertThat(failure.getCause().getMessage(), containsString("version conflict, document already exists (current version [1])")); @@ -204,7 +202,8 @@ public void testSkipBulkIndexRequestIfAborted() throws Exception { BulkItemRequest[] items = new BulkItemRequest[randomIntBetween(2, 5)]; for (int i = 0; i < items.length; i++) { - DocWriteRequest writeRequest = new IndexRequest("index", "_doc", "id_" + i).source(Requests.INDEX_CONTENT_TYPE) + DocWriteRequest writeRequest = new IndexRequest("index").id("id_" + i) + .source(Requests.INDEX_CONTENT_TYPE) .opType(DocWriteRequest.OpType.INDEX); items[i] = new BulkItemRequest(i, writeRequest); } @@ -236,7 +235,6 @@ public void testSkipBulkIndexRequestIfAborted() throws Exception { BulkItemResponse response = result.finalResponseIfSuccessful.getResponses()[i]; assertThat(response.getItemId(), equalTo(i)); assertThat(response.getIndex(), equalTo("index")); - assertThat(response.getType(), equalTo("_doc")); assertThat(response.getId(), equalTo("id_" + i)); assertThat(response.getOpType(), equalTo(DocWriteRequest.OpType.INDEX)); if (response.getItemId() == rejectItem.id()) { @@ -266,11 +264,7 @@ public void testSkipBulkIndexRequestIfAborted() throws Exception { public void testExecuteBulkIndexRequestWithMappingUpdates() throws Exception { BulkItemRequest[] items = new BulkItemRequest[1]; - DocWriteRequest writeRequest = new IndexRequest("index", "_doc", "id").source( - Requests.INDEX_CONTENT_TYPE, - "foo", - "bar" - ); + DocWriteRequest writeRequest = new IndexRequest("index").id("id").source(Requests.INDEX_CONTENT_TYPE, "foo", "bar"); items[0] = new BulkItemRequest(0, writeRequest); BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items); @@ -344,11 +338,7 @@ public void testExecuteBulkIndexRequestWithErrorWhileUpdatingMapping() throws Ex IndexShard shard = newStartedShard(true); BulkItemRequest[] items = new BulkItemRequest[1]; - DocWriteRequest writeRequest = new IndexRequest("index", "_doc", "id").source( - Requests.INDEX_CONTENT_TYPE, - "foo", - "bar" - ); + DocWriteRequest writeRequest = new IndexRequest("index").id("id").source(Requests.INDEX_CONTENT_TYPE, "foo", "bar"); 
items[0] = new BulkItemRequest(0, writeRequest); BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items); @@ -394,7 +384,6 @@ public void onFailure(final Exception e) { assertThat(primaryResponse.getFailureMessage(), containsString("some kind of exception")); BulkItemResponse.Failure failure = primaryResponse.getFailure(); assertThat(failure.getIndex(), equalTo("index")); - assertThat(failure.getType(), equalTo("_doc")); assertThat(failure.getId(), equalTo("id")); assertThat(failure.getCause(), equalTo(err)); @@ -405,7 +394,7 @@ public void testExecuteBulkDeleteRequest() throws Exception { IndexShard shard = newStartedShard(true); BulkItemRequest[] items = new BulkItemRequest[1]; - DocWriteRequest writeRequest = new DeleteRequest("index", "_doc", "id"); + DocWriteRequest writeRequest = new DeleteRequest("index", "id"); items[0] = new BulkItemRequest(0, writeRequest); BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items); @@ -444,7 +433,6 @@ public void testExecuteBulkDeleteRequest() throws Exception { assertThat(response.getResult(), equalTo(DocWriteResponse.Result.NOT_FOUND)); assertThat(response.getShardId(), equalTo(shard.shardId())); assertThat(response.getIndex(), equalTo("index")); - assertThat(response.getType(), equalTo("_doc")); assertThat(response.getId(), equalTo("id")); assertThat(response.getVersion(), equalTo(1L)); assertThat(response.getSeqNo(), equalTo(0L)); @@ -453,7 +441,7 @@ public void testExecuteBulkDeleteRequest() throws Exception { // Now do the same after indexing the document, it should now find and delete the document indexDoc(shard, "_doc", "id", "{}"); - writeRequest = new DeleteRequest("index", "_doc", "id"); + writeRequest = new DeleteRequest("index", "id"); items[0] = new BulkItemRequest(0, writeRequest); bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items); @@ -492,7 +480,6 @@ public void testExecuteBulkDeleteRequest() throws Exception { assertThat(response.getResult(), equalTo(DocWriteResponse.Result.DELETED)); assertThat(response.getShardId(), equalTo(shard.shardId())); assertThat(response.getIndex(), equalTo("index")); - assertThat(response.getType(), equalTo("_doc")); assertThat(response.getId(), equalTo("id")); assertThat(response.getVersion(), equalTo(3L)); assertThat(response.getSeqNo(), equalTo(2L)); @@ -503,14 +490,10 @@ public void testExecuteBulkDeleteRequest() throws Exception { } public void testNoopUpdateRequest() throws Exception { - DocWriteRequest writeRequest = new UpdateRequest("index", "_doc", "id").doc( - Requests.INDEX_CONTENT_TYPE, - "field", - "value" - ); + DocWriteRequest writeRequest = new UpdateRequest("index", "id").doc(Requests.INDEX_CONTENT_TYPE, "field", "value"); BulkItemRequest primaryRequest = new BulkItemRequest(0, writeRequest); - DocWriteResponse noopUpdateResponse = new UpdateResponse(shardId, "_doc", "id", 0, 2, 1, DocWriteResponse.Result.NOOP); + DocWriteResponse noopUpdateResponse = new UpdateResponse(shardId, "id", 0, 2, 1, DocWriteResponse.Result.NOOP); IndexShard shard = mock(IndexShard.class); @@ -556,14 +539,10 @@ public void testNoopUpdateRequest() throws Exception { public void testUpdateRequestWithFailure() throws Exception { IndexSettings indexSettings = new IndexSettings(indexMetadata(), Settings.EMPTY); - DocWriteRequest writeRequest = new UpdateRequest("index", "_doc", "id").doc( - Requests.INDEX_CONTENT_TYPE, - "field", - "value" - ); + DocWriteRequest writeRequest = new UpdateRequest("index", 
"id").doc(Requests.INDEX_CONTENT_TYPE, "field", "value"); BulkItemRequest primaryRequest = new BulkItemRequest(0, writeRequest); - IndexRequest updateResponse = new IndexRequest("index", "_doc", "id").source(Requests.INDEX_CONTENT_TYPE, "field", "value"); + IndexRequest updateResponse = new IndexRequest("index").id("id").source(Requests.INDEX_CONTENT_TYPE, "field", "value"); Exception err = new OpenSearchException("I'm dead <(x.x)>"); Engine.IndexResult indexResult = new Engine.IndexResult(err, 0, 0, 0); @@ -610,7 +589,6 @@ public void testUpdateRequestWithFailure() throws Exception { assertThat(primaryResponse.getFailureMessage(), containsString("I'm dead <(x.x)>")); BulkItemResponse.Failure failure = primaryResponse.getFailure(); assertThat(failure.getIndex(), equalTo("index")); - assertThat(failure.getType(), equalTo("_doc")); assertThat(failure.getId(), equalTo("id")); assertThat(failure.getCause(), equalTo(err)); assertThat(failure.getStatus(), equalTo(RestStatus.INTERNAL_SERVER_ERROR)); @@ -618,14 +596,10 @@ public void testUpdateRequestWithFailure() throws Exception { public void testUpdateRequestWithConflictFailure() throws Exception { IndexSettings indexSettings = new IndexSettings(indexMetadata(), Settings.EMPTY); - DocWriteRequest writeRequest = new UpdateRequest("index", "_doc", "id").doc( - Requests.INDEX_CONTENT_TYPE, - "field", - "value" - ); + DocWriteRequest writeRequest = new UpdateRequest("index", "id").doc(Requests.INDEX_CONTENT_TYPE, "field", "value"); BulkItemRequest primaryRequest = new BulkItemRequest(0, writeRequest); - IndexRequest updateResponse = new IndexRequest("index", "_doc", "id").source(Requests.INDEX_CONTENT_TYPE, "field", "value"); + IndexRequest updateResponse = new IndexRequest("index").id("id").source(Requests.INDEX_CONTENT_TYPE, "field", "value"); Exception err = new VersionConflictEngineException(shardId, "id", "I'm conflicted <(;_;)>"); Engine.IndexResult indexResult = new Engine.IndexResult(err, 0, 0, 0); @@ -670,7 +644,6 @@ public void testUpdateRequestWithConflictFailure() throws Exception { assertThat(primaryResponse.getFailureMessage(), containsString("I'm conflicted <(;_;)>")); BulkItemResponse.Failure failure = primaryResponse.getFailure(); assertThat(failure.getIndex(), equalTo("index")); - assertThat(failure.getType(), equalTo("_doc")); assertThat(failure.getId(), equalTo("id")); assertThat(failure.getCause(), equalTo(err)); assertThat(failure.getStatus(), equalTo(RestStatus.CONFLICT)); @@ -678,14 +651,10 @@ public void testUpdateRequestWithConflictFailure() throws Exception { public void testUpdateRequestWithSuccess() throws Exception { IndexSettings indexSettings = new IndexSettings(indexMetadata(), Settings.EMPTY); - DocWriteRequest writeRequest = new UpdateRequest("index", "_doc", "id").doc( - Requests.INDEX_CONTENT_TYPE, - "field", - "value" - ); + DocWriteRequest writeRequest = new UpdateRequest("index", "id").doc(Requests.INDEX_CONTENT_TYPE, "field", "value"); BulkItemRequest primaryRequest = new BulkItemRequest(0, writeRequest); - IndexRequest updateResponse = new IndexRequest("index", "_doc", "id").source(Requests.INDEX_CONTENT_TYPE, "field", "value"); + IndexRequest updateResponse = new IndexRequest("index").id("id").source(Requests.INDEX_CONTENT_TYPE, "field", "value"); boolean created = randomBoolean(); Translog.Location resultLocation = new Translog.Location(42, 42, 42); @@ -739,14 +708,10 @@ public void testUpdateRequestWithSuccess() throws Exception { public void testUpdateWithDelete() throws Exception { IndexSettings 
indexSettings = new IndexSettings(indexMetadata(), Settings.EMPTY); - DocWriteRequest<UpdateRequest> writeRequest = new UpdateRequest("index", "_doc", "id").doc( - Requests.INDEX_CONTENT_TYPE, - "field", - "value" - ); + DocWriteRequest<UpdateRequest> writeRequest = new UpdateRequest("index", "id").doc(Requests.INDEX_CONTENT_TYPE, "field", "value"); BulkItemRequest primaryRequest = new BulkItemRequest(0, writeRequest); - DeleteRequest updateResponse = new DeleteRequest("index", "_doc", "id"); + DeleteRequest updateResponse = new DeleteRequest("index", "id"); boolean found = randomBoolean(); Translog.Location resultLocation = new Translog.Location(42, 42, 42); @@ -796,11 +761,7 @@ public void testUpdateWithDelete() throws Exception { } public void testFailureDuringUpdateProcessing() throws Exception { - DocWriteRequest<UpdateRequest> writeRequest = new UpdateRequest("index", "_doc", "id").doc( - Requests.INDEX_CONTENT_TYPE, - "field", - "value" - ); + DocWriteRequest<UpdateRequest> writeRequest = new UpdateRequest("index", "id").doc(Requests.INDEX_CONTENT_TYPE, "field", "value"); BulkItemRequest primaryRequest = new BulkItemRequest(0, writeRequest); IndexShard shard = mock(IndexShard.class); @@ -833,7 +794,6 @@ public void testFailureDuringUpdateProcessing() throws Exception { assertThat(primaryResponse.getFailureMessage(), containsString("oops")); BulkItemResponse.Failure failure = primaryResponse.getFailure(); assertThat(failure.getIndex(), equalTo("index")); - assertThat(failure.getType(), equalTo("_doc")); assertThat(failure.getId(), equalTo("id")); assertThat(failure.getCause(), equalTo(err)); assertThat(failure.getStatus(), equalTo(RestStatus.INTERNAL_SERVER_ERROR)); @@ -844,7 +804,8 @@ public void testTranslogPositionToSync() throws Exception { BulkItemRequest[] items = new BulkItemRequest[randomIntBetween(2, 5)]; for (int i = 0; i < items.length; i++) { - DocWriteRequest<IndexRequest> writeRequest = new IndexRequest("index", "_doc", "id_" + i).source(Requests.INDEX_CONTENT_TYPE) + DocWriteRequest<IndexRequest> writeRequest = new IndexRequest("index").id("id_" + i) + .source(Requests.INDEX_CONTENT_TYPE) .opType(DocWriteRequest.OpType.INDEX); items[i] = new BulkItemRequest(i, writeRequest); } @@ -881,14 +842,14 @@ public void testTranslogPositionToSync() throws Exception { public void testNoOpReplicationOnPrimaryDocumentFailure() throws Exception { final IndexShard shard = spy(newStartedShard(false)); - BulkItemRequest itemRequest = new BulkItemRequest(0, new IndexRequest("index", "_doc").source(Requests.INDEX_CONTENT_TYPE)); + BulkItemRequest itemRequest = new BulkItemRequest(0, new IndexRequest("index").source(Requests.INDEX_CONTENT_TYPE)); final String failureMessage = "simulated primary failure"; final IOException exception = new IOException(failureMessage); itemRequest.setPrimaryResponse( new BulkItemResponse( 0, randomFrom(DocWriteRequest.OpType.CREATE, DocWriteRequest.OpType.DELETE, DocWriteRequest.OpType.INDEX), - new BulkItemResponse.Failure("index", "_doc", "1", exception, 1L, 1L) + new BulkItemResponse.Failure("index", "1", exception, 1L, 1L) ) ); BulkItemRequest[] itemRequests = new BulkItemRequest[1]; @@ -901,12 +862,12 @@ public void testNoOpReplicationOnPrimaryDocumentFailure() throws Exception { public void testRetries() throws Exception { IndexSettings indexSettings = new IndexSettings(indexMetadata(), Settings.EMPTY); - UpdateRequest writeRequest = new UpdateRequest("index", "_doc", "id").doc(Requests.INDEX_CONTENT_TYPE, "field", "value"); + UpdateRequest writeRequest = new UpdateRequest("index", "id").doc(Requests.INDEX_CONTENT_TYPE, "field", "value");
// the beating will continue until success has come. writeRequest.retryOnConflict(Integer.MAX_VALUE); BulkItemRequest primaryRequest = new BulkItemRequest(0, writeRequest); - IndexRequest updateResponse = new IndexRequest("index", "_doc", "id").source(Requests.INDEX_CONTENT_TYPE, "field", "value"); + IndexRequest updateResponse = new IndexRequest("index").id("id").source(Requests.INDEX_CONTENT_TYPE, "field", "value"); Exception err = new VersionConflictEngineException(shardId, "id", "I'm conflicted <(;_;)>"); Engine.IndexResult conflictedResult = new Engine.IndexResult(err, 0); @@ -1084,7 +1045,7 @@ private void randomlySetIgnoredPrimaryResponse(BulkItemRequest primaryRequest) { new BulkItemResponse( 0, DocWriteRequest.OpType.INDEX, - new IndexResponse(shardId, "_doc", "ignore-primary-response-on-primary", 42, 42, 42, false) + new IndexResponse(shardId, "ignore-primary-response-on-primary", 42, 42, 42, false) ) ); } diff --git a/server/src/test/java/org/opensearch/action/delete/DeleteRequestTests.java b/server/src/test/java/org/opensearch/action/delete/DeleteRequestTests.java index acd07b781be0a..0dda8969e7d74 100644 --- a/server/src/test/java/org/opensearch/action/delete/DeleteRequestTests.java +++ b/server/src/test/java/org/opensearch/action/delete/DeleteRequestTests.java @@ -42,23 +42,14 @@ public class DeleteRequestTests extends OpenSearchTestCase { public void testValidation() { { - final DeleteRequest request = new DeleteRequest("index4", "_doc", "0"); + final DeleteRequest request = new DeleteRequest("index4", "0"); final ActionRequestValidationException validate = request.validate(); assertThat(validate, nullValue()); } { - // Empty types are accepted but fail validation - final DeleteRequest request = new DeleteRequest("index4", "", randomBoolean() ? "" : null); - final ActionRequestValidationException validate = request.validate(); - - assertThat(validate, not(nullValue())); - assertThat(validate.validationErrors(), hasItems("type is missing", "id is missing")); - } - { - // Null types are defaulted - final DeleteRequest request = new DeleteRequest("index4", randomBoolean() ? 
"" : null); + final DeleteRequest request = new DeleteRequest("index4", null); final ActionRequestValidationException validate = request.validate(); assertThat(validate, not(nullValue())); diff --git a/server/src/test/java/org/opensearch/action/delete/DeleteResponseTests.java b/server/src/test/java/org/opensearch/action/delete/DeleteResponseTests.java index 5b2a1d61614cb..e6c80b8ebdb61 100644 --- a/server/src/test/java/org/opensearch/action/delete/DeleteResponseTests.java +++ b/server/src/test/java/org/opensearch/action/delete/DeleteResponseTests.java @@ -55,21 +55,21 @@ public class DeleteResponseTests extends OpenSearchTestCase { public void testToXContent() { { - DeleteResponse response = new DeleteResponse(new ShardId("index", "index_uuid", 0), "type", "id", 3, 17, 5, true); + DeleteResponse response = new DeleteResponse(new ShardId("index", "index_uuid", 0), "id", 3, 17, 5, true); String output = Strings.toString(response); assertEquals( - "{\"_index\":\"index\",\"_type\":\"type\",\"_id\":\"id\",\"_version\":5,\"result\":\"deleted\"," + "{\"_index\":\"index\",\"_id\":\"id\",\"_version\":5,\"result\":\"deleted\"," + "\"_shards\":null,\"_seq_no\":3,\"_primary_term\":17}", output ); } { - DeleteResponse response = new DeleteResponse(new ShardId("index", "index_uuid", 0), "type", "id", -1, 0, 7, true); + DeleteResponse response = new DeleteResponse(new ShardId("index", "index_uuid", 0), "id", -1, 0, 7, true); response.setForcedRefresh(true); response.setShardInfo(new ReplicationResponse.ShardInfo(10, 5)); String output = Strings.toString(response); assertEquals( - "{\"_index\":\"index\",\"_type\":\"type\",\"_id\":\"id\",\"_version\":7,\"result\":\"deleted\"," + "{\"_index\":\"index\",\"_id\":\"id\",\"_version\":7,\"result\":\"deleted\"," + "\"forced_refresh\":true,\"_shards\":{\"total\":10,\"successful\":5,\"failed\":0}}", output ); @@ -141,19 +141,11 @@ public static Tuple randomDeleteResponse() { Tuple shardInfos = RandomObjects.randomShardInfo(random()); - DeleteResponse actual = new DeleteResponse(new ShardId(index, indexUUid, shardId), type, id, seqNo, primaryTerm, version, found); + DeleteResponse actual = new DeleteResponse(new ShardId(index, indexUUid, shardId), id, seqNo, primaryTerm, version, found); actual.setForcedRefresh(forcedRefresh); actual.setShardInfo(shardInfos.v1()); - DeleteResponse expected = new DeleteResponse( - new ShardId(index, INDEX_UUID_NA_VALUE, -1), - type, - id, - seqNo, - primaryTerm, - version, - found - ); + DeleteResponse expected = new DeleteResponse(new ShardId(index, INDEX_UUID_NA_VALUE, -1), id, seqNo, primaryTerm, version, found); expected.setForcedRefresh(forcedRefresh); expected.setShardInfo(shardInfos.v2()); diff --git a/server/src/test/java/org/opensearch/action/explain/ExplainRequestTests.java b/server/src/test/java/org/opensearch/action/explain/ExplainRequestTests.java index 8dda2a94a0a76..6642f5c10522e 100644 --- a/server/src/test/java/org/opensearch/action/explain/ExplainRequestTests.java +++ b/server/src/test/java/org/opensearch/action/explain/ExplainRequestTests.java @@ -68,9 +68,9 @@ public void setUp() throws Exception { public void testSerialize() throws IOException { try (BytesStreamOutput output = new BytesStreamOutput()) { - ExplainRequest request = new ExplainRequest("index", "type", "id"); + ExplainRequest request = new ExplainRequest("index", "id"); request.fetchSourceContext(new FetchSourceContext(true, new String[] { "field1.*" }, new String[] { "field2.*" })); - request.filteringAlias(new 
AliasFilter(QueryBuilders.termQuery("filter_field", "value"), new String[] { "alias0", "alias1" })); + request.filteringAlias(new AliasFilter(QueryBuilders.termQuery("filter_field", "value"), "alias0", "alias1")); request.preference("the_preference"); request.query(QueryBuilders.termQuery("field", "value")); request.storedFields(new String[] { "field1", "field2" }); @@ -90,7 +90,7 @@ public void testSerialize() throws IOException { public void testValidation() { { - final ExplainRequest request = new ExplainRequest("index4", "_doc", "0"); + final ExplainRequest request = new ExplainRequest("index4", "0"); request.query(QueryBuilders.termQuery("field", "value")); final ActionRequestValidationException validate = request.validate(); @@ -99,12 +99,12 @@ public void testValidation() { { - final ExplainRequest request = new ExplainRequest("index4", randomBoolean() ? "" : null, randomBoolean() ? "" : null); + final ExplainRequest request = new ExplainRequest("index4", randomBoolean() ? "" : null); request.query(QueryBuilders.termQuery("field", "value")); final ActionRequestValidationException validate = request.validate(); assertThat(validate, not(nullValue())); - assertThat(validate.validationErrors(), hasItems("type is missing", "id is missing")); + assertThat(validate.validationErrors(), hasItems("id is missing")); } } } diff --git a/server/src/test/java/org/opensearch/action/explain/ExplainResponseTests.java b/server/src/test/java/org/opensearch/action/explain/ExplainResponseTests.java index 8b95b06b0ee8b..b6918bd3590a9 100644 --- a/server/src/test/java/org/opensearch/action/explain/ExplainResponseTests.java +++ b/server/src/test/java/org/opensearch/action/explain/ExplainResponseTests.java @@ -71,14 +71,12 @@ protected Writeable.Reader<ExplainResponse> instanceReader() { @Override protected ExplainResponse createTestInstance() { String index = randomAlphaOfLength(5); - String type = randomAlphaOfLength(5); String id = String.valueOf(randomIntBetween(1, 100)); boolean exist = randomBoolean(); Explanation explanation = randomExplanation(randomExplanation(randomExplanation()), randomExplanation()); String fieldName = randomAlphaOfLength(10); List<Object> values = Arrays.asList(randomAlphaOfLengthBetween(3, 10), randomInt(), randomLong(), randomDouble(), randomBoolean()); GetResult getResult = new GetResult( - randomAlphaOfLengthBetween(3, 10), randomAlphaOfLengthBetween(3, 10), randomAlphaOfLengthBetween(3, 10), 0, @@ -89,7 +87,7 @@ protected ExplainResponse createTestInstance() { singletonMap(fieldName, new DocumentField(fieldName, values)), null ); - return new ExplainResponse(index, type, id, exist, explanation, getResult); + return new ExplainResponse(index, id, exist, explanation, getResult); } @Override @@ -104,7 +102,6 @@ public void testToXContent() throws IOException { boolean exist = true; Explanation explanation = Explanation.match(1.0f, "description", Collections.emptySet()); GetResult getResult = new GetResult( - null, null, null, 0, @@ -115,7 +112,7 @@ singletonMap("field1", new DocumentField("field1", singletonList("value1"))), null ); - ExplainResponse response = new ExplainResponse(index, type, id, exist, explanation, getResult); + ExplainResponse response = new ExplainResponse(index, id, exist, explanation, getResult); XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); response.toXContent(builder, ToXContent.EMPTY_PARAMS); @@ -124,7 +121,6 @@ public void testToXContent() throws IOException { String expectedResponse = ("{\n"
+ " \"_index\":\"index\",\n" - + " \"_type\":\"type\",\n" + " \"_id\":\"1\",\n" + " \"matched\":true,\n" + " \"explanation\":{\n" diff --git a/server/src/test/java/org/opensearch/action/get/GetRequestTests.java b/server/src/test/java/org/opensearch/action/get/GetRequestTests.java index 91fcf57be8bef..13d12cdc8af87 100644 --- a/server/src/test/java/org/opensearch/action/get/GetRequestTests.java +++ b/server/src/test/java/org/opensearch/action/get/GetRequestTests.java @@ -42,19 +42,19 @@ public class GetRequestTests extends OpenSearchTestCase { public void testValidation() { { - final GetRequest request = new GetRequest("index4", "_doc", "0"); + final GetRequest request = new GetRequest("index4", "0"); final ActionRequestValidationException validate = request.validate(); assertThat(validate, nullValue()); } { - final GetRequest request = new GetRequest("index4", randomBoolean() ? "" : null, randomBoolean() ? "" : null); + final GetRequest request = new GetRequest("index4", randomBoolean() ? "" : null); final ActionRequestValidationException validate = request.validate(); assertThat(validate, not(nullValue())); - assertEquals(2, validate.validationErrors().size()); - assertThat(validate.validationErrors(), hasItems("type is missing", "id is missing")); + assertEquals(1, validate.validationErrors().size()); + assertThat(validate.validationErrors(), hasItems("id is missing")); } } } diff --git a/server/src/test/java/org/opensearch/action/get/GetResponseTests.java b/server/src/test/java/org/opensearch/action/get/GetResponseTests.java index 108eeda79e173..39b330fa10a7b 100644 --- a/server/src/test/java/org/opensearch/action/get/GetResponseTests.java +++ b/server/src/test/java/org/opensearch/action/get/GetResponseTests.java @@ -108,7 +108,6 @@ public void testToXContent() { GetResponse getResponse = new GetResponse( new GetResult( "index", - "type", "id", 0, 1, @@ -121,17 +120,15 @@ public void testToXContent() { ); String output = Strings.toString(getResponse); assertEquals( - "{\"_index\":\"index\",\"_type\":\"type\",\"_id\":\"id\",\"_version\":1,\"_seq_no\":0,\"_primary_term\":1," + "{\"_index\":\"index\",\"_id\":\"id\",\"_version\":1,\"_seq_no\":0,\"_primary_term\":1," + "\"found\":true,\"_source\":{ \"field1\" : \"value1\", \"field2\":\"value2\"},\"fields\":{\"field1\":[\"value1\"]}}", output ); } { - GetResponse getResponse = new GetResponse( - new GetResult("index", "type", "id", UNASSIGNED_SEQ_NO, 0, 1, false, null, null, null) - ); + GetResponse getResponse = new GetResponse(new GetResult("index", "id", UNASSIGNED_SEQ_NO, 0, 1, false, null, null, null)); String output = Strings.toString(getResponse); - assertEquals("{\"_index\":\"index\",\"_type\":\"type\",\"_id\":\"id\",\"found\":false}", output); + assertEquals("{\"_index\":\"index\",\"_id\":\"id\",\"found\":false}", output); } } @@ -139,7 +136,6 @@ public void testToString() { GetResponse getResponse = new GetResponse( new GetResult( "index", - "type", "id", 0, 1, @@ -151,7 +147,7 @@ public void testToString() { ) ); assertEquals( - "{\"_index\":\"index\",\"_type\":\"type\",\"_id\":\"id\",\"_version\":1,\"_seq_no\":0,\"_primary_term\":1," + "{\"_index\":\"index\",\"_id\":\"id\",\"_version\":1,\"_seq_no\":0,\"_primary_term\":1," + "\"found\":true,\"_source\":{ \"field1\" : \"value1\", \"field2\":\"value2\"},\"fields\":{\"field1\":[\"value1\"]}}", getResponse.toString() ); @@ -167,7 +163,7 @@ public void testEqualsAndHashcode() { public void testFromXContentThrowsParsingException() throws IOException { GetResponse getResponse = new 
GetResponse( - new GetResult(null, null, null, UNASSIGNED_SEQ_NO, 0, randomIntBetween(1, 5), randomBoolean(), null, null, null) + new GetResult(null, null, UNASSIGNED_SEQ_NO, 0, randomIntBetween(1, 5), randomBoolean(), null, null, null) ); XContentType xContentType = randomFrom(XContentType.values()); @@ -175,7 +171,7 @@ public void testFromXContentThrowsParsingException() throws IOException { try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) { ParsingException exception = expectThrows(ParsingException.class, () -> GetResponse.fromXContent(parser)); - assertEquals("Missing required fields [_index,_type,_id]", exception.getMessage()); + assertEquals("Missing required fields [_index,_id]", exception.getMessage()); } } diff --git a/server/src/test/java/org/opensearch/action/get/MultiGetRequestTests.java b/server/src/test/java/org/opensearch/action/get/MultiGetRequestTests.java index 54432fa2fb9fb..cf26117210dfb 100644 --- a/server/src/test/java/org/opensearch/action/get/MultiGetRequestTests.java +++ b/server/src/test/java/org/opensearch/action/get/MultiGetRequestTests.java @@ -71,9 +71,8 @@ public void testAddWithInvalidKey() throws IOException { final MultiGetRequest mgr = new MultiGetRequest(); final ParsingException e = expectThrows(ParsingException.class, () -> { final String defaultIndex = randomAlphaOfLength(5); - final String defaultType = randomAlphaOfLength(3); final FetchSourceContext fetchSource = FetchSourceContext.FETCH_SOURCE; - mgr.add(defaultIndex, defaultType, null, fetchSource, null, parser, true); + mgr.add(defaultIndex, null, fetchSource, null, parser, true); }); assertThat(e.toString(), containsString("unknown key [doc] for a START_ARRAY, expected [docs] or [ids]")); } @@ -95,9 +94,8 @@ public void testUnexpectedField() throws IOException { final MultiGetRequest mgr = new MultiGetRequest(); final ParsingException e = expectThrows(ParsingException.class, () -> { final String defaultIndex = randomAlphaOfLength(5); - final String defaultType = randomAlphaOfLength(3); final FetchSourceContext fetchSource = FetchSourceContext.FETCH_SOURCE; - mgr.add(defaultIndex, defaultType, null, fetchSource, null, parser, true); + mgr.add(defaultIndex, null, fetchSource, null, parser, true); }); assertThat(e.toString(), containsString("unexpected token [START_OBJECT], expected [FIELD_NAME] or [START_ARRAY]")); } @@ -118,7 +116,7 @@ public void testAddWithValidSourceValueIsAccepted() throws Exception { ); MultiGetRequest multiGetRequest = new MultiGetRequest(); - multiGetRequest.add(randomAlphaOfLength(5), randomAlphaOfLength(3), null, FetchSourceContext.FETCH_SOURCE, null, parser, true); + multiGetRequest.add(randomAlphaOfLength(5), null, FetchSourceContext.FETCH_SOURCE, null, parser, true); assertEquals(2, multiGetRequest.getItems().size()); } @@ -130,7 +128,7 @@ public void testXContentSerialization() throws IOException { BytesReference shuffled = toShuffledXContent(expected, xContentType, ToXContent.EMPTY_PARAMS, false); try (XContentParser parser = createParser(XContentFactory.xContent(xContentType), shuffled)) { MultiGetRequest actual = new MultiGetRequest(); - actual.add(null, null, null, null, null, parser, true); + actual.add(null, null, null, null, parser, true); assertThat(parser.nextToken(), nullValue()); assertThat(actual.items.size(), equalTo(expected.items.size())); @@ -147,7 +145,7 @@ private MultiGetRequest createTestInstance() { int numItems = randomIntBetween(0, 128); MultiGetRequest request = new MultiGetRequest(); for (int i = 0; i < 
numItems; i++) { - MultiGetRequest.Item item = new MultiGetRequest.Item(randomAlphaOfLength(4), randomAlphaOfLength(4), randomAlphaOfLength(4)); + MultiGetRequest.Item item = new MultiGetRequest.Item(randomAlphaOfLength(4), randomAlphaOfLength(4)); if (randomBoolean()) { item.version(randomNonNegativeLong()); } diff --git a/server/src/test/java/org/opensearch/action/get/MultiGetResponseTests.java b/server/src/test/java/org/opensearch/action/get/MultiGetResponseTests.java index 36960ac2f322d..a167f41d41b66 100644 --- a/server/src/test/java/org/opensearch/action/get/MultiGetResponseTests.java +++ b/server/src/test/java/org/opensearch/action/get/MultiGetResponseTests.java @@ -64,7 +64,6 @@ public void testFromXContent() throws IOException { MultiGetItemResponse expectedItem = expected.getResponses()[i]; MultiGetItemResponse actualItem = parsed.getResponses()[i]; assertThat(actualItem.getIndex(), equalTo(expectedItem.getIndex())); - assertThat(actualItem.getType(), equalTo(expectedItem.getType())); assertThat(actualItem.getId(), equalTo(expectedItem.getId())); if (expectedItem.isFailed()) { assertThat(actualItem.isFailed(), is(true)); @@ -84,18 +83,7 @@ private static MultiGetResponse createTestInstance() { if (randomBoolean()) { items[i] = new MultiGetItemResponse( new GetResponse( - new GetResult( - randomAlphaOfLength(4), - randomAlphaOfLength(4), - randomAlphaOfLength(4), - 0, - 1, - randomNonNegativeLong(), - true, - null, - null, - null - ) + new GetResult(randomAlphaOfLength(4), randomAlphaOfLength(4), 0, 1, randomNonNegativeLong(), true, null, null, null) ), null ); @@ -103,7 +91,6 @@ private static MultiGetResponse createTestInstance() { items[i] = new MultiGetItemResponse( null, new MultiGetResponse.Failure( - randomAlphaOfLength(4), randomAlphaOfLength(4), randomAlphaOfLength(4), new RuntimeException(randomAlphaOfLength(4)) diff --git a/server/src/test/java/org/opensearch/action/get/MultiGetShardRequestTests.java b/server/src/test/java/org/opensearch/action/get/MultiGetShardRequestTests.java index fc4c7c18d528f..5bf4f50c50c63 100644 --- a/server/src/test/java/org/opensearch/action/get/MultiGetShardRequestTests.java +++ b/server/src/test/java/org/opensearch/action/get/MultiGetShardRequestTests.java @@ -58,11 +58,7 @@ public void testSerialization() throws IOException { MultiGetShardRequest multiGetShardRequest = new MultiGetShardRequest(multiGetRequest, "index", 0); int numItems = iterations(10, 30); for (int i = 0; i < numItems; i++) { - MultiGetRequest.Item item = new MultiGetRequest.Item( - "alias-" + randomAlphaOfLength(randomIntBetween(1, 10)), - "type", - "id-" + i - ); + MultiGetRequest.Item item = new MultiGetRequest.Item("alias-" + randomAlphaOfLength(randomIntBetween(1, 10)), "id-" + i); if (randomBoolean()) { int numFields = randomIntBetween(1, 5); String[] fields = new String[numFields]; @@ -97,7 +93,6 @@ public void testSerialization() throws IOException { MultiGetRequest.Item item = multiGetShardRequest.items.get(i); MultiGetRequest.Item item2 = multiGetShardRequest2.items.get(i); assertThat(item2.index(), equalTo(item.index())); - assertThat(item2.type(), equalTo(item.type())); assertThat(item2.id(), equalTo(item.id())); assertThat(item2.storedFields(), equalTo(item.storedFields())); assertThat(item2.version(), equalTo(item.version())); diff --git a/server/src/test/java/org/opensearch/action/get/TransportMultiGetActionTests.java b/server/src/test/java/org/opensearch/action/get/TransportMultiGetActionTests.java index 9141b86ded5a7..09bab1af7fc43 100644 --- 
a/server/src/test/java/org/opensearch/action/get/TransportMultiGetActionTests.java +++ b/server/src/test/java/org/opensearch/action/get/TransportMultiGetActionTests.java @@ -225,8 +225,8 @@ public void testTransportMultiGetAction() { final Task task = createTask(); final NodeClient client = new NodeClient(Settings.EMPTY, threadPool); final MultiGetRequestBuilder request = new MultiGetRequestBuilder(client, MultiGetAction.INSTANCE); - request.add(new MultiGetRequest.Item("index1", "_doc", "1")); - request.add(new MultiGetRequest.Item("index1", "_doc", "2")); + request.add(new MultiGetRequest.Item("index1", "1")); + request.add(new MultiGetRequest.Item("index1", "2")); final AtomicBoolean shardActionInvoked = new AtomicBoolean(false); transportAction = new TransportMultiGetAction( @@ -257,8 +257,8 @@ public void testTransportMultiGetAction_withMissingRouting() { final Task task = createTask(); final NodeClient client = new NodeClient(Settings.EMPTY, threadPool); final MultiGetRequestBuilder request = new MultiGetRequestBuilder(client, MultiGetAction.INSTANCE); - request.add(new MultiGetRequest.Item("index2", "_doc", "1").routing("1")); - request.add(new MultiGetRequest.Item("index2", "_doc", "2")); + request.add(new MultiGetRequest.Item("index2", "1").routing("1")); + request.add(new MultiGetRequest.Item("index2", "2")); final AtomicBoolean shardActionInvoked = new AtomicBoolean(false); transportAction = new TransportMultiGetAction( diff --git a/server/src/test/java/org/opensearch/action/index/IndexRequestTests.java b/server/src/test/java/org/opensearch/action/index/IndexRequestTests.java index 16d7b0348b7de..21305957d802b 100644 --- a/server/src/test/java/org/opensearch/action/index/IndexRequestTests.java +++ b/server/src/test/java/org/opensearch/action/index/IndexRequestTests.java @@ -137,11 +137,10 @@ public void testAutoGenIdTimestampIsSet() { public void testIndexResponse() { ShardId shardId = new ShardId(randomAlphaOfLengthBetween(3, 10), randomAlphaOfLengthBetween(3, 10), randomIntBetween(0, 1000)); - String type = randomAlphaOfLengthBetween(3, 10); String id = randomAlphaOfLengthBetween(3, 10); long version = randomLong(); boolean created = randomBoolean(); - IndexResponse indexResponse = new IndexResponse(shardId, type, id, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, version, created); + IndexResponse indexResponse = new IndexResponse(shardId, id, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, version, created); int total = randomIntBetween(1, 10); int successful = randomIntBetween(1, 10); ReplicationResponse.ShardInfo shardInfo = new ReplicationResponse.ShardInfo(total, successful); @@ -151,7 +150,6 @@ public void testIndexResponse() { forcedRefresh = randomBoolean(); indexResponse.setForcedRefresh(forcedRefresh); } - assertEquals(type, indexResponse.getType()); assertEquals(id, indexResponse.getId()); assertEquals(version, indexResponse.getVersion()); assertEquals(shardId, indexResponse.getShardId()); @@ -162,8 +160,6 @@ public void testIndexResponse() { assertEquals( "IndexResponse[index=" + shardId.getIndexName() - + ",type=" - + type + ",id=" + id + ",version=" @@ -220,13 +216,13 @@ public void testToStringSizeLimit() throws UnsupportedEncodingException { String source = "{\"name\":\"value\"}"; request.source(source, XContentType.JSON); - assertEquals("index {[index][_doc][null], source[" + source + "]}", request.toString()); + assertEquals("index {[index][null], source[" + source + "]}", request.toString()); source = "{\"name\":\"" + 
randomUnicodeOfLength(IndexRequest.MAX_SOURCE_LENGTH_IN_TOSTRING) + "\"}"; request.source(source, XContentType.JSON); int actualBytes = source.getBytes("UTF-8").length; assertEquals( - "index {[index][_doc][null], source[n/a, actual length: [" + "index {[index][null], source[n/a, actual length: [" + new ByteSizeValue(actualBytes).toString() + "], max length: " + new ByteSizeValue(IndexRequest.MAX_SOURCE_LENGTH_IN_TOSTRING).toString() diff --git a/server/src/test/java/org/opensearch/action/index/IndexResponseTests.java b/server/src/test/java/org/opensearch/action/index/IndexResponseTests.java index ebe8d0b2aaa1b..25d6a60299848 100644 --- a/server/src/test/java/org/opensearch/action/index/IndexResponseTests.java +++ b/server/src/test/java/org/opensearch/action/index/IndexResponseTests.java @@ -56,21 +56,21 @@ public class IndexResponseTests extends OpenSearchTestCase { public void testToXContent() { { - IndexResponse indexResponse = new IndexResponse(new ShardId("index", "index_uuid", 0), "type", "id", 3, 17, 5, true); + IndexResponse indexResponse = new IndexResponse(new ShardId("index", "index_uuid", 0), "id", 3, 17, 5, true); String output = Strings.toString(indexResponse); assertEquals( - "{\"_index\":\"index\",\"_type\":\"type\",\"_id\":\"id\",\"_version\":5,\"result\":\"created\",\"_shards\":null," + "{\"_index\":\"index\",\"_id\":\"id\",\"_version\":5,\"result\":\"created\",\"_shards\":null," + "\"_seq_no\":3,\"_primary_term\":17}", output ); } { - IndexResponse indexResponse = new IndexResponse(new ShardId("index", "index_uuid", 0), "type", "id", -1, 17, 7, true); + IndexResponse indexResponse = new IndexResponse(new ShardId("index", "index_uuid", 0), "id", -1, 17, 7, true); indexResponse.setForcedRefresh(true); indexResponse.setShardInfo(new ReplicationResponse.ShardInfo(10, 5)); String output = Strings.toString(indexResponse); assertEquals( - "{\"_index\":\"index\",\"_type\":\"type\",\"_id\":\"id\",\"_version\":7,\"result\":\"created\"," + "{\"_index\":\"index\",\"_id\":\"id\",\"_version\":7,\"result\":\"created\"," + "\"forced_refresh\":true,\"_shards\":{\"total\":10,\"successful\":5,\"failed\":0}}", output ); @@ -124,7 +124,6 @@ private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws public static void assertDocWriteResponse(DocWriteResponse expected, DocWriteResponse actual) { assertEquals(expected.getIndex(), actual.getIndex()); - assertEquals(expected.getType(), actual.getType()); assertEquals(expected.getId(), actual.getId()); assertEquals(expected.getSeqNo(), actual.getSeqNo()); assertEquals(expected.getResult(), actual.getResult()); @@ -144,7 +143,6 @@ public static Tuple<IndexResponse, IndexResponse> randomIndexResponse() { String index = randomAlphaOfLength(5); String indexUUid = randomAlphaOfLength(5); int shardId = randomIntBetween(0, 5); - String type = randomAlphaOfLength(5); String id = randomAlphaOfLength(5); long seqNo = randomFrom(SequenceNumbers.UNASSIGNED_SEQ_NO, randomNonNegativeLong(), (long) randomIntBetween(0, 10000)); long primaryTerm = seqNo == SequenceNumbers.UNASSIGNED_SEQ_NO ?
0 : randomIntBetween(1, 10000); @@ -154,19 +152,11 @@ public static Tuple<IndexResponse, IndexResponse> randomIndexResponse() { Tuple<ReplicationResponse.ShardInfo, ReplicationResponse.ShardInfo> shardInfos = RandomObjects.randomShardInfo(random()); - IndexResponse actual = new IndexResponse(new ShardId(index, indexUUid, shardId), type, id, seqNo, primaryTerm, version, created); + IndexResponse actual = new IndexResponse(new ShardId(index, indexUUid, shardId), id, seqNo, primaryTerm, version, created); actual.setForcedRefresh(forcedRefresh); actual.setShardInfo(shardInfos.v1()); - IndexResponse expected = new IndexResponse( - new ShardId(index, INDEX_UUID_NA_VALUE, -1), - type, - id, - seqNo, - primaryTerm, - version, - created - ); + IndexResponse expected = new IndexResponse(new ShardId(index, INDEX_UUID_NA_VALUE, -1), id, seqNo, primaryTerm, version, created); expected.setForcedRefresh(forcedRefresh); expected.setShardInfo(shardInfos.v2()); diff --git a/server/src/test/java/org/opensearch/action/ingest/SimulateExecutionServiceTests.java b/server/src/test/java/org/opensearch/action/ingest/SimulateExecutionServiceTests.java index 74a787244ca42..ff7b0dddb33a3 100644 --- a/server/src/test/java/org/opensearch/action/ingest/SimulateExecutionServiceTests.java +++ b/server/src/test/java/org/opensearch/action/ingest/SimulateExecutionServiceTests.java @@ -352,7 +352,7 @@ public void testAsyncSimulation() throws Exception { int numDocs = randomIntBetween(1, 64); List<IngestDocument> documents = new ArrayList<>(numDocs); for (int id = 0; id < numDocs; id++) { - documents.add(new IngestDocument("_index", "_type", Integer.toString(id), null, 0L, VersionType.INTERNAL, new HashMap<>())); + documents.add(new IngestDocument("_index", Integer.toString(id), null, 0L, VersionType.INTERNAL, new HashMap<>())); } Processor processor1 = new AbstractProcessor(null, null) { diff --git a/server/src/test/java/org/opensearch/action/ingest/SimulatePipelineRequestParsingTests.java b/server/src/test/java/org/opensearch/action/ingest/SimulatePipelineRequestParsingTests.java index f732178821b4a..c85c0a01de63e 100644 --- a/server/src/test/java/org/opensearch/action/ingest/SimulatePipelineRequestParsingTests.java +++ b/server/src/test/java/org/opensearch/action/ingest/SimulatePipelineRequestParsingTests.java @@ -86,15 +86,7 @@ public void init() throws IOException { when(ingestService.getProcessorFactories()).thenReturn(registry); } - public void testParseUsingPipelineStoreNoType() throws Exception { - innerTestParseUsingPipelineStore(false); - } - - public void testParseUsingPipelineStoreWithType() throws Exception { - innerTestParseUsingPipelineStore(true); - } - - private void innerTestParseUsingPipelineStore(boolean useExplicitType) throws Exception { + public void testParseUsingPipelineStore() throws Exception { int numDocs = randomIntBetween(1, 10); Map<String, Object> requestContent = new HashMap<>(); @@ -104,12 +96,8 @@ private void innerTestParseUsingPipelineStore(boolean useExplicitType) throws Ex for (int i = 0; i < numDocs; i++) { Map<String, Object> doc = new HashMap<>(); String index = randomAlphaOfLengthBetween(1, 10); - String type = randomAlphaOfLengthBetween(1, 10); String id = randomAlphaOfLengthBetween(1, 10); doc.put(INDEX.getFieldName(), index); - if (useExplicitType) { - doc.put(TYPE.getFieldName(), type); - } doc.put(ID.getFieldName(), id); String fieldName = randomAlphaOfLengthBetween(1, 10); String fieldValue = randomAlphaOfLengthBetween(1, 10); @@ -117,11 +105,6 @@ private void innerTestParseUsingPipelineStore(boolean useExplicitType) throws Ex docs.add(doc); Map<String, Object> expectedDoc = new HashMap<>();
expectedDoc.put(INDEX.getFieldName(), index); - if (useExplicitType) { - expectedDoc.put(TYPE.getFieldName(), type); - } else { - expectedDoc.put(TYPE.getFieldName(), "_doc"); - } expectedDoc.put(ID.getFieldName(), id); expectedDoc.put(Fields.SOURCE, Collections.singletonMap(fieldName, fieldValue)); expectedDocs.add(expectedDoc); @@ -140,7 +123,6 @@ private void innerTestParseUsingPipelineStore(boolean useExplicitType) throws Ex Map<String, Object> expectedDocument = expectedDocsIterator.next(); Map<IngestDocument.Metadata, Object> metadataMap = ingestDocument.extractMetadata(); assertThat(metadataMap.get(INDEX), equalTo(expectedDocument.get(INDEX.getFieldName()))); - assertThat(metadataMap.get(TYPE), equalTo(expectedDocument.get(TYPE.getFieldName()))); assertThat(metadataMap.get(ID), equalTo(expectedDocument.get(ID.getFieldName()))); assertThat(ingestDocument.getSourceAndMetadata(), equalTo(expectedDocument.get(Fields.SOURCE))); } @@ -148,9 +130,6 @@ private void innerTestParseUsingPipelineStore(boolean useExplicitType) throws Ex assertThat(actualRequest.getPipeline().getId(), equalTo(SIMULATED_PIPELINE_ID)); assertThat(actualRequest.getPipeline().getDescription(), nullValue()); assertThat(actualRequest.getPipeline().getProcessors().size(), equalTo(1)); - if (useExplicitType) { - assertWarnings("[types removal] specifying _type in pipeline simulation requests is deprecated"); - } } public void testParseWithProvidedPipelineNoType() throws Exception { diff --git a/server/src/test/java/org/opensearch/action/search/ExpandSearchPhaseTests.java b/server/src/test/java/org/opensearch/action/search/ExpandSearchPhaseTests.java index b9254777c1784..8be2b9b203da6 100644 --- a/server/src/test/java/org/opensearch/action/search/ExpandSearchPhaseTests.java +++ b/server/src/test/java/org/opensearch/action/search/ExpandSearchPhaseTests.java @@ -35,7 +35,6 @@ import org.apache.lucene.search.TotalHits; import org.opensearch.action.ActionListener; import org.opensearch.common.document.DocumentField; -import org.opensearch.common.text.Text; import org.opensearch.index.query.BoolQueryBuilder; import org.opensearch.index.query.InnerHitBuilder; import org.opensearch.index.query.QueryBuilder; @@ -66,8 +65,8 @@ public void testCollapseSingleHit() throws IOException { for (int innerHitNum = 0; innerHitNum < numInnerHits; innerHitNum++) { SearchHits hits = new SearchHits( new SearchHit[] { - new SearchHit(innerHitNum, "ID", new Text("type"), Collections.emptyMap(), Collections.emptyMap()), - new SearchHit(innerHitNum + 1, "ID", new Text("type"), Collections.emptyMap(), Collections.emptyMap()) }, + new SearchHit(innerHitNum, "ID", Collections.emptyMap(), Collections.emptyMap()), + new SearchHit(innerHitNum + 1, "ID", Collections.emptyMap(), Collections.emptyMap()) }, new TotalHits(2, TotalHits.Relation.EQUAL_TO), 1.0F ); @@ -109,7 +108,6 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL assertThat(groupBuilder.must(), Matchers.contains(QueryBuilders.termQuery("foo", "bar"))); } assertArrayEquals(mockSearchPhaseContext.getRequest().indices(), searchRequest.indices()); - assertArrayEquals(mockSearchPhaseContext.getRequest().types(), searchRequest.types()); List<MultiSearchResponse.Item> mSearchResponses = new ArrayList<>(numInnerHits); for (int innerHitNum = 0; innerHitNum < numInnerHits; innerHitNum++) { @@ -137,7 +135,6 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL new SearchHit( 1, "ID", - new Text("type"), Collections.singletonMap("someField", new DocumentField("someField", Collections.singletonList(collapseValue))),
Collections.emptyMap() ) }, @@ -166,8 +163,8 @@ public void testFailOneItemFailsEntirePhase() throws IOException { SearchHits collapsedHits = new SearchHits( new SearchHit[] { - new SearchHit(2, "ID", new Text("type"), Collections.emptyMap(), Collections.emptyMap()), - new SearchHit(3, "ID", new Text("type"), Collections.emptyMap(), Collections.emptyMap()) }, + new SearchHit(2, "ID", Collections.emptyMap(), Collections.emptyMap()), + new SearchHit(3, "ID", Collections.emptyMap(), Collections.emptyMap()) }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0F ); @@ -210,14 +207,12 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL new SearchHit( 1, "ID", - new Text("type"), Collections.singletonMap("someField", new DocumentField("someField", Collections.singletonList(collapseValue))), Collections.emptyMap() ), new SearchHit( 2, "ID2", - new Text("type"), Collections.singletonMap("someField", new DocumentField("someField", Collections.singletonList(collapseValue))), Collections.emptyMap() ) }, @@ -247,14 +242,12 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL new SearchHit( 1, "ID", - new Text("type"), Collections.singletonMap("someField", new DocumentField("someField", Collections.singletonList(null))), Collections.emptyMap() ), new SearchHit( 2, "ID2", - new Text("type"), Collections.singletonMap("someField", new DocumentField("someField", Collections.singletonList(null))), Collections.emptyMap() ) }, diff --git a/server/src/test/java/org/opensearch/action/search/MultiSearchRequestTests.java b/server/src/test/java/org/opensearch/action/search/MultiSearchRequestTests.java index 3b09e9861bd1f..de80ea22071e8 100644 --- a/server/src/test/java/org/opensearch/action/search/MultiSearchRequestTests.java +++ b/server/src/test/java/org/opensearch/action/search/MultiSearchRequestTests.java @@ -88,13 +88,11 @@ public void testSimpleAdd() throws Exception { request.requests().get(0).indicesOptions(), equalTo(IndicesOptions.fromOptions(true, true, true, true, SearchRequest.DEFAULT_INDICES_OPTIONS)) ); - assertThat(request.requests().get(0).types().length, equalTo(0)); assertThat(request.requests().get(1).indices()[0], equalTo("test")); assertThat( request.requests().get(1).indicesOptions(), equalTo(IndicesOptions.fromOptions(false, true, true, true, SearchRequest.DEFAULT_INDICES_OPTIONS)) ); - assertThat(request.requests().get(1).types()[0], equalTo("type1")); assertThat(request.requests().get(2).indices()[0], equalTo("test")); assertThat( request.requests().get(2).indicesOptions(), @@ -112,12 +110,9 @@ public void testSimpleAdd() throws Exception { ); assertThat(request.requests().get(5).indices(), is(Strings.EMPTY_ARRAY)); - assertThat(request.requests().get(5).types().length, equalTo(0)); assertThat(request.requests().get(6).indices(), is(Strings.EMPTY_ARRAY)); - assertThat(request.requests().get(6).types().length, equalTo(0)); assertThat(request.requests().get(6).searchType(), equalTo(SearchType.DFS_QUERY_THEN_FETCH)); assertThat(request.requests().get(7).indices(), is(Strings.EMPTY_ARRAY)); - assertThat(request.requests().get(7).types().length, equalTo(0)); } public void testFailWithUnknownKey() { @@ -148,7 +143,6 @@ public void testSimpleAddWithCarriageReturn() throws Exception { request.requests().get(0).indicesOptions(), equalTo(IndicesOptions.fromOptions(true, true, true, true, SearchRequest.DEFAULT_INDICES_OPTIONS)) ); - assertThat(request.requests().get(0).types().length, equalTo(0)); } public void 
testCancelAfterIntervalAtParentAndFewChildRequest() throws Exception { @@ -197,23 +191,17 @@ public void testDefaultIndicesOptions() throws IOException { request.requests().get(0).indicesOptions(), equalTo(IndicesOptions.fromOptions(true, true, true, true, SearchRequest.DEFAULT_INDICES_OPTIONS)) ); - assertThat(request.requests().get(0).types().length, equalTo(0)); } public void testSimpleAdd2() throws Exception { MultiSearchRequest request = parseMultiSearchRequestFromFile("/org/opensearch/action/search/simple-msearch2.json"); assertThat(request.requests().size(), equalTo(5)); assertThat(request.requests().get(0).indices()[0], equalTo("test")); - assertThat(request.requests().get(0).types().length, equalTo(0)); assertThat(request.requests().get(1).indices()[0], equalTo("test")); - assertThat(request.requests().get(1).types()[0], equalTo("type1")); assertThat(request.requests().get(2).indices(), is(Strings.EMPTY_ARRAY)); - assertThat(request.requests().get(2).types().length, equalTo(0)); assertThat(request.requests().get(3).indices(), is(Strings.EMPTY_ARRAY)); - assertThat(request.requests().get(3).types().length, equalTo(0)); assertThat(request.requests().get(3).searchType(), equalTo(SearchType.DFS_QUERY_THEN_FETCH)); assertThat(request.requests().get(4).indices(), is(Strings.EMPTY_ARRAY)); - assertThat(request.requests().get(4).types().length, equalTo(0)); } public void testSimpleAdd3() throws Exception { @@ -223,13 +211,9 @@ public void testSimpleAdd3() throws Exception { assertThat(request.requests().get(0).indices()[1], equalTo("test1")); assertThat(request.requests().get(1).indices()[0], equalTo("test2")); assertThat(request.requests().get(1).indices()[1], equalTo("test3")); - assertThat(request.requests().get(1).types()[0], equalTo("type1")); assertThat(request.requests().get(2).indices()[0], equalTo("test4")); assertThat(request.requests().get(2).indices()[1], equalTo("test1")); - assertThat(request.requests().get(2).types()[0], equalTo("type2")); - assertThat(request.requests().get(2).types()[1], equalTo("type1")); assertThat(request.requests().get(3).indices(), is(Strings.EMPTY_ARRAY)); - assertThat(request.requests().get(3).types().length, equalTo(0)); assertThat(request.requests().get(3).searchType(), equalTo(SearchType.DFS_QUERY_THEN_FETCH)); } @@ -242,13 +226,10 @@ public void testSimpleAdd4() throws Exception { assertThat(request.requests().get(0).preference(), nullValue()); assertThat(request.requests().get(1).indices()[0], equalTo("test2")); assertThat(request.requests().get(1).indices()[1], equalTo("test3")); - assertThat(request.requests().get(1).types()[0], equalTo("type1")); assertThat(request.requests().get(1).requestCache(), nullValue()); assertThat(request.requests().get(1).preference(), equalTo("_local")); assertThat(request.requests().get(2).indices()[0], equalTo("test4")); assertThat(request.requests().get(2).indices()[1], equalTo("test1")); - assertThat(request.requests().get(2).types()[0], equalTo("type2")); - assertThat(request.requests().get(2).types()[1], equalTo("type1")); assertThat(request.requests().get(2).routing(), equalTo("123")); } @@ -419,7 +400,6 @@ public void testMultiLineSerialization() throws IOException { null, null, null, - null, xContentRegistry(), true, deprecationLogger diff --git a/server/src/test/java/org/opensearch/action/search/SearchPhaseControllerTests.java b/server/src/test/java/org/opensearch/action/search/SearchPhaseControllerTests.java index 3774cafe12c00..2a6d6ee7e45bb 100644 --- 
a/server/src/test/java/org/opensearch/action/search/SearchPhaseControllerTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchPhaseControllerTests.java @@ -424,7 +424,7 @@ private static AtomicArray generateFetchResults(int nShards, List<SearchHit> searchHits = new ArrayList<>(); for (ScoreDoc scoreDoc : mergedSearchDocs) { if (scoreDoc.shardIndex == shardIndex) { - searchHits.add(new SearchHit(scoreDoc.doc, "", new Text(""), Collections.emptyMap(), Collections.emptyMap())); + searchHits.add(new SearchHit(scoreDoc.doc, "", Collections.emptyMap(), Collections.emptyMap())); if (scoreDoc.score > maxScore) { maxScore = scoreDoc.score; } @@ -435,7 +435,7 @@ private static AtomicArray generateFetchResults(int nShards, for (CompletionSuggestion.Entry.Option option : ((CompletionSuggestion) suggestion).getOptions()) { ScoreDoc doc = option.getDoc(); if (doc.shardIndex == shardIndex) { - searchHits.add(new SearchHit(doc.doc, "", new Text(""), Collections.emptyMap(), Collections.emptyMap())); + searchHits.add(new SearchHit(doc.doc, "", Collections.emptyMap(), Collections.emptyMap())); if (doc.score > maxScore) { maxScore = doc.score; } diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java index 8fe4e89a58f38..19544af63944c 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java @@ -127,7 +127,6 @@ public void testIllegalArguments() { SearchRequest searchRequest = new SearchRequest(); assertNotNull(searchRequest.indices()); assertNotNull(searchRequest.indicesOptions()); - assertNotNull(searchRequest.types()); assertNotNull(searchRequest.searchType()); NullPointerException e = expectThrows(NullPointerException.class, () -> searchRequest.indices((String[]) null)); @@ -138,11 +137,6 @@ public void testIllegalArguments() { e = expectThrows(NullPointerException.class, () -> searchRequest.indicesOptions(null)); assertEquals("indicesOptions must not be null", e.getMessage()); - e = expectThrows(NullPointerException.class, () -> searchRequest.types((String[]) null)); - assertEquals("types must not be null", e.getMessage()); - e = expectThrows(NullPointerException.class, () -> searchRequest.types((String) null)); - assertEquals("type must not be null", e.getMessage()); - e = expectThrows(NullPointerException.class, () -> searchRequest.searchType((SearchType) null)); assertEquals("searchType must not be null", e.getMessage()); @@ -242,7 +236,6 @@ private SearchRequest mutate(SearchRequest searchRequest) { ) ) ); - mutators.add(() -> mutation.types(ArrayUtils.concat(searchRequest.types(), new String[] { randomAlphaOfLength(10) }))); mutators.add(() -> mutation.preference(randomValueOtherThan(searchRequest.preference(), () -> randomAlphaOfLengthBetween(3, 10)))); mutators.add(() -> mutation.routing(randomValueOtherThan(searchRequest.routing(), () -> randomAlphaOfLengthBetween(3, 10)))); mutators.add(() -> mutation.requestCache((randomValueOtherThan(searchRequest.requestCache(), OpenSearchTestCase::randomBoolean)))); @@ -273,13 +266,13 @@ private SearchRequest mutate(SearchRequest searchRequest) { } public void testDescriptionForDefault() { - assertThat(toDescription(new SearchRequest()), equalTo("indices[], types[], search_type[QUERY_THEN_FETCH], source[]")); + assertThat(toDescription(new SearchRequest()), equalTo("indices[], search_type[QUERY_THEN_FETCH], source[]")); } public void
testDescriptionIncludesScroll() { assertThat( toDescription(new SearchRequest().scroll(TimeValue.timeValueMinutes(5))), - equalTo("indices[], types[], search_type[QUERY_THEN_FETCH], scroll[5m], source[]") + equalTo("indices[], search_type[QUERY_THEN_FETCH], scroll[5m], source[]") ); } diff --git a/server/src/test/java/org/opensearch/action/search/SearchResponseTests.java b/server/src/test/java/org/opensearch/action/search/SearchResponseTests.java index ae23d0bab9885..4a68503b6c3f8 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchResponseTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchResponseTests.java @@ -39,7 +39,6 @@ import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.settings.Settings; -import org.opensearch.common.text.Text; import org.opensearch.common.xcontent.NamedXContentRegistry; import org.opensearch.common.xcontent.ToXContent; import org.opensearch.common.xcontent.XContentBuilder; @@ -241,7 +240,7 @@ public void testFromXContentWithFailures() throws IOException { } public void testToXContent() { - SearchHit hit = new SearchHit(1, "id1", new Text("type"), Collections.emptyMap(), Collections.emptyMap()); + SearchHit hit = new SearchHit(1, "id1", Collections.emptyMap(), Collections.emptyMap()); hit.score(2.0f); SearchHit[] hits = new SearchHit[] { hit }; { @@ -279,7 +278,7 @@ public void testToXContent() { { expectedString.append("{\"total\":{\"value\":100,\"relation\":\"eq\"},"); expectedString.append("\"max_score\":1.5,"); - expectedString.append("\"hits\":[{\"_type\":\"type\",\"_id\":\"id1\",\"_score\":2.0}]}"); + expectedString.append("\"hits\":[{\"_id\":\"id1\",\"_score\":2.0}]}"); } } expectedString.append("}"); @@ -326,7 +325,7 @@ public void testToXContent() { { expectedString.append("{\"total\":{\"value\":100,\"relation\":\"eq\"},"); expectedString.append("\"max_score\":1.5,"); - expectedString.append("\"hits\":[{\"_type\":\"type\",\"_id\":\"id1\",\"_score\":2.0}]}"); + expectedString.append("\"hits\":[{\"_id\":\"id1\",\"_score\":2.0}]}"); } } expectedString.append("}"); diff --git a/server/src/test/java/org/opensearch/action/termvectors/AbstractTermVectorsTestCase.java b/server/src/test/java/org/opensearch/action/termvectors/AbstractTermVectorsTestCase.java index bd586294f5d49..78214334928a4 100644 --- a/server/src/test/java/org/opensearch/action/termvectors/AbstractTermVectorsTestCase.java +++ b/server/src/test/java/org/opensearch/action/termvectors/AbstractTermVectorsTestCase.java @@ -431,7 +431,7 @@ protected void validateResponse(TermVectorsResponse esResponse, Fields luceneFie } protected TermVectorsRequestBuilder getRequestForConfig(TestConfig config) { - return client().prepareTermVectors(randomBoolean() ? config.doc.index : config.doc.alias, config.doc.type, config.doc.id) + return client().prepareTermVectors(randomBoolean() ? 
config.doc.index : config.doc.alias, config.doc.id) .setPayloads(config.requestPayloads) .setOffsets(config.requestOffsets) .setPositions(config.requestPositions) diff --git a/server/src/test/java/org/opensearch/action/termvectors/GetTermVectorsTests.java b/server/src/test/java/org/opensearch/action/termvectors/GetTermVectorsTests.java index 699dad124db43..58c9a8d54159e 100644 --- a/server/src/test/java/org/opensearch/action/termvectors/GetTermVectorsTests.java +++ b/server/src/test/java/org/opensearch/action/termvectors/GetTermVectorsTests.java @@ -193,7 +193,7 @@ public void testRandomPayloadWithDelimitedPayloadTokenFilter() throws IOExceptio .execute() .actionGet(); client().admin().indices().prepareRefresh().get(); - TermVectorsRequestBuilder resp = client().prepareTermVectors("test", "type1", Integer.toString(1)) + TermVectorsRequestBuilder resp = client().prepareTermVectors("test", Integer.toString(1)) .setPayloads(true) .setOffsets(true) .setPositions(true) diff --git a/server/src/test/java/org/opensearch/action/termvectors/TermVectorsUnitTests.java b/server/src/test/java/org/opensearch/action/termvectors/TermVectorsUnitTests.java index b204535b749eb..089dfcaf65517 100644 --- a/server/src/test/java/org/opensearch/action/termvectors/TermVectorsUnitTests.java +++ b/server/src/test/java/org/opensearch/action/termvectors/TermVectorsUnitTests.java @@ -49,6 +49,7 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.Directory; +import org.opensearch.LegacyESVersion; import org.opensearch.action.termvectors.TermVectorsRequest.Flag; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.bytes.BytesReference; @@ -57,7 +58,9 @@ import org.opensearch.common.xcontent.XContentParser; import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.index.shard.ShardId; import org.opensearch.rest.action.document.RestTermVectorsAction; +import org.opensearch.tasks.TaskId; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.StreamsUtils; import org.hamcrest.Matchers; @@ -74,7 +77,7 @@ public class TermVectorsUnitTests extends OpenSearchTestCase { public void testStreamResponse() throws Exception { - TermVectorsResponse outResponse = new TermVectorsResponse("a", "b", "c"); + TermVectorsResponse outResponse = new TermVectorsResponse("a", "c"); outResponse.setExists(true); writeStandardTermVector(outResponse); @@ -91,7 +94,7 @@ public void testStreamResponse() throws Exception { // see if correct checkIfStandardTermVector(inResponse); - outResponse = new TermVectorsResponse("a", "b", "c"); + outResponse = new TermVectorsResponse("a", "c"); writeEmptyTermVector(outResponse); // write outBuffer = new ByteArrayOutputStream(); @@ -185,7 +188,7 @@ public void testRestRequestParsing() throws Exception { " {\"fields\" : [\"a\", \"b\",\"c\"], \"offsets\":false, \"positions\":false, \"payloads\":true}" ); - TermVectorsRequest tvr = new TermVectorsRequest(null, null, null); + TermVectorsRequest tvr = new TermVectorsRequest(null, null); XContentParser parser = createParser(JsonXContent.jsonXContent, inputBytes); TermVectorsRequest.parseRequest(tvr, parser); @@ -206,7 +209,7 @@ public void testRestRequestParsing() throws Exception { RestTermVectorsAction.addFieldStringsFromParameter(tvr, additionalFields); inputBytes = new BytesArray(" {\"offsets\":false, \"positions\":false, \"payloads\":true}"); - tvr = new TermVectorsRequest(null, null, 
null); + tvr = new TermVectorsRequest(null, null); parser = createParser(JsonXContent.jsonXContent, inputBytes); TermVectorsRequest.parseRequest(tvr, parser); additionalFields = ""; @@ -222,7 +225,7 @@ public void testRequestParsingThrowsException() { BytesReference inputBytes = new BytesArray( " {\"fields\" : \"a, b,c \", \"offsets\":false, \"positions\":false, \"payloads\":true, \"meaningless_term\":2}" ); - TermVectorsRequest tvr = new TermVectorsRequest(null, null, null); + TermVectorsRequest tvr = new TermVectorsRequest(null, null); boolean threwException = false; try { XContentParser parser = createParser(JsonXContent.jsonXContent, inputBytes); @@ -236,7 +239,7 @@ public void testRequestParsingThrowsException() { public void testStreamRequest() throws IOException { for (int i = 0; i < 10; i++) { - TermVectorsRequest request = new TermVectorsRequest("index", "type", "id"); + TermVectorsRequest request = new TermVectorsRequest("index", "id"); request.offsets(random().nextBoolean()); request.fieldStatistics(random().nextBoolean()); request.payloads(random().nextBoolean()); @@ -252,9 +255,55 @@ public void testStreamRequest() throws IOException { request.writeTo(out); // read - ByteArrayInputStream esInBuffer = new ByteArrayInputStream(outBuffer.toByteArray()); - InputStreamStreamInput esBuffer = new InputStreamStreamInput(esInBuffer); - TermVectorsRequest req2 = new TermVectorsRequest(esBuffer); + ByteArrayInputStream opensearchInBuffer = new ByteArrayInputStream(outBuffer.toByteArray()); + InputStreamStreamInput opensearchBuffer = new InputStreamStreamInput(opensearchInBuffer); + TermVectorsRequest req2 = new TermVectorsRequest(opensearchBuffer); + + assertThat(request.offsets(), equalTo(req2.offsets())); + assertThat(request.fieldStatistics(), equalTo(req2.fieldStatistics())); + assertThat(request.payloads(), equalTo(req2.payloads())); + assertThat(request.positions(), equalTo(req2.positions())); + assertThat(request.termStatistics(), equalTo(req2.termStatistics())); + assertThat(request.preference(), equalTo(pref)); + assertThat(request.routing(), equalTo(null)); + assertEquals(new BytesArray("{}"), request.doc()); + assertEquals(XContentType.JSON, request.xContentType()); + } + } + + public void testStreamRequestLegacyVersion() throws IOException { + for (int i = 0; i < 10; i++) { + TermVectorsRequest request = new TermVectorsRequest("index", "id"); + request.offsets(random().nextBoolean()); + request.fieldStatistics(random().nextBoolean()); + request.payloads(random().nextBoolean()); + request.positions(random().nextBoolean()); + request.termStatistics(random().nextBoolean()); + String pref = random().nextBoolean() ? 
"somePreference" : null; + request.preference(pref); + request.doc(new BytesArray("{}"), randomBoolean(), XContentType.JSON); + + // write using older version which contains types + ByteArrayOutputStream outBuffer = new ByteArrayOutputStream(); + OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer); + out.setVersion(LegacyESVersion.V_7_2_0); + request.writeTo(out); + + // First check the type on the stream was written as "_doc" by manually parsing the stream until the type + ByteArrayInputStream opensearchInBuffer = new ByteArrayInputStream(outBuffer.toByteArray()); + InputStreamStreamInput opensearchBuffer = new InputStreamStreamInput(opensearchInBuffer); + TaskId.readFromStream(opensearchBuffer); + if (opensearchBuffer.readBoolean()) { + new ShardId(opensearchBuffer); + } + opensearchBuffer.readOptionalString(); + assertThat(opensearchBuffer.readString(), equalTo("_doc")); + + // now read the stream as normal to check it is parsed correct if received from an older node + opensearchInBuffer = new ByteArrayInputStream(outBuffer.toByteArray()); + opensearchBuffer = new InputStreamStreamInput(opensearchInBuffer); + opensearchBuffer.setVersion(LegacyESVersion.V_7_2_0); + TermVectorsRequest req2 = new TermVectorsRequest(opensearchBuffer); assertThat(request.offsets(), equalTo(req2.offsets())); assertThat(request.fieldStatistics(), equalTo(req2.fieldStatistics())); @@ -281,7 +330,6 @@ public void testMultiParser() throws Exception { request.add(new TermVectorsRequest(), data); checkParsedParameters(request); - assertWarnings(RestTermVectorsAction.TYPES_DEPRECATION_MESSAGE); } void checkParsedParameters(MultiTermVectorsRequest request) { @@ -294,7 +342,6 @@ void checkParsedParameters(MultiTermVectorsRequest request) { fields.add("c"); for (TermVectorsRequest singleRequest : request.requests) { assertThat(singleRequest.index(), equalTo("testidx")); - assertThat(singleRequest.type(), equalTo("test")); assertThat(singleRequest.payloads(), equalTo(false)); assertThat(singleRequest.positions(), equalTo(false)); assertThat(singleRequest.offsets(), equalTo(false)); @@ -313,14 +360,12 @@ public void testMultiParserFilter() throws Exception { request.add(new TermVectorsRequest(), data); checkParsedFilterParameters(request); - assertWarnings(RestTermVectorsAction.TYPES_DEPRECATION_MESSAGE); } void checkParsedFilterParameters(MultiTermVectorsRequest multiRequest) { Set ids = new HashSet<>(Arrays.asList("1", "2")); for (TermVectorsRequest request : multiRequest.requests) { assertThat(request.index(), equalTo("testidx")); - assertThat(request.type(), equalTo("test")); assertTrue(ids.remove(request.id())); assertNotNull(request.filterSettings()); assertThat(request.filterSettings().maxNumTerms, equalTo(20)); diff --git a/server/src/test/java/org/opensearch/action/termvectors/TransportMultiTermVectorsActionTests.java b/server/src/test/java/org/opensearch/action/termvectors/TransportMultiTermVectorsActionTests.java index b6145979e8949..f4f2d9b470a90 100644 --- a/server/src/test/java/org/opensearch/action/termvectors/TransportMultiTermVectorsActionTests.java +++ b/server/src/test/java/org/opensearch/action/termvectors/TransportMultiTermVectorsActionTests.java @@ -230,8 +230,8 @@ public void testTransportMultiGetAction() { final Task task = createTask(); final NodeClient client = new NodeClient(Settings.EMPTY, threadPool); final MultiTermVectorsRequestBuilder request = new MultiTermVectorsRequestBuilder(client, MultiTermVectorsAction.INSTANCE); - request.add(new TermVectorsRequest("index1", 
"_doc", "1")); - request.add(new TermVectorsRequest("index2", "_doc", "2")); + request.add(new TermVectorsRequest("index1", "1")); + request.add(new TermVectorsRequest("index2", "2")); final AtomicBoolean shardActionInvoked = new AtomicBoolean(false); transportAction = new TransportMultiTermVectorsAction( @@ -262,8 +262,8 @@ public void testTransportMultiGetAction_withMissingRouting() { final Task task = createTask(); final NodeClient client = new NodeClient(Settings.EMPTY, threadPool); final MultiTermVectorsRequestBuilder request = new MultiTermVectorsRequestBuilder(client, MultiTermVectorsAction.INSTANCE); - request.add(new TermVectorsRequest("index2", "_doc", "1").routing("1")); - request.add(new TermVectorsRequest("index2", "_doc", "2")); + request.add(new TermVectorsRequest("index2", "1").routing("1")); + request.add(new TermVectorsRequest("index2", "2")); final AtomicBoolean shardActionInvoked = new AtomicBoolean(false); transportAction = new TransportMultiTermVectorsAction( diff --git a/server/src/test/java/org/opensearch/action/update/UpdateRequestTests.java b/server/src/test/java/org/opensearch/action/update/UpdateRequestTests.java index d339a37b51188..380b0628147de 100644 --- a/server/src/test/java/org/opensearch/action/update/UpdateRequestTests.java +++ b/server/src/test/java/org/opensearch/action/update/UpdateRequestTests.java @@ -142,7 +142,7 @@ public void setUp() throws Exception { @SuppressWarnings("unchecked") public void testFromXContent() throws Exception { - UpdateRequest request = new UpdateRequest("test", "type", "1"); + UpdateRequest request = new UpdateRequest("test", "1"); // simple script request.fromXContent(createParser(XContentFactory.jsonBuilder().startObject().field("script", "script1").endObject())); Script script = request.script(); @@ -168,7 +168,7 @@ public void testFromXContent() throws Exception { assertThat(params, equalTo(emptyMap())); // script with params - request = new UpdateRequest("test", "type", "1"); + request = new UpdateRequest("test", "1"); request.fromXContent( createParser( XContentFactory.jsonBuilder() @@ -192,7 +192,7 @@ public void testFromXContent() throws Exception { assertThat(params.size(), equalTo(1)); assertThat(params.get("param1").toString(), equalTo("value1")); - request = new UpdateRequest("test", "type", "1"); + request = new UpdateRequest("test", "1"); request.fromXContent( createParser( XContentFactory.jsonBuilder() @@ -217,7 +217,7 @@ public void testFromXContent() throws Exception { assertThat(params.get("param1").toString(), equalTo("value1")); // script with params and upsert - request = new UpdateRequest("test", "type", "1"); + request = new UpdateRequest("test", "1"); request.fromXContent( createParser( XContentFactory.jsonBuilder() @@ -254,7 +254,7 @@ public void testFromXContent() throws Exception { assertThat(upsertDoc.get("field1").toString(), equalTo("value1")); assertThat(((Map) upsertDoc.get("compound")).get("field2").toString(), equalTo("value2")); - request = new UpdateRequest("test", "type", "1"); + request = new UpdateRequest("test", "1"); request.fromXContent( createParser( XContentFactory.jsonBuilder() @@ -288,7 +288,7 @@ public void testFromXContent() throws Exception { assertThat(((Map) upsertDoc.get("compound")).get("field2").toString(), equalTo("value2")); // script with doc - request = new UpdateRequest("test", "type", "1"); + request = new UpdateRequest("test", "1"); request.fromXContent( createParser( XContentFactory.jsonBuilder() @@ -308,13 +308,13 @@ public void testFromXContent() throws 
Exception { } public void testUnknownFieldParsing() throws Exception { - UpdateRequest request = new UpdateRequest("test", "type", "1"); + UpdateRequest request = new UpdateRequest("test", "1"); XContentParser contentParser = createParser(XContentFactory.jsonBuilder().startObject().field("unknown_field", "test").endObject()); XContentParseException ex = expectThrows(XContentParseException.class, () -> request.fromXContent(contentParser)); assertEquals("[1:2] [UpdateRequest] unknown field [unknown_field]", ex.getMessage()); - UpdateRequest request2 = new UpdateRequest("test", "type", "1"); + UpdateRequest request2 = new UpdateRequest("test", "1"); XContentParser unknownObject = createParser( XContentFactory.jsonBuilder() .startObject() @@ -329,7 +329,7 @@ public void testUnknownFieldParsing() throws Exception { } public void testFetchSourceParsing() throws Exception { - UpdateRequest request = new UpdateRequest("test", "type1", "1"); + UpdateRequest request = new UpdateRequest("test", "1"); request.fromXContent(createParser(XContentFactory.jsonBuilder().startObject().field("_source", true).endObject())); assertThat(request.fetchSource(), notNullValue()); assertThat(request.fetchSource().includes().length, equalTo(0)); @@ -370,17 +370,15 @@ public void testFetchSourceParsing() throws Exception { public void testNowInScript() throws IOException { // We just upsert one document with now() using a script - IndexRequest indexRequest = new IndexRequest("test", "type1", "2").source( - jsonBuilder().startObject().field("foo", "bar").endObject() - ); + IndexRequest indexRequest = new IndexRequest("test").id("2").source(jsonBuilder().startObject().field("foo", "bar").endObject()); { - UpdateRequest updateRequest = new UpdateRequest("test", "type1", "2").upsert(indexRequest) + UpdateRequest updateRequest = new UpdateRequest("test", "2").upsert(indexRequest) .script(mockInlineScript("ctx._source.update_timestamp = ctx._now")) .scriptedUpsert(true); long nowInMillis = randomNonNegativeLong(); // We simulate that the document is not existing yet - GetResult getResult = new GetResult("test", "type1", "2", UNASSIGNED_SEQ_NO, 0, 0, false, null, null, null); + GetResult getResult = new GetResult("test", "2", UNASSIGNED_SEQ_NO, 0, 0, false, null, null, null); UpdateHelper.Result result = updateHelper.prepare(new ShardId("test", "_na_", 0), updateRequest, getResult, () -> nowInMillis); Writeable action = result.action(); assertThat(action, instanceOf(IndexRequest.class)); @@ -388,11 +386,11 @@ public void testNowInScript() throws IOException { assertEquals(nowInMillis, indexAction.sourceAsMap().get("update_timestamp")); } { - UpdateRequest updateRequest = new UpdateRequest("test", "type1", "2").upsert(indexRequest) + UpdateRequest updateRequest = new UpdateRequest("test", "2").upsert(indexRequest) .script(mockInlineScript("ctx._timestamp = ctx._now")) .scriptedUpsert(true); // We simulate that the document is not existing yet - GetResult getResult = new GetResult("test", "type1", "2", 0, 1, 0, true, new BytesArray("{}"), null, null); + GetResult getResult = new GetResult("test", "2", 0, 1, 0, true, new BytesArray("{}"), null, null); UpdateHelper.Result result = updateHelper.prepare(new ShardId("test", "_na_", 0), updateRequest, getResult, () -> 42L); Writeable action = result.action(); assertThat(action, instanceOf(IndexRequest.class)); @@ -400,15 +398,14 @@ public void testNowInScript() throws IOException { } public void testIndexTimeout() { - final GetResult getResult = new GetResult("test", "type", "1", 0, 
1, 0, true, new BytesArray("{\"f\":\"v\"}"), null, null); - final UpdateRequest updateRequest = new UpdateRequest("test", "type", "1").script(mockInlineScript("return")) - .timeout(randomTimeValue()); + final GetResult getResult = new GetResult("test", "1", 0, 1, 0, true, new BytesArray("{\"f\":\"v\"}"), null, null); + final UpdateRequest updateRequest = new UpdateRequest("test", "1").script(mockInlineScript("return")).timeout(randomTimeValue()); runTimeoutTest(getResult, updateRequest); } public void testDeleteTimeout() { - final GetResult getResult = new GetResult("test", "type", "1", 0, 1, 0, true, new BytesArray("{\"f\":\"v\"}"), null, null); - final UpdateRequest updateRequest = new UpdateRequest("test", "type", "1").script(mockInlineScript("ctx.op = delete")) + final GetResult getResult = new GetResult("test", "1", 0, 1, 0, true, new BytesArray("{\"f\":\"v\"}"), null, null); + final UpdateRequest updateRequest = new UpdateRequest("test", "1").script(mockInlineScript("ctx.op = delete")) .timeout(randomTimeValue()); runTimeoutTest(getResult, updateRequest); } @@ -416,15 +413,15 @@ public void testDeleteTimeout() { public void testUpsertTimeout() throws IOException { final boolean exists = randomBoolean(); final BytesReference source = exists ? new BytesArray("{\"f\":\"v\"}") : null; - final GetResult getResult = new GetResult("test", "type", "1", UNASSIGNED_SEQ_NO, 0, 0, exists, source, null, null); + final GetResult getResult = new GetResult("test", "1", UNASSIGNED_SEQ_NO, 0, 0, exists, source, null, null); final XContentBuilder sourceBuilder = jsonBuilder(); sourceBuilder.startObject(); { sourceBuilder.field("f", "v"); } sourceBuilder.endObject(); - final IndexRequest upsert = new IndexRequest("test", "type", "1").source(sourceBuilder); - final UpdateRequest updateRequest = new UpdateRequest("test", "type", "1").upsert(upsert) + final IndexRequest upsert = new IndexRequest("test").id("1").source(sourceBuilder); + final UpdateRequest updateRequest = new UpdateRequest("test", "1").upsert(upsert) .script(mockInlineScript("return")) .timeout(randomTimeValue()); runTimeoutTest(getResult, updateRequest); @@ -514,11 +511,11 @@ public void testToAndFromXContent() throws IOException { } public void testToValidateUpsertRequestAndCAS() { - UpdateRequest updateRequest = new UpdateRequest("index", "type", "id"); + UpdateRequest updateRequest = new UpdateRequest("index", "id"); updateRequest.setIfSeqNo(1L); updateRequest.setIfPrimaryTerm(1L); updateRequest.doc("{}", XContentType.JSON); - updateRequest.upsert(new IndexRequest("index", "type", "id")); + updateRequest.upsert(new IndexRequest("index").id("id")); assertThat( updateRequest.validate().validationErrors(), contains("upsert requests don't support `if_seq_no` and `if_primary_term`") @@ -526,15 +523,15 @@ public void testToValidateUpsertRequestAndCAS() { } public void testToValidateUpsertRequestWithVersion() { - UpdateRequest updateRequest = new UpdateRequest("index", "type", "id"); + UpdateRequest updateRequest = new UpdateRequest("index", "id"); updateRequest.doc("{}", XContentType.JSON); - updateRequest.upsert(new IndexRequest("index", "type", "1").version(1L)); + updateRequest.upsert(new IndexRequest("index").id("1").version(1L)); assertThat(updateRequest.validate().validationErrors(), contains("can't provide version in upsert request")); } public void testValidate() { { - UpdateRequest request = new UpdateRequest("index", "type", "id"); + UpdateRequest request = new UpdateRequest("index", "id"); request.doc("{}", XContentType.JSON); 
ActionRequestValidationException validate = request.validate(); @@ -542,27 +539,18 @@ public void testValidate() { } { // Null types are defaulted to "_doc" - UpdateRequest request = new UpdateRequest("index", null, randomBoolean() ? "" : null); + UpdateRequest request = new UpdateRequest("index", null); request.doc("{}", XContentType.JSON); ActionRequestValidationException validate = request.validate(); assertThat(validate, not(nullValue())); assertThat(validate.validationErrors(), hasItems("id is missing")); } - { - // Non-null types are accepted but fail validation - UpdateRequest request = new UpdateRequest("index", "", randomBoolean() ? "" : null); - request.doc("{}", XContentType.JSON); - ActionRequestValidationException validate = request.validate(); - - assertThat(validate, not(nullValue())); - assertThat(validate.validationErrors(), hasItems("type is missing", "id is missing")); - } } public void testRoutingExtraction() throws Exception { - GetResult getResult = new GetResult("test", "type", "1", UNASSIGNED_SEQ_NO, 0, 0, false, null, null, null); - IndexRequest indexRequest = new IndexRequest("test", "type", "1"); + GetResult getResult = new GetResult("test", "1", UNASSIGNED_SEQ_NO, 0, 0, false, null, null, null); + IndexRequest indexRequest = new IndexRequest("test").id("1"); // There is no routing and parent because the document doesn't exist assertNull(UpdateHelper.calculateRouting(getResult, null)); @@ -571,7 +559,7 @@ public void testRoutingExtraction() throws Exception { assertNull(UpdateHelper.calculateRouting(getResult, indexRequest)); // Doc exists but has no source or fields - getResult = new GetResult("test", "type", "1", 0, 1, 0, true, null, null, null); + getResult = new GetResult("test", "1", 0, 1, 0, true, null, null, null); // There is no routing and parent on either request assertNull(UpdateHelper.calculateRouting(getResult, indexRequest)); @@ -580,7 +568,7 @@ public void testRoutingExtraction() throws Exception { fields.put("_routing", new DocumentField("_routing", Collections.singletonList("routing1"))); // Doc exists and has the parent and routing fields - getResult = new GetResult("test", "type", "1", 0, 1, 0, true, null, fields, null); + getResult = new GetResult("test", "1", 0, 1, 0, true, null, fields, null); // Use the get result parent and routing assertThat(UpdateHelper.calculateRouting(getResult, indexRequest), equalTo("routing1")); @@ -588,9 +576,9 @@ public void testRoutingExtraction() throws Exception { public void testNoopDetection() throws Exception { ShardId shardId = new ShardId("test", "", 0); - GetResult getResult = new GetResult("test", "type", "1", 0, 1, 0, true, new BytesArray("{\"body\": \"foo\"}"), null, null); + GetResult getResult = new GetResult("test", "1", 0, 1, 0, true, new BytesArray("{\"body\": \"foo\"}"), null, null); - UpdateRequest request = new UpdateRequest("test", "type1", "1").fromXContent( + UpdateRequest request = new UpdateRequest("test", "1").fromXContent( createParser(JsonXContent.jsonXContent, new BytesArray("{\"doc\": {\"body\": \"foo\"}}")) ); @@ -606,7 +594,7 @@ public void testNoopDetection() throws Exception { assertThat(result.updatedSourceAsMap().get("body").toString(), equalTo("foo")); // Change the request to be a different doc - request = new UpdateRequest("test", "type1", "1").fromXContent( + request = new UpdateRequest("test", "1").fromXContent( createParser(JsonXContent.jsonXContent, new BytesArray("{\"doc\": {\"body\": \"bar\"}}")) ); result = updateHelper.prepareUpdateIndexRequest(shardId, request, 
getResult, true); @@ -619,9 +607,9 @@ public void testNoopDetection() throws Exception { public void testUpdateScript() throws Exception { ShardId shardId = new ShardId("test", "", 0); - GetResult getResult = new GetResult("test", "type", "1", 0, 1, 0, true, new BytesArray("{\"body\": \"bar\"}"), null, null); + GetResult getResult = new GetResult("test", "1", 0, 1, 0, true, new BytesArray("{\"body\": \"bar\"}"), null, null); - UpdateRequest request = new UpdateRequest("test", "type1", "1").script(mockInlineScript("ctx._source.body = \"foo\"")); + UpdateRequest request = new UpdateRequest("test", "1").script(mockInlineScript("ctx._source.body = \"foo\"")); UpdateHelper.Result result = updateHelper.prepareUpdateScriptRequest( shardId, @@ -635,7 +623,7 @@ public void testUpdateScript() throws Exception { assertThat(result.updatedSourceAsMap().get("body").toString(), equalTo("foo")); // Now where the script changes the op to "delete" - request = new UpdateRequest("test", "type1", "1").script(mockInlineScript("ctx.op = delete")); + request = new UpdateRequest("test", "1").script(mockInlineScript("ctx.op = delete")); result = updateHelper.prepareUpdateScriptRequest(shardId, request, getResult, OpenSearchTestCase::randomNonNegativeLong); @@ -645,9 +633,9 @@ public void testUpdateScript() throws Exception { // We treat everything else as a No-op boolean goodNoop = randomBoolean(); if (goodNoop) { - request = new UpdateRequest("test", "type1", "1").script(mockInlineScript("ctx.op = none")); + request = new UpdateRequest("test", "1").script(mockInlineScript("ctx.op = none")); } else { - request = new UpdateRequest("test", "type1", "1").script(mockInlineScript("ctx.op = bad")); + request = new UpdateRequest("test", "1").script(mockInlineScript("ctx.op = bad")); } result = updateHelper.prepareUpdateScriptRequest(shardId, request, getResult, OpenSearchTestCase::randomNonNegativeLong); @@ -657,23 +645,23 @@ public void testUpdateScript() throws Exception { } public void testToString() throws IOException { - UpdateRequest request = new UpdateRequest("test", "type1", "1").script(mockInlineScript("ctx._source.body = \"foo\"")); + UpdateRequest request = new UpdateRequest("test", "1").script(mockInlineScript("ctx._source.body = \"foo\"")); assertThat( request.toString(), equalTo( - "update {[test][type1][1], doc_as_upsert[false], " + "update {[test][1], doc_as_upsert[false], " + "script[Script{type=inline, lang='mock', idOrCode='ctx._source.body = \"foo\"', options={}, params={}}], " + "scripted_upsert[false], detect_noop[true]}" ) ); - request = new UpdateRequest("test", "type1", "1").fromXContent( + request = new UpdateRequest("test", "1").fromXContent( createParser(JsonXContent.jsonXContent, new BytesArray("{\"doc\": {\"body\": \"bar\"}}")) ); assertThat( request.toString(), equalTo( - "update {[test][type1][1], doc_as_upsert[false], " - + "doc[index {[null][_doc][null], source[{\"body\":\"bar\"}]}], scripted_upsert[false], detect_noop[true]}" + "update {[test][1], doc_as_upsert[false], " + + "doc[index {[null][null], source[{\"body\":\"bar\"}]}], scripted_upsert[false], detect_noop[true]}" ) ); } diff --git a/server/src/test/java/org/opensearch/action/update/UpdateResponseTests.java b/server/src/test/java/org/opensearch/action/update/UpdateResponseTests.java index 8ba87114f542c..ba0abd6c8e349 100644 --- a/server/src/test/java/org/opensearch/action/update/UpdateResponseTests.java +++ b/server/src/test/java/org/opensearch/action/update/UpdateResponseTests.java @@ -68,10 +68,10 @@ public class 
UpdateResponseTests extends OpenSearchTestCase { public void testToXContent() throws IOException { { - UpdateResponse updateResponse = new UpdateResponse(new ShardId("index", "index_uuid", 0), "type", "id", -2, 0, 0, NOT_FOUND); + UpdateResponse updateResponse = new UpdateResponse(new ShardId("index", "index_uuid", 0), "id", -2, 0, 0, NOT_FOUND); String output = Strings.toString(updateResponse); assertEquals( - "{\"_index\":\"index\",\"_type\":\"type\",\"_id\":\"id\",\"_version\":0,\"result\":\"not_found\"," + "{\"_index\":\"index\",\"_id\":\"id\",\"_version\":0,\"result\":\"not_found\"," + "\"_shards\":{\"total\":0,\"successful\":0,\"failed\":0}}", output ); @@ -80,7 +80,6 @@ public void testToXContent() throws IOException { UpdateResponse updateResponse = new UpdateResponse( new ReplicationResponse.ShardInfo(10, 6), new ShardId("index", "index_uuid", 1), - "type", "id", 3, 17, @@ -89,7 +88,7 @@ public void testToXContent() throws IOException { ); String output = Strings.toString(updateResponse); assertEquals( - "{\"_index\":\"index\",\"_type\":\"type\",\"_id\":\"id\",\"_version\":1,\"result\":\"deleted\"," + "{\"_index\":\"index\",\"_id\":\"id\",\"_version\":1,\"result\":\"deleted\"," + "\"_shards\":{\"total\":10,\"successful\":6,\"failed\":0},\"_seq_no\":3,\"_primary_term\":17}", output ); @@ -103,18 +102,17 @@ public void testToXContent() throws IOException { UpdateResponse updateResponse = new UpdateResponse( new ReplicationResponse.ShardInfo(3, 2), new ShardId("books", "books_uuid", 2), - "book", "1", 7, 17, 2, UPDATED ); - updateResponse.setGetResult(new GetResult("books", "book", "1", 0, 1, 2, true, source, fields, null)); + updateResponse.setGetResult(new GetResult("books", "1", 0, 1, 2, true, source, fields, null)); String output = Strings.toString(updateResponse); assertEquals( - "{\"_index\":\"books\",\"_type\":\"book\",\"_id\":\"1\",\"_version\":2,\"result\":\"updated\"," + "{\"_index\":\"books\",\"_id\":\"1\",\"_version\":2,\"result\":\"updated\"," + "\"_shards\":{\"total\":3,\"successful\":2,\"failed\":0},\"_seq_no\":7,\"_primary_term\":17,\"get\":{" + "\"_seq_no\":0,\"_primary_term\":1,\"found\":true," + "\"_source\":{\"title\":\"Book title\",\"isbn\":\"ABC-123\"},\"fields\":{\"isbn\":[\"ABC-123\"],\"title\":[\"Book " @@ -192,7 +190,6 @@ public static Tuple randomUpdateResponse(XConten GetResult expectedGetResult = getResults.v2(); String index = actualGetResult.getIndex(); - String type = actualGetResult.getType(); String id = actualGetResult.getId(); long version = actualGetResult.getVersion(); DocWriteResponse.Result result = actualGetResult.isExists() ? 
DocWriteResponse.Result.UPDATED : DocWriteResponse.Result.NOT_FOUND; @@ -211,11 +208,11 @@ public static Tuple randomUpdateResponse(XConten if (seqNo != SequenceNumbers.UNASSIGNED_SEQ_NO) { Tuple shardInfos = RandomObjects.randomShardInfo(random()); - actual = new UpdateResponse(shardInfos.v1(), actualShardId, type, id, seqNo, primaryTerm, version, result); - expected = new UpdateResponse(shardInfos.v2(), expectedShardId, type, id, seqNo, primaryTerm, version, result); + actual = new UpdateResponse(shardInfos.v1(), actualShardId, id, seqNo, primaryTerm, version, result); + expected = new UpdateResponse(shardInfos.v2(), expectedShardId, id, seqNo, primaryTerm, version, result); } else { - actual = new UpdateResponse(actualShardId, type, id, seqNo, primaryTerm, version, result); - expected = new UpdateResponse(expectedShardId, type, id, seqNo, primaryTerm, version, result); + actual = new UpdateResponse(actualShardId, id, seqNo, primaryTerm, version, result); + expected = new UpdateResponse(expectedShardId, id, seqNo, primaryTerm, version, result); } if (actualGetResult.isExists()) { diff --git a/server/src/test/java/org/opensearch/client/AbstractClientHeadersTestCase.java b/server/src/test/java/org/opensearch/client/AbstractClientHeadersTestCase.java index d37ff7d480bd8..e4e6594207a5e 100644 --- a/server/src/test/java/org/opensearch/client/AbstractClientHeadersTestCase.java +++ b/server/src/test/java/org/opensearch/client/AbstractClientHeadersTestCase.java @@ -121,7 +121,7 @@ public void testActions() { // validation in the settings??? - ugly and conceptually wrong) // choosing arbitrary top level actions to test - client.prepareGet("idx", "type", "id").execute(new AssertingActionListener<>(GetAction.NAME, client.threadPool())); + client.prepareGet("idx", "id").execute(new AssertingActionListener<>(GetAction.NAME, client.threadPool())); client.prepareSearch().execute(new AssertingActionListener<>(SearchAction.NAME, client.threadPool())); client.prepareDelete("idx", "type", "id").execute(new AssertingActionListener<>(DeleteAction.NAME, client.threadPool())); client.admin() @@ -156,7 +156,7 @@ public void testOverrideHeader() throws Exception { expected.put("key1", key1Val); expected.put("key2", "val 2"); client.threadPool().getThreadContext().putHeader("key1", key1Val); - client.prepareGet("idx", "type", "id").execute(new AssertingActionListener<>(GetAction.NAME, expected, client.threadPool())); + client.prepareGet("idx", "id").execute(new AssertingActionListener<>(GetAction.NAME, expected, client.threadPool())); client.admin() .cluster() diff --git a/server/src/test/java/org/opensearch/cluster/metadata/IndexMetadataTests.java b/server/src/test/java/org/opensearch/cluster/metadata/IndexMetadataTests.java index cde2a762786af..87860b8c536ef 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/IndexMetadataTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/IndexMetadataTests.java @@ -32,7 +32,6 @@ package org.opensearch.cluster.metadata; -import org.opensearch.Version; import org.opensearch.action.admin.indices.rollover.MaxAgeCondition; import org.opensearch.action.admin.indices.rollover.MaxDocsCondition; import org.opensearch.action.admin.indices.rollover.MaxSizeCondition; @@ -52,7 +51,6 @@ import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentParser; import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.index.mapper.MapperService; import org.opensearch.index.shard.ShardId; import 
org.opensearch.indices.IndicesModule; import org.opensearch.test.OpenSearchTestCase; @@ -363,32 +361,6 @@ public void testNumberOfRoutingShards() { assertEquals("the number of source shards [2] must be a factor of [3]", iae.getMessage()); } - public void testMappingOrDefault() throws IOException { - Settings settings = Settings.builder() - .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) - .build(); - IndexMetadata meta = IndexMetadata.builder("index").settings(settings).build(); - assertNull(meta.mappingOrDefault()); - - meta = IndexMetadata.builder("index").settings(settings).putMapping("type", "{}").build(); - assertNotNull(meta.mappingOrDefault()); - assertEquals("type", meta.mappingOrDefault().type()); - - meta = IndexMetadata.builder("index").settings(settings).putMapping(MapperService.DEFAULT_MAPPING, "{}").build(); - assertNotNull(meta.mappingOrDefault()); - assertEquals(MapperService.DEFAULT_MAPPING, meta.mappingOrDefault().type()); - - meta = IndexMetadata.builder("index") - .settings(settings) - .putMapping("type", "{}") - .putMapping(MapperService.DEFAULT_MAPPING, "{}") - .build(); - assertNotNull(meta.mappingOrDefault()); - assertEquals("type", meta.mappingOrDefault().type()); - } - public void testMissingNumberOfShards() { final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> IndexMetadata.builder("test").build()); assertThat(e.getMessage(), containsString("must specify number of shards for index [test]")); diff --git a/server/src/test/java/org/opensearch/cluster/metadata/IndexNameExpressionResolverTests.java b/server/src/test/java/org/opensearch/cluster/metadata/IndexNameExpressionResolverTests.java index 2a18fed2d68e7..e736e27e5aa44 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/IndexNameExpressionResolverTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/IndexNameExpressionResolverTests.java @@ -1776,7 +1776,7 @@ public void testConcreteWriteIndexWithNoWriteIndexWithSingleIndex() { assertArrayEquals(new String[] { "test-alias" }, strings); DocWriteRequest request = randomFrom( new IndexRequest("test-alias"), - new UpdateRequest("test-alias", "_type", "_id"), + new UpdateRequest("test-alias", "_id"), new DeleteRequest("test-alias") ); IllegalArgumentException exception = expectThrows( @@ -1811,7 +1811,7 @@ public void testConcreteWriteIndexWithNoWriteIndexWithMultipleIndices() { assertArrayEquals(new String[] { "test-alias" }, strings); DocWriteRequest request = randomFrom( new IndexRequest("test-alias"), - new UpdateRequest("test-alias", "_type", "_id"), + new UpdateRequest("test-alias", "_id"), new DeleteRequest("test-alias") ); IllegalArgumentException exception = expectThrows( diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index 25bd128fbdc53..5caa9eb212e15 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -1096,23 +1096,12 @@ public void testBuildIndexMetadata() { Settings indexSettings = Settings.builder() .put("index.version.created", Version.CURRENT) - .put(INDEX_SOFT_DELETES_SETTING.getKey(), false) .put(SETTING_NUMBER_OF_REPLICAS, 0) .put(SETTING_NUMBER_OF_SHARDS, 1) .build(); List aliases 
= singletonList(AliasMetadata.builder("alias1").build()); - IndexMetadata indexMetadata = buildIndexMetadata( - "test", - aliases, - () -> null, - () -> null, - indexSettings, - 4, - sourceIndexMetadata, - false - ); + IndexMetadata indexMetadata = buildIndexMetadata("test", aliases, () -> null, indexSettings, 4, sourceIndexMetadata, false); - assertThat(indexMetadata.getSettings().getAsBoolean(INDEX_SOFT_DELETES_SETTING.getKey(), true), is(false)); assertThat(indexMetadata.getAliases().size(), is(1)); assertThat(indexMetadata.getAliases().keys().iterator().next().value, is("alias1")); assertThat("The source index primary term must be used", indexMetadata.primaryTerm(0), is(3L)); @@ -1154,36 +1143,27 @@ public void testGetIndexNumberOfRoutingShardsYieldsSourceNumberOfShards() { assertThat(targetRoutingNumberOfShards, is(6)); } - public void testSoftDeletesDisabledDeprecation() { - request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); - request.settings(Settings.builder().put(INDEX_SOFT_DELETES_SETTING.getKey(), false).build()); - aggregateIndexSettings( - ClusterState.EMPTY_STATE, - request, - Settings.EMPTY, - null, - Settings.EMPTY, - IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, - randomShardLimitService(), - Collections.emptySet() - ); - assertWarnings( - "Creating indices with soft-deletes disabled is deprecated and will be removed in future OpenSearch versions. " - + "Please do not specify value for setting [index.soft_deletes.enabled] of index [test]." - ); - request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); - if (randomBoolean()) { - request.settings(Settings.builder().put(INDEX_SOFT_DELETES_SETTING.getKey(), true).build()); - } - aggregateIndexSettings( - ClusterState.EMPTY_STATE, - request, - Settings.EMPTY, - null, - Settings.EMPTY, - IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, - randomShardLimitService(), - Collections.emptySet() + public void testSoftDeletesDisabledIsRejected() { + final IllegalArgumentException error = expectThrows(IllegalArgumentException.class, () -> { + request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); + request.settings(Settings.builder().put(INDEX_SOFT_DELETES_SETTING.getKey(), false).build()); + aggregateIndexSettings( + ClusterState.EMPTY_STATE, + request, + Settings.EMPTY, + null, + Settings.EMPTY, + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Collections.emptySet() + ); + }); + assertThat( + error.getMessage(), + equalTo( + "Creating indices with soft-deletes disabled is no longer supported. " + + "Please do not specify a value for setting [index.soft_deletes.enabled]." 
+ ) ); } diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataMappingServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataMappingServiceTests.java index 73b10789ac4f5..f25cf07455be7 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataMappingServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataMappingServiceTests.java @@ -32,15 +32,11 @@ package org.opensearch.cluster.metadata; -import org.opensearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.opensearch.action.admin.indices.mapping.put.PutMappingClusterStateUpdateRequest; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateTaskExecutor; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.Strings; import org.opensearch.common.compress.CompressedXContent; -import org.opensearch.common.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.index.Index; import org.opensearch.index.IndexService; import org.opensearch.index.mapper.MapperService; @@ -51,7 +47,6 @@ import java.util.Collection; import java.util.Collections; -import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; @@ -155,86 +150,4 @@ public void testMappingUpdateAccepts_docAsType() throws Exception { mappingMetadata.sourceAsMap() ); } - - public void testForbidMultipleTypes() throws Exception { - CreateIndexRequestBuilder createIndexRequest = client().admin() - .indices() - .prepareCreate("test") - .addMapping(MapperService.SINGLE_MAPPING_NAME); - IndexService indexService = createIndex("test", createIndexRequest); - - MetadataMappingService mappingService = getInstanceFromNode(MetadataMappingService.class); - ClusterService clusterService = getInstanceFromNode(ClusterService.class); - - PutMappingClusterStateUpdateRequest request = new PutMappingClusterStateUpdateRequest().type("other_type") - .indices(new Index[] { indexService.index() }) - .source(Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("other_type").endObject().endObject())); - ClusterStateTaskExecutor.ClusterTasksResult result = mappingService.putMappingExecutor.execute( - clusterService.state(), - Collections.singletonList(request) - ); - assertThat(result.executionResults.size(), equalTo(1)); - - ClusterStateTaskExecutor.TaskResult taskResult = result.executionResults.values().iterator().next(); - assertFalse(taskResult.isSuccess()); - assertThat( - taskResult.getFailure().getMessage(), - containsString("Rejecting mapping update to [test] as the final mapping would have more than 1 type: ") - ); - } - - /** - * This test checks that the multi-type validation is done before we do any other kind of validation - * on the mapping that's added, see https://github.com/elastic/elasticsearch/issues/29313 - */ - public void testForbidMultipleTypesWithConflictingMappings() throws Exception { - XContentBuilder mapping = XContentFactory.jsonBuilder() - .startObject() - .startObject(MapperService.SINGLE_MAPPING_NAME) - .startObject("properties") - .startObject("field1") - .field("type", "text") - .endObject() - .endObject() - .endObject() - .endObject(); - - CreateIndexRequestBuilder createIndexRequest = client().admin() - .indices() - .prepareCreate("test") - .addMapping(MapperService.SINGLE_MAPPING_NAME, mapping); - IndexService indexService = createIndex("test", 
createIndexRequest); - - MetadataMappingService mappingService = getInstanceFromNode(MetadataMappingService.class); - ClusterService clusterService = getInstanceFromNode(ClusterService.class); - - String conflictingMapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("other_type") - .startObject("properties") - .startObject("field1") - .field("type", "keyword") - .endObject() - .endObject() - .endObject() - .endObject() - ); - - PutMappingClusterStateUpdateRequest request = new PutMappingClusterStateUpdateRequest().type("other_type") - .indices(new Index[] { indexService.index() }) - .source(conflictingMapping); - ClusterStateTaskExecutor.ClusterTasksResult result = mappingService.putMappingExecutor.execute( - clusterService.state(), - Collections.singletonList(request) - ); - assertThat(result.executionResults.size(), equalTo(1)); - - ClusterStateTaskExecutor.TaskResult taskResult = result.executionResults.values().iterator().next(); - assertFalse(taskResult.isSuccess()); - assertThat( - taskResult.getFailure().getMessage(), - containsString("Rejecting mapping update to [test] as the final mapping would have more than 1 type: ") - ); - } } diff --git a/server/src/test/java/org/opensearch/cluster/routing/MovePrimaryFirstTests.java b/server/src/test/java/org/opensearch/cluster/routing/MovePrimaryFirstTests.java new file mode 100644 index 0000000000000..aa2be1fb652cd --- /dev/null +++ b/server/src/test/java/org/opensearch/cluster/routing/MovePrimaryFirstTests.java @@ -0,0 +1,127 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.routing; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.cluster.ClusterStateListener; +import org.opensearch.common.settings.Settings; +import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.Iterator; +import java.util.concurrent.CountDownLatch; + +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +@ThreadLeakScope(ThreadLeakScope.Scope.NONE) +public class MovePrimaryFirstTests extends OpenSearchIntegTestCase { + + protected String startDataOnlyNode(final String zone) { + final Settings settings = Settings.builder().put("node.attr.zone", zone).build(); + return internalCluster().startDataOnlyNode(settings); + } + + protected void createAndIndex(String index, int replicaCount, int shardCount) { + assertAcked( + prepareCreate( + index, + -1, + Settings.builder() + .put("number_of_shards", shardCount) + .put("number_of_replicas", replicaCount) + .put("max_result_window", 20000) + ) + ); + int startDocCountId = 0; + for (int i = 0; i < 10; i++) { + index(index, "_doc", Integer.toString(startDocCountId), "foo", "bar" + startDocCountId); + ++startDocCountId; + } + flushAndRefresh(index); + } + + /** + * Creates two nodes in each of two zones and shuts down the zone1 nodes after + * half of the shards have relocated. A shards-per-node constraint ensures + * that exactly 50% of the shards relocate to the zone2 nodes, giving time to shut + * down the zone1 nodes. 
Since primaries are relocated first when movePrimaryFirst is + * enabled, the cluster should not become red and the zone2 nodes should hold all the primaries. + */ + public void testClusterGreenAfterPartialRelocation() throws InterruptedException { + internalCluster().startMasterOnlyNodes(1); + final String z1 = "zone-1", z2 = "zone-2"; + final int primaryShardCount = 6; + assertTrue("Primary shard count must be even for equal distribution across two nodes", primaryShardCount % 2 == 0); + final String z1n1 = startDataOnlyNode(z1); + ensureGreen(); + createAndIndex("foo", 1, primaryShardCount); + ensureYellow(); + // Start the second node in the same zone only after the cluster is yellow to ensure + // that one node gets all the primaries and the other all the replicas + final String z1n2 = startDataOnlyNode(z1); + ensureGreen(); + + // Enable the cluster-level setting for moving primaries first and keep the new + // zone's nodes excluded to prevent any shard relocation + ClusterUpdateSettingsRequest settingsRequest = new ClusterUpdateSettingsRequest(); + settingsRequest.persistentSettings( + Settings.builder().put("cluster.routing.allocation.move.primary_first", true).put("cluster.routing.allocation.exclude.zone", z2) + ); + client().admin().cluster().updateSettings(settingsRequest).actionGet(); + + final String z2n1 = startDataOnlyNode(z2); + final String z2n2 = startDataOnlyNode(z2); + + // Create a cluster state listener that counts the started shards on the new + // zone's nodes before counting down the latch + final CountDownLatch primaryMoveLatch = new CountDownLatch(1); + final ClusterStateListener listener = event -> { + if (event.routingTableChanged()) { + final RoutingNodes routingNodes = event.state().getRoutingNodes(); + int startedCount = 0; + for (Iterator<RoutingNode> it = routingNodes.iterator(); it.hasNext();) { + RoutingNode routingNode = it.next(); + final String nodeName = routingNode.node().getName(); + if (nodeName.equals(z2n1) || nodeName.equals(z2n2)) { + startedCount += routingNode.numberOfShardsWithState(ShardRoutingState.STARTED); + } + } + + // Count down the latch once all the primary shards have started on the zone-2 nodes + if (startedCount == primaryShardCount) { + primaryMoveLatch.countDown(); + } + } + }; + internalCluster().clusterService().addListener(listener); + + // Exclude the zone1 nodes from allocation and await the latch count down + settingsRequest = new ClusterUpdateSettingsRequest(); + settingsRequest.persistentSettings( + Settings.builder() + .put("cluster.routing.allocation.exclude.zone", z1) + // The total shards per node constraint is added to pause the relocation after the primary shards + // have relocated, to allow time for node shutdown and to validate the yellow cluster + .put("cluster.routing.allocation.total_shards_per_node", primaryShardCount / 2) + ); + client().admin().cluster().updateSettings(settingsRequest); + primaryMoveLatch.await(); + + // Shut down both nodes in zone1 and ensure the cluster does not become red + try { + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(z1n1)); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(z1n2)); + } catch (Exception e) { + // Ignore failures while stopping the zone1 nodes; cluster health is verified below + } + // Due to the shards-per-node constraint the cluster cannot become green; + // since yellow suffices for this test, the constraint is not removed + ensureYellow(); + } +} diff --git a/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingAwarenessTests.java b/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingAwarenessTests.java new file mode 100644 index 0000000000000..d9675a548cc08 --- /dev/null +++ 
b/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingAwarenessTests.java @@ -0,0 +1,63 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.routing; + +import org.junit.After; +import org.opensearch.common.settings.Settings; +import org.opensearch.test.OpenSearchIntegTestCase; + +import static org.hamcrest.Matchers.equalTo; +import static org.opensearch.cluster.routing.OperationRouting.IGNORE_AWARENESS_ATTRIBUTES; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +public class OperationRoutingAwarenessTests extends OpenSearchIntegTestCase { + + @After + public void cleanup() { + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().putNull("*"))); + } + + public void testToggleSearchAllocationAwareness() { + OperationRouting routing = internalCluster().clusterService().operationRouting(); + + // Update awareness settings + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put("cluster.routing.allocation.awareness.attributes", "zone")) + .get(); + assertThat(routing.getAwarenessAttributes().size(), equalTo(1)); + assertThat(routing.getAwarenessAttributes().get(0), equalTo("zone")); + assertTrue(internalCluster().clusterService().operationRouting().ignoreAwarenessAttributes()); + + // Set ignore awareness attributes to false + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(IGNORE_AWARENESS_ATTRIBUTES, false)) + .get(); + // Assert that the awareness attributes haven't changed + assertThat(routing.getAwarenessAttributes().size(), equalTo(1)); + assertThat(routing.getAwarenessAttributes().get(0), equalTo("zone")); + assertFalse(internalCluster().clusterService().operationRouting().isIgnoreAwarenessAttr()); + assertFalse(internalCluster().clusterService().operationRouting().ignoreAwarenessAttributes()); + + // Set ignore awareness attributes to true + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(IGNORE_AWARENESS_ATTRIBUTES, true)) + .get(); + // Assert that the awareness attributes haven't changed + assertThat(routing.getAwarenessAttributes().size(), equalTo(1)); + assertThat(routing.getAwarenessAttributes().get(0), equalTo("zone")); + assertTrue(routing.isIgnoreAwarenessAttr()); + assertTrue(internalCluster().clusterService().operationRouting().ignoreAwarenessAttributes()); + } +} diff --git a/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java b/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java index 349acb1a18c6d..7a5e24a7eeca2 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java @@ -35,6 +35,9 @@ import org.opensearch.action.support.replication.ClusterStateCreationUtils; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.opensearch.cluster.service.ClusterService; import 
org.opensearch.common.settings.ClusterSettings; @@ -51,6 +54,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -58,12 +62,16 @@ import java.util.Set; import java.util.TreeMap; -import static org.opensearch.cluster.routing.OperationRouting.IGNORE_AWARENESS_ATTRIBUTES_DEPRECATION_MESSAGE; +import static java.util.Collections.singletonMap; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.object.HasToString.hasToString; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_CREATION_DATE; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_VERSION_CREATED; public class OperationRoutingTests extends OpenSearchTestCase { public void testGenerateShardId() { @@ -658,6 +666,125 @@ public void testAdaptiveReplicaSelection() throws Exception { terminate(threadPool); } + // Regression test for ignoring awareness attributes. This test creates shards in different zones and simulates stress + on the nodes in one zone to verify that Adaptive Replica Selection routes the request to a node in a different zone + by ignoring the zone awareness attributes. + public void testAdaptiveReplicaSelectionWithZoneAwarenessIgnored() throws Exception { + final int numIndices = 2; + final int numShards = 1; + final int numReplicas = 1; + final String[] indexNames = new String[numIndices]; + for (int i = 0; i < numIndices; i++) { + indexNames[i] = "test" + i; + } + + DiscoveryNode[] allNodes = setupNodes(); + ClusterState state = ClusterStateCreationUtils.state(allNodes[0], allNodes[3], allNodes); + // Updates the cluster state by assigning shard copies to nodes + state = updateStatetoTestARS(indexNames, numShards, numReplicas, allNodes, state); + + Settings awarenessSetting = Settings.builder().put("cluster.routing.allocation.awareness.attributes", "zone").build(); + TestThreadPool threadPool = new TestThreadPool("testThatOnlyNodesSupport"); + ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool); + + OperationRouting opRouting = new OperationRouting( + awarenessSetting, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + opRouting.setUseAdaptiveReplicaSelection(true); + assertTrue(opRouting.ignoreAwarenessAttributes()); + List<ShardRouting> searchedShards = new ArrayList<>(numShards); + Set<String> selectedNodes = new HashSet<>(numShards); + ResponseCollectorService collector = new ResponseCollectorService(clusterService); + Map<String, Long> outstandingRequests = new HashMap<>(); + + GroupShardsIterator<ShardIterator> groupIterator = opRouting.searchShards( + state, + indexNames, + null, + null, + collector, + outstandingRequests + ); + assertThat("One group per index shard", groupIterator.size(), equalTo(numIndices * numShards)); + + // Test that the shards use a round-robin pattern when there are no stats + assertThat(groupIterator.size(), equalTo(numIndices * numShards)); + assertThat(groupIterator.get(0).size(), equalTo(numReplicas + 1)); + + ShardRouting firstChoice = groupIterator.get(0).nextOrNull(); + assertNotNull(firstChoice); + searchedShards.add(firstChoice); + 
selectedNodes.add(firstChoice.currentNodeId()); + + groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests); + + assertThat(groupIterator.size(), equalTo(numIndices * numShards)); + assertThat(groupIterator.get(0).size(), equalTo(numReplicas + 1)); + ShardRouting secondChoice = groupIterator.get(0).nextOrNull(); + assertNotNull(secondChoice); + searchedShards.add(secondChoice); + selectedNodes.add(secondChoice.currentNodeId()); + + // All the shards should be ranked equally since there are no stats yet + assertTrue(selectedNodes.contains("node_b2")); + + // Since the primary shards are divided randomly between node_a0 and node_a1 + assertTrue(selectedNodes.contains("node_a0") || selectedNodes.contains("node_a1")); + + // Now let's start adding node metrics, since that will affect which node is chosen. Adding more load to node_b2 + collector.addNodeStatistics("node_a0", 1, TimeValue.timeValueMillis(50).nanos(), TimeValue.timeValueMillis(50).nanos()); + collector.addNodeStatistics("node_a1", 20, TimeValue.timeValueMillis(100).nanos(), TimeValue.timeValueMillis(150).nanos()); + collector.addNodeStatistics("node_b2", 40, TimeValue.timeValueMillis(250).nanos(), TimeValue.timeValueMillis(250).nanos()); + outstandingRequests.put("node_a0", 1L); + outstandingRequests.put("node_a1", 1L); + outstandingRequests.put("node_b2", 1L); + + groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests); + // node_a0 or node_a1 should be the lowest ranked node to start + groupIterator.forEach(shardRoutings -> assertThat(shardRoutings.nextOrNull().currentNodeId(), containsString("node_a"))); + + // Adding more load to node_a0 + collector.addNodeStatistics("node_a0", 10, TimeValue.timeValueMillis(200).nanos(), TimeValue.timeValueMillis(150).nanos()); + groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests); + + // Adding more load to node_a0 and node_a1 from zone-a + collector.addNodeStatistics("node_a1", 100, TimeValue.timeValueMillis(300).nanos(), TimeValue.timeValueMillis(250).nanos()); + collector.addNodeStatistics("node_a0", 100, TimeValue.timeValueMillis(300).nanos(), TimeValue.timeValueMillis(250).nanos()); + groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests); + // ARS should pick node_b2 from zone-b since both node_a0 and node_a1 are overloaded + groupIterator.forEach(shardRoutings -> assertThat(shardRoutings.nextOrNull().currentNodeId(), containsString("node_b"))); + + IOUtils.close(clusterService); + terminate(threadPool); + } + + private DiscoveryNode[] setupNodes() { + // Sets up two data nodes in zone-a and one data node in zone-b + List<String> zones = Arrays.asList("a", "a", "b"); + DiscoveryNode[] allNodes = new DiscoveryNode[4]; + int i = 0; + for (String zone : zones) { + DiscoveryNode node = new DiscoveryNode( + "node_" + zone + i, + buildNewFakeTransportAddress(), + singletonMap("zone", zone), + Collections.singleton(DiscoveryNodeRole.DATA_ROLE), + Version.CURRENT + ); + allNodes[i++] = node; + } + DiscoveryNode master = new DiscoveryNode( + "master", + buildNewFakeTransportAddress(), + Collections.emptyMap(), + Collections.singleton(DiscoveryNodeRole.MASTER_ROLE), + Version.CURRENT + ); + allNodes[i] = master; + return allNodes; + } + public void 
testAllocationAwarenessDeprecation() { .build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) ); - assertWarnings(IGNORE_AWARENESS_ATTRIBUTES_DEPRECATION_MESSAGE); } + /** + * The following setup is created to test ARS + */ + private ClusterState updateStatetoTestARS( + String[] indices, + int numberOfShards, + int numberOfReplicas, + DiscoveryNode[] nodes, + ClusterState state + ) { + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); + Metadata.Builder metadataBuilder = Metadata.builder(); + ClusterState.Builder clusterState = ClusterState.builder(state); + + for (String index : indices) { + IndexMetadata indexMetadata = IndexMetadata.builder(index) + .settings( + Settings.builder() + .put(SETTING_VERSION_CREATED, Version.CURRENT) + .put(SETTING_NUMBER_OF_SHARDS, numberOfShards) + .put(SETTING_NUMBER_OF_REPLICAS, numberOfReplicas) + .put(SETTING_CREATION_DATE, System.currentTimeMillis()) + ) + .build(); + metadataBuilder.put(indexMetadata, false).generateClusterUuidIfNeeded(); + IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(indexMetadata.getIndex()); + for (int i = 0; i < numberOfShards; i++) { + final ShardId shardId = new ShardId(index, "_na_", i); + IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId); + // Assign all the primary shards on nodes in zone-a (node_a0 or node_a1) + indexShardRoutingBuilder.addShard( + TestShardRouting.newShardRouting(index, i, nodes[randomInt(1)].getId(), null, true, ShardRoutingState.STARTED) + ); + for (int replica = 0; replica < numberOfReplicas; replica++) { + // Assign all the replicas on nodes in zone-b (node_b2) + indexShardRoutingBuilder.addShard( + TestShardRouting.newShardRouting(index, i, nodes[2].getId(), null, false, ShardRoutingState.STARTED) + ); + } + indexRoutingTableBuilder.addIndexShard(indexShardRoutingBuilder.build()); + } + routingTableBuilder.add(indexRoutingTableBuilder.build()); + } + clusterState.metadata(metadataBuilder); + clusterState.routingTable(routingTableBuilder.build()); + return clusterState.build(); + } } diff --git a/server/src/test/java/org/opensearch/cluster/routing/RoutingNodeTests.java b/server/src/test/java/org/opensearch/cluster/routing/RoutingNodeTests.java index 8451633710ce5..5bd5b7d9f6a67 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/RoutingNodeTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/RoutingNodeTests.java @@ -41,6 +41,7 @@ import org.opensearch.test.OpenSearchTestCase; import java.net.InetAddress; +import java.util.Iterator; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; @@ -86,6 +87,29 @@ public void testAdd() { assertThat(routingNode.getByShardId(new ShardId("test", IndexMetadata.INDEX_UUID_NA_VALUE, 4)), equalTo(relocatingShard0)); } + public void testPrimaryFirstIterator() { + ShardRouting initializingShard3 = TestShardRouting.newShardRouting("test", 3, "node-1", false, ShardRoutingState.INITIALIZING); + ShardRouting relocatingShard4 = TestShardRouting.newShardRouting("test", 4, "node-1", "node-2", true, ShardRoutingState.RELOCATING); + ShardRouting initializingShard5 = TestShardRouting.newShardRouting("test", 5, "node-1", true, ShardRoutingState.INITIALIZING); + routingNode.add(initializingShard3); + routingNode.add(relocatingShard4); + routingNode.add(initializingShard5); + final Iterator<ShardRouting> shardRoutingIterator = routingNode.iterator(); + assertTrue(shardRoutingIterator.hasNext()); + 
assertThat(shardRoutingIterator.next(), equalTo(relocatingShard4)); + assertTrue(shardRoutingIterator.hasNext()); + assertThat(shardRoutingIterator.next(), equalTo(initializingShard5)); + assertTrue(shardRoutingIterator.hasNext()); + assertThat(shardRoutingIterator.next(), equalTo(unassignedShard0)); + assertTrue(shardRoutingIterator.hasNext()); + assertThat(shardRoutingIterator.next(), equalTo(initializingShard0)); + assertTrue(shardRoutingIterator.hasNext()); + assertThat(shardRoutingIterator.next(), equalTo(relocatingShard0)); + assertTrue(shardRoutingIterator.hasNext()); + assertThat(shardRoutingIterator.next(), equalTo(initializingShard3)); + assertFalse(shardRoutingIterator.hasNext()); + } + public void testUpdate() { ShardRouting startedShard0 = TestShardRouting.newShardRouting("test", 0, "node-1", false, ShardRoutingState.STARTED); ShardRouting startedShard1 = TestShardRouting.newShardRouting("test", 1, "node-1", "node-2", false, ShardRoutingState.RELOCATING); diff --git a/server/src/test/java/org/opensearch/cluster/routing/RoutingNodesTests.java b/server/src/test/java/org/opensearch/cluster/routing/RoutingNodesTests.java new file mode 100644 index 0000000000000..3e9088d63cfb4 --- /dev/null +++ b/server/src/test/java/org/opensearch/cluster/routing/RoutingNodesTests.java @@ -0,0 +1,163 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.cluster.routing; + +import org.junit.Before; +import org.opensearch.Version; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.OpenSearchAllocationTestCase; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.allocation.AllocationService; +import org.opensearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; +import org.opensearch.common.settings.Settings; + +import java.util.Iterator; +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; + +public class RoutingNodesTests extends OpenSearchAllocationTestCase { + private static final String TEST_INDEX_1 = "test1"; + private static final String TEST_INDEX_2 = "test2"; + private RoutingTable emptyRoutingTable; + private int numberOfShards; + private int numberOfReplicas; + private int shardsPerIndex; + private int totalNumberOfShards; + private static final Settings DEFAULT_SETTINGS = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT).build(); + private final AllocationService ALLOCATION_SERVICE = createAllocationService( + Settings.builder() + .put("cluster.routing.allocation.node_concurrent_recoveries", Integer.MAX_VALUE) // don't limit recoveries + .put("cluster.routing.allocation.node_initial_primaries_recoveries", Integer.MAX_VALUE) + .put( + ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_REPLICAS_RECOVERIES_SETTING.getKey(), + Integer.MAX_VALUE + ) + .build() + ); + private ClusterState clusterState; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + this.numberOfShards = 5; + this.numberOfReplicas = 2; + this.shardsPerIndex = this.numberOfShards * (this.numberOfReplicas + 1); + this.totalNumberOfShards = this.shardsPerIndex * 2; + logger.info("Setup test with {} shards and {} replicas.", this.numberOfShards, this.numberOfReplicas); + this.emptyRoutingTable = new RoutingTable.Builder().build(); + Metadata metadata = Metadata.builder().put(createIndexMetadata(TEST_INDEX_1)).put(createIndexMetadata(TEST_INDEX_2)).build(); + + RoutingTable testRoutingTable = new RoutingTable.Builder().add( + new IndexRoutingTable.Builder(metadata.index(TEST_INDEX_1).getIndex()).initializeAsNew(metadata.index(TEST_INDEX_1)).build() + ) + .add( + new IndexRoutingTable.Builder(metadata.index(TEST_INDEX_2).getIndex()).initializeAsNew(metadata.index(TEST_INDEX_2)).build() + ) + .build(); + this.clusterState = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(testRoutingTable) + .build(); + } + + /** + * Puts primary shard index routings into initializing state + */ + private void initPrimaries() { + logger.info("adding {} nodes and performing rerouting", this.numberOfReplicas + 1); + DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); + for (int i = 0; i < this.numberOfReplicas + 1; i++) { + discoBuilder = discoBuilder.add(newNode("node" + i)); + } + this.clusterState = ClusterState.builder(clusterState).nodes(discoBuilder).build(); + ClusterState rerouteResult = ALLOCATION_SERVICE.reroute(clusterState, "reroute"); + assertThat(rerouteResult, not(equalTo(this.clusterState))); + this.clusterState = rerouteResult; + } + + /** + * Moves initializing shards into started state + */ + private void 
startInitializingShards(String index) { + clusterState = startInitializingShardsAndReroute(ALLOCATION_SERVICE, clusterState, index); + } + + private IndexMetadata.Builder createIndexMetadata(String indexName) { + return new IndexMetadata.Builder(indexName).settings(DEFAULT_SETTINGS) + .numberOfReplicas(this.numberOfReplicas) + .numberOfShards(this.numberOfShards); + } + + public void testInterleavedShardIterator() { + // Initialize all the shards for test index 1 and 2; each index needs two rounds, one to start the primaries and one to start the replicas they initialize + initPrimaries(); + startInitializingShards(TEST_INDEX_1); + startInitializingShards(TEST_INDEX_1); + startInitializingShards(TEST_INDEX_2); + startInitializingShards(TEST_INDEX_2); + + // Create primary shard count imbalance between two nodes + final RoutingNode node0 = this.clusterState.getRoutingNodes().node("node0"); + final RoutingNode node1 = this.clusterState.getRoutingNodes().node("node1"); + final List<ShardRouting> shardRoutingList = node0.shardsWithState(TEST_INDEX_1, ShardRoutingState.STARTED); + for (ShardRouting routing : shardRoutingList) { + if (routing.primary()) { + node0.remove(routing); + ShardRouting swap = node1.getByShardId(routing.shardId()); + node0.add(swap); + node1.remove(swap); + node1.add(routing); + } + } + + // Get primary first shard iterator and assert primary shards are iterated over first + final Iterator<ShardRouting> iterator = this.clusterState.getRoutingNodes().nodeInterleavedShardIterator(true); + boolean iteratingPrimary = true; + int shardCount = 0; + while (iterator.hasNext()) { + final ShardRouting shard = iterator.next(); + if (iteratingPrimary) { + iteratingPrimary = shard.primary(); + } else { + assertFalse(shard.primary()); + } + shardCount++; + } + assertEquals(this.totalNumberOfShards, shardCount); + } +} diff --git a/server/src/test/java/org/opensearch/common/io/stream/StreamTests.java b/server/src/test/java/org/opensearch/common/io/stream/BaseStreamTests.java similarity index 93% rename from server/src/test/java/org/opensearch/common/io/stream/StreamTests.java rename to server/src/test/java/org/opensearch/common/io/stream/BaseStreamTests.java index 662992bbec1c3..b92e59e43e0db 100644 --- a/server/src/test/java/org/opensearch/common/io/stream/StreamTests.java +++ b/server/src/test/java/org/opensearch/common/io/stream/BaseStreamTests.java @@ -70,7 +70,9 @@ import static org.hamcrest.Matchers.iterableWithSize; import static org.hamcrest.Matchers.nullValue; -public class StreamTests extends OpenSearchTestCase { +public abstract class BaseStreamTests extends OpenSearchTestCase { + + protected abstract StreamInput getStreamInput(BytesReference bytesReference) throws IOException; public void testBooleanSerialization() throws IOException { final BytesStreamOutput output = new BytesStreamOutput(); @@ -85,7 +87,7 @@ public void testBooleanSerialization() throws IOException { assertThat(bytes[0], equalTo((byte) 0)); assertThat(bytes[1], equalTo((byte) 1)); - final StreamInput input = bytesReference.streamInput(); + final StreamInput input = getStreamInput(bytesReference); assertFalse(input.readBoolean()); assertTrue(input.readBoolean()); @@ -114,7 +116,7 @@ public void testOptionalBooleanSerialization() throws IOException { assertThat(bytes[1], equalTo((byte) 1)); assertThat(bytes[2], equalTo((byte) 2)); - final StreamInput input = bytesReference.streamInput(); + final StreamInput input = getStreamInput(bytesReference); final Boolean maybeFalse = input.readOptionalBoolean(); assertNotNull(maybeFalse); assertFalse(maybeFalse); @@ -139,7 +141,7 @@ public void testRandomVLongSerialization() throws IOException {
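The rename hunks above convert the concrete StreamTests into the abstract BaseStreamTests, whose single abstract hook getStreamInput is all a subclass overrides; every StreamInput implementation then inherits the complete round-trip suite. A minimal sketch of the same template-method pattern using only JDK types (BaseRoundTripTest and StreamBackedRoundTripTest are invented names; the real base class is the one shown in the diff):

import java.io.ByteArrayInputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.IOException;

// The base class owns the assertions; subclasses only decide how the raw
// bytes are wrapped, mirroring BaseStreamTests#getStreamInput.
abstract class BaseRoundTripTest {
    protected abstract DataInput wrap(byte[] bytes) throws IOException;

    void checkBooleanRoundTrip() throws IOException {
        DataInput in = wrap(new byte[] { 0, 1 });
        if (in.readBoolean() || !in.readBoolean()) {
            throw new AssertionError("boolean round trip failed");
        }
    }
}

// One concrete subclass per input implementation under test, just as
// ByteBufferStreamInputTests, BytesStreamInputTests and the other new
// classes below each supply a different StreamInput.
class StreamBackedRoundTripTest extends BaseRoundTripTest {
    @Override
    protected DataInput wrap(byte[] bytes) {
        return new DataInputStream(new ByteArrayInputStream(bytes));
    }
}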
long write = randomLong(); BytesStreamOutput out = new BytesStreamOutput(); out.writeZLong(write); - long read = out.bytes().streamInput().readZLong(); + long read = getStreamInput(out.bytes()).readZLong(); assertEquals(write, read); } } @@ -184,7 +186,7 @@ public void testLinkedHashMap() throws IOException { } BytesStreamOutput out = new BytesStreamOutput(); out.writeGenericValue(write); - LinkedHashMap read = (LinkedHashMap) out.bytes().streamInput().readGenericValue(); + LinkedHashMap read = (LinkedHashMap) getStreamInput(out.bytes()).readGenericValue(); assertEquals(size, read.size()); int index = 0; for (Map.Entry entry : read.entrySet()) { @@ -251,10 +253,10 @@ public void testWritableArrays() throws IOException { sourceArray = null; } out.writeOptionalArray(sourceArray); - targetArray = out.bytes().streamInput().readOptionalArray(WriteableString::new, WriteableString[]::new); + targetArray = getStreamInput(out.bytes()).readOptionalArray(WriteableString::new, WriteableString[]::new); } else { out.writeArray(sourceArray); - targetArray = out.bytes().streamInput().readArray(WriteableString::new, WriteableString[]::new); + targetArray = getStreamInput(out.bytes()).readArray(WriteableString::new, WriteableString[]::new); } assertThat(targetArray, equalTo(sourceArray)); @@ -273,11 +275,11 @@ public void testArrays() throws IOException { strings = generateRandomStringArray(10, 10, false, true); } out.writeOptionalArray(writer, strings); - deserialized = out.bytes().streamInput().readOptionalArray(reader, String[]::new); + deserialized = getStreamInput(out.bytes()).readOptionalArray(reader, String[]::new); } else { strings = generateRandomStringArray(10, 10, false, true); out.writeArray(writer, strings); - deserialized = out.bytes().streamInput().readArray(reader, String[]::new); + deserialized = getStreamInput(out.bytes()).readArray(reader, String[]::new); } assertThat(deserialized, equalTo(strings)); } @@ -342,7 +344,7 @@ private void runWriteReadCollectionTest( } try (BytesStreamOutput out = new BytesStreamOutput()) { writer.accept(out, collection); - try (StreamInput in = out.bytes().streamInput()) { + try (StreamInput in = getStreamInput(out.bytes())) { assertThat(collection, equalTo(reader.apply(in))); } } @@ -359,7 +361,7 @@ public void testSetOfLongs() throws IOException { final BytesStreamOutput out = new BytesStreamOutput(); out.writeCollection(sourceSet, StreamOutput::writeLong); - final Set<Long> targetSet = out.bytes().streamInput().readSet(StreamInput::readLong); + final Set<Long> targetSet = getStreamInput(out.bytes()).readSet(StreamInput::readLong); assertThat(targetSet, equalTo(sourceSet)); } @@ -367,7 +369,7 @@ public void testInstantSerialization() throws IOException { final Instant instant = Instant.now(); try (BytesStreamOutput out = new BytesStreamOutput()) { out.writeInstant(instant); - try (StreamInput in = out.bytes().streamInput()) { + try (StreamInput in = getStreamInput(out.bytes())) { final Instant serialized = in.readInstant(); assertEquals(instant, serialized); } @@ -378,7 +380,7 @@ public void testOptionalInstantSerialization() throws IOException { final Instant instant = Instant.now(); try (BytesStreamOutput out = new BytesStreamOutput()) { out.writeOptionalInstant(instant); - try (StreamInput in = out.bytes().streamInput()) { + try (StreamInput in = getStreamInput(out.bytes())) { final Instant serialized = in.readOptionalInstant(); assertEquals(instant, serialized); } @@ -387,7 +389,7 @@ public void testOptionalInstantSerialization() throws IOException { final Instant
missing = null; try (BytesStreamOutput out = new BytesStreamOutput()) { out.writeOptionalInstant(missing); - try (StreamInput in = out.bytes().streamInput()) { + try (StreamInput in = getStreamInput(out.bytes())) { final Instant serialized = in.readOptionalInstant(); assertEquals(missing, serialized); } @@ -437,7 +439,7 @@ public void testSecureStringSerialization() throws IOException { output.writeSecureString(secureString); final BytesReference bytesReference = output.bytes(); - final StreamInput input = bytesReference.streamInput(); + final StreamInput input = getStreamInput(bytesReference); assertThat(secureString, is(equalTo(input.readSecureString()))); } @@ -447,7 +449,7 @@ public void testSecureStringSerialization() throws IOException { output.writeOptionalSecureString(secureString); final BytesReference bytesReference = output.bytes(); - final StreamInput input = bytesReference.streamInput(); + final StreamInput input = getStreamInput(bytesReference); if (secureString != null) { assertThat(input.readOptionalSecureString(), is(equalTo(secureString))); @@ -507,7 +509,7 @@ private void assertSerialization( try (BytesStreamOutput output = new BytesStreamOutput()) { outputAssertions.accept(output); final BytesReference bytesReference = output.bytes(); - final StreamInput input = bytesReference.streamInput(); + final StreamInput input = getStreamInput(bytesReference); inputAssertions.accept(input); } } diff --git a/server/src/test/java/org/opensearch/common/io/stream/ByteBufferStreamInputTests.java b/server/src/test/java/org/opensearch/common/io/stream/ByteBufferStreamInputTests.java new file mode 100644 index 0000000000000..1061b335d715a --- /dev/null +++ b/server/src/test/java/org/opensearch/common/io/stream/ByteBufferStreamInputTests.java @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.io.stream; + +import org.apache.lucene.util.BytesRef; +import org.opensearch.common.bytes.BytesReference; + +import java.io.IOException; +import java.nio.ByteBuffer; + +/** test the ByteBufferStreamInput using the same BaseStreamTests */ +public class ByteBufferStreamInputTests extends BaseStreamTests { + @Override + protected StreamInput getStreamInput(BytesReference bytesReference) throws IOException { + BytesRef br = bytesReference.toBytesRef(); + ByteBuffer bb = ByteBuffer.wrap(br.bytes, br.offset, br.length); + return new ByteBufferStreamInput(bb); + } +} diff --git a/server/src/test/java/org/opensearch/common/io/stream/BytesReferenceStreamInputTests.java b/server/src/test/java/org/opensearch/common/io/stream/BytesReferenceStreamInputTests.java new file mode 100644 index 0000000000000..ed77c3130a32d --- /dev/null +++ b/server/src/test/java/org/opensearch/common/io/stream/BytesReferenceStreamInputTests.java @@ -0,0 +1,21 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.common.io.stream; + +import org.opensearch.common.bytes.BytesReference; + +import java.io.IOException; + +/** test the BytesReferenceStream using the same BaseStreamTests */ +public class BytesReferenceStreamInputTests extends BaseStreamTests { + @Override + protected StreamInput getStreamInput(BytesReference bytesReference) throws IOException { + return bytesReference.streamInput(); + } +} diff --git a/server/src/test/java/org/opensearch/common/io/stream/BytesStreamInputTests.java b/server/src/test/java/org/opensearch/common/io/stream/BytesStreamInputTests.java new file mode 100644 index 0000000000000..c7a47e7580b02 --- /dev/null +++ b/server/src/test/java/org/opensearch/common/io/stream/BytesStreamInputTests.java @@ -0,0 +1,23 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.io.stream; + +import org.apache.lucene.util.BytesRef; +import org.opensearch.common.bytes.BytesReference; + +import java.io.IOException; + +/** test the BytesStreamInput using the same BaseStreamTests */ +public class BytesStreamInputTests extends BaseStreamTests { + @Override + protected StreamInput getStreamInput(BytesReference bytesReference) throws IOException { + BytesRef br = bytesReference.toBytesRef(); + return new BytesStreamInput(br.bytes, br.offset, br.length); + } +} diff --git a/server/src/test/java/org/opensearch/common/io/stream/FilterStreamInputTests.java b/server/src/test/java/org/opensearch/common/io/stream/FilterStreamInputTests.java new file mode 100644 index 0000000000000..3cf9dc656a8f1 --- /dev/null +++ b/server/src/test/java/org/opensearch/common/io/stream/FilterStreamInputTests.java @@ -0,0 +1,24 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.io.stream; + +import org.apache.lucene.util.BytesRef; +import org.opensearch.common.bytes.BytesReference; + +import java.io.IOException; + +/** test the FilterStreamInput using the same BaseStreamTests */ +public class FilterStreamInputTests extends BaseStreamTests { + @Override + protected StreamInput getStreamInput(BytesReference bytesReference) throws IOException { + BytesRef br = bytesReference.toBytesRef(); + return new FilterStreamInput(StreamInput.wrap(br.bytes, br.offset, br.length)) { + }; + } +} diff --git a/server/src/test/java/org/opensearch/common/io/stream/InputStreamStreamInputTests.java b/server/src/test/java/org/opensearch/common/io/stream/InputStreamStreamInputTests.java new file mode 100644 index 0000000000000..6a31c21445a04 --- /dev/null +++ b/server/src/test/java/org/opensearch/common/io/stream/InputStreamStreamInputTests.java @@ -0,0 +1,23 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.io.stream; + +import org.apache.lucene.util.BytesRef; +import org.opensearch.common.bytes.BytesReference; + +import java.io.IOException; + +/** test the InputStreamStreamInput using the same BaseStreamTests */ +public class InputStreamStreamInputTests extends BaseStreamTests { + @Override + protected StreamInput getStreamInput(BytesReference bytesReference) throws IOException { + BytesRef br = bytesReference.toBytesRef(); + return new InputStreamStreamInput(StreamInput.wrap(br.bytes, br.offset, br.length)); + } +} diff --git a/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java b/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java index 55e3e3ac6c1ef..30fcf4bb32989 100644 --- a/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java +++ b/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java @@ -66,8 +66,8 @@ protected boolean enableWarningsCheck() { public static void checkJvmProperties() { boolean runtimeJdk8 = JavaVersion.current().getVersion().get(0) == 8; assert (runtimeJdk8 && ("SPI,JRE".equals(System.getProperty("java.locale.providers")))) - || (false == runtimeJdk8 - && ("SPI,COMPAT".equals(System.getProperty("java.locale.providers")))) : "`-Djava.locale.providers` needs to be set"; + || (false == runtimeJdk8 && ("SPI,COMPAT".equals(System.getProperty("java.locale.providers")))) + : "`-Djava.locale.providers` needs to be set"; assumeFalse( "won't work in jdk8 " + "because SPI mechanism is not looking at classpath - needs ISOCalendarDataProvider in jre's ext/libs", runtimeJdk8 diff --git a/server/src/test/java/org/opensearch/common/network/InetAddressesTests.java b/server/src/test/java/org/opensearch/common/network/InetAddressesTests.java index 138a1097cc576..3de32f1e22f4e 100644 --- a/server/src/test/java/org/opensearch/common/network/InetAddressesTests.java +++ b/server/src/test/java/org/opensearch/common/network/InetAddressesTests.java @@ -33,11 +33,18 @@ import org.opensearch.test.OpenSearchTestCase; import org.hamcrest.Matchers; +import java.net.Inet6Address; import java.net.InetAddress; import java.net.NetworkInterface; import java.net.UnknownHostException; +import java.util.Collections; import java.util.Enumeration; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.junit.Assume.assumeThat; + public class InetAddressesTests extends OpenSearchTestCase { public void testForStringBogusInput() { String[] bogusInputs = { @@ -147,11 +154,12 @@ public void testForStringIPv6WithScopeIdInput() throws java.io.IOException { String scopeId = null; while (interfaces.hasMoreElements()) { final NetworkInterface nint = interfaces.nextElement(); - if (nint.isLoopback()) { + if (nint.isLoopback() && Collections.list(nint.getInetAddresses()).stream().anyMatch(Inet6Address.class::isInstance)) { scopeId = nint.getName(); break; } } + assumeThat("The loopback interface has no IPv6 address assigned", scopeId, is(not(nullValue()))); assertNotNull(scopeId); String ipStr = "0:0:0:0:0:0:0:1%" + scopeId; InetAddress ipv6Addr = InetAddress.getByName(ipStr); diff --git a/server/src/test/java/org/opensearch/common/util/ByteUtilsTests.java b/server/src/test/java/org/opensearch/common/util/ByteUtilsTests.java index 47fdfe5ef85f9..95c77e5d606ed 100644 --- a/server/src/test/java/org/opensearch/common/util/ByteUtilsTests.java +++ 
b/server/src/test/java/org/opensearch/common/util/ByteUtilsTests.java @@ -32,8 +32,6 @@ package org.opensearch.common.util; -import org.apache.lucene.store.ByteArrayDataInput; -import org.apache.lucene.store.ByteArrayDataOutput; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -81,42 +79,4 @@ public void testDouble() throws IOException { } } - public void testVLong() throws IOException { - final long[] data = new long[scaledRandomIntBetween(1000, 10000)]; - for (int i = 0; i < data.length; ++i) { - switch (randomInt(4)) { - case 0: - data[i] = 0; - break; - case 1: - data[i] = Long.MAX_VALUE; - break; - case 2: - data[i] = Long.MIN_VALUE; - break; - case 3: - data[i] = randomInt(1 << randomIntBetween(2, 30)); - break; - case 4: - data[i] = randomLong(); - break; - default: - throw new AssertionError(); - } - } - final byte[] encoded = new byte[ByteUtils.MAX_BYTES_VLONG * data.length]; - ByteArrayDataOutput out = new ByteArrayDataOutput(encoded); - for (int i = 0; i < data.length; ++i) { - final int pos = out.getPosition(); - ByteUtils.writeVLong(out, data[i]); - if (data[i] < 0) { - assertEquals(ByteUtils.MAX_BYTES_VLONG, out.getPosition() - pos); - } - } - final ByteArrayDataInput in = new ByteArrayDataInput(encoded); - for (int i = 0; i < data.length; ++i) { - assertEquals(data[i], ByteUtils.readVLong(in)); - } - } - } diff --git a/server/src/test/java/org/opensearch/index/IndexServiceTests.java b/server/src/test/java/org/opensearch/index/IndexServiceTests.java index 3592298c34995..47feced5302f4 100644 --- a/server/src/test/java/org/opensearch/index/IndexServiceTests.java +++ b/server/src/test/java/org/opensearch/index/IndexServiceTests.java @@ -311,16 +311,14 @@ public void testRefreshActuallyWorks() throws Exception { // before that this is why we need to wait for the refresh task to be unscheduled and the first doc to be visible assertTrue(refreshTask.isClosed()); refreshTask = indexService.getRefreshTask(); - assertBusy( - () -> { - // this one either becomes visible due to a concurrently running scheduled refresh OR due to the force refresh - // we are running on updateMetadata if the interval changes - try (Engine.Searcher searcher = shard.acquireSearcher("test")) { - TopDocs search = searcher.search(new MatchAllDocsQuery(), 10); - assertEquals(1, search.totalHits.value); - } + assertBusy(() -> { + // this one either becomes visible due to a concurrently running scheduled refresh OR due to the force refresh + // we are running on updateMetadata if the interval changes + try (Engine.Searcher searcher = shard.acquireSearcher("test")) { + TopDocs search = searcher.search(new MatchAllDocsQuery(), 10); + assertEquals(1, search.totalHits.value); } - ); + }); assertFalse(refreshTask.isClosed()); // refresh every millisecond client().prepareIndex("test", "test", "1").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); @@ -330,25 +328,21 @@ public void testRefreshActuallyWorks() throws Exception { .setSettings(Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "1ms")) .get(); assertTrue(refreshTask.isClosed()); - assertBusy( - () -> { - // this one becomes visible due to the force refresh we are running on updateMetadata if the interval changes - try (Engine.Searcher searcher = shard.acquireSearcher("test")) { - TopDocs search = searcher.search(new MatchAllDocsQuery(), 10); - assertEquals(2, search.totalHits.value); - } + assertBusy(() -> { + // this one becomes visible due to the force refresh we are running on updateMetadata if 
the interval changes + try (Engine.Searcher searcher = shard.acquireSearcher("test")) { + TopDocs search = searcher.search(new MatchAllDocsQuery(), 10); + assertEquals(2, search.totalHits.value); } - ); + }); client().prepareIndex("test", "test", "2").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); - assertBusy( - () -> { - // this one becomes visible due to the scheduled refresh - try (Engine.Searcher searcher = shard.acquireSearcher("test")) { - TopDocs search = searcher.search(new MatchAllDocsQuery(), 10); - assertEquals(3, search.totalHits.value); - } + assertBusy(() -> { + // this one becomes visible due to the scheduled refresh + try (Engine.Searcher searcher = shard.acquireSearcher("test")) { + TopDocs search = searcher.search(new MatchAllDocsQuery(), 10); + assertEquals(3, search.totalHits.value); } - ); + }); } public void testAsyncFsyncActuallyWorks() throws Exception { diff --git a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java index 48c3094ee4b56..d67534bbfbddf 100644 --- a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java +++ b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java @@ -40,7 +40,6 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.index.translog.Translog; @@ -758,38 +757,4 @@ public void testIgnoreTranslogRetentionSettingsIfSoftDeletesEnabled() { assertThat(indexSettings.getTranslogRetentionAge().millis(), equalTo(-1L)); assertThat(indexSettings.getTranslogRetentionSize().getBytes(), equalTo(-1L)); } - - public void testUpdateTranslogRetentionSettingsWithSoftDeletesDisabled() { - Settings.Builder settings = Settings.builder() - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false) - .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT); - - TimeValue ageSetting = TimeValue.timeValueHours(12); - if (randomBoolean()) { - ageSetting = randomBoolean() ? TimeValue.MINUS_ONE : TimeValue.timeValueMillis(randomIntBetween(0, 10000)); - settings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), ageSetting); - } - ByteSizeValue sizeSetting = new ByteSizeValue(512, ByteSizeUnit.MB); - if (randomBoolean()) { - sizeSetting = randomBoolean() ? new ByteSizeValue(-1) : new ByteSizeValue(randomIntBetween(0, 1024)); - settings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), sizeSetting); - } - IndexMetadata metadata = newIndexMeta("index", settings.build()); - IndexSettings indexSettings = new IndexSettings(metadata, Settings.EMPTY); - assertThat(indexSettings.getTranslogRetentionAge(), equalTo(ageSetting)); - assertThat(indexSettings.getTranslogRetentionSize(), equalTo(sizeSetting)); - - Settings.Builder newSettings = Settings.builder().put(settings.build()); - if (randomBoolean()) { - ageSetting = randomBoolean() ? TimeValue.MINUS_ONE : TimeValue.timeValueMillis(randomIntBetween(0, 10000)); - newSettings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), ageSetting); - } - if (randomBoolean()) { - sizeSetting = randomBoolean() ? 
new ByteSizeValue(-1) : new ByteSizeValue(randomIntBetween(0, 1024)); - newSettings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), sizeSetting); - } - indexSettings.updateIndexMetadata(newIndexMeta("index", newSettings.build())); - assertThat(indexSettings.getTranslogRetentionAge(), equalTo(ageSetting)); - assertThat(indexSettings.getTranslogRetentionSize(), equalTo(sizeSetting)); - } } diff --git a/server/src/test/java/org/opensearch/index/IndexingPressureServiceTests.java b/server/src/test/java/org/opensearch/index/IndexingPressureServiceTests.java index 360559e68db7a..22d185643018a 100644 --- a/server/src/test/java/org/opensearch/index/IndexingPressureServiceTests.java +++ b/server/src/test/java/org/opensearch/index/IndexingPressureServiceTests.java @@ -9,7 +9,14 @@ package org.opensearch.index; import org.junit.Before; +import org.opensearch.action.DocWriteRequest; import org.opensearch.action.admin.indices.stats.CommonStatsFlags; +import org.opensearch.action.bulk.BulkItemRequest; +import org.opensearch.action.bulk.BulkRequest; +import org.opensearch.action.bulk.BulkShardRequest; +import org.opensearch.action.index.IndexRequest; +import org.opensearch.action.support.WriteRequest; +import org.opensearch.client.Requests; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.ClusterSettings; @@ -43,11 +50,14 @@ public void testCoordinatingOperationForShardIndexingPressure() { IndexingPressureService service = new IndexingPressureService(settings, clusterService); Index index = new Index("IndexName", "UUID"); ShardId shardId = new ShardId(index, 0); - - Releasable releasable = service.markCoordinatingOperationStarted(shardId, 1024, false); + BulkItemRequest[] items = new BulkItemRequest[1]; + DocWriteRequest<?> writeRequest = new IndexRequest("index").id("id").source(Requests.INDEX_CONTENT_TYPE, "foo", "bar"); + items[0] = new BulkItemRequest(0, writeRequest); + BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, WriteRequest.RefreshPolicy.NONE, items); + Releasable releasable = service.markCoordinatingOperationStarted(shardId, bulkShardRequest::ramBytesUsed, false); IndexingPressurePerShardStats shardStats = service.shardStats(CommonStatsFlags.ALL).getIndexingPressureShardStats(shardId); - assertEquals(1024, shardStats.getCurrentCoordinatingBytes()); + assertEquals(bulkShardRequest.ramBytesUsed(), shardStats.getCurrentCoordinatingBytes()); releasable.close(); } @@ -64,11 +74,12 @@ public void testCoordinatingOperationForIndexingPressure() { ); clusterSettings.applySettings(updated.build()); - Releasable releasable = service.markCoordinatingOperationStarted(1024, false); + BulkRequest bulkRequest = new BulkRequest(); + Releasable releasable = service.markCoordinatingOperationStarted(bulkRequest::ramBytesUsed, false); IndexingPressurePerShardStats shardStats = service.shardStats(CommonStatsFlags.ALL).getIndexingPressureShardStats(shardId); assertNull(shardStats); IndexingPressureStats nodeStats = service.nodeStats(); - assertEquals(1024, nodeStats.getCurrentCoordinatingBytes()); + assertEquals(bulkRequest.ramBytesUsed(), nodeStats.getCurrentCoordinatingBytes()); releasable.close(); } diff --git a/server/src/test/java/org/opensearch/index/MergePolicySettingsTests.java b/server/src/test/java/org/opensearch/index/MergePolicySettingsTests.java index bc7a57cc21176..4735c3a8ee3cb 100644 --- a/server/src/test/java/org/opensearch/index/MergePolicySettingsTests.java +++
b/server/src/test/java/org/opensearch/index/MergePolicySettingsTests.java @@ -156,26 +156,6 @@ public void testTieredMergePolicySettingsUpdate() throws IOException { MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE - 1 ); - assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnceExplicit(), - MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT - ); - indexSettings.updateIndexMetadata( - newIndexMeta( - "index", - Settings.builder() - .put( - MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT_SETTING.getKey(), - MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT - 1 - ) - .build() - ) - ); - assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnceExplicit(), - MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT - 1 - ); - assertEquals( ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getMbFrac(), @@ -259,10 +239,6 @@ public void testTieredMergePolicySettingsUpdate() throws IOException { ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE ); - assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnceExplicit(), - MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT - ); assertEquals( ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), diff --git a/server/src/test/java/org/opensearch/index/OpenSearchTieredMergePolicyTests.java b/server/src/test/java/org/opensearch/index/OpenSearchTieredMergePolicyTests.java index 87dcf1eee7ad1..cbc439041666f 100644 --- a/server/src/test/java/org/opensearch/index/OpenSearchTieredMergePolicyTests.java +++ b/server/src/test/java/org/opensearch/index/OpenSearchTieredMergePolicyTests.java @@ -69,12 +69,6 @@ public void testSetMaxMergeAtOnce() { assertEquals(42, policy.regularMergePolicy.getMaxMergeAtOnce()); } - public void testSetMaxMergeAtOnceExplicit() { - OpenSearchTieredMergePolicy policy = new OpenSearchTieredMergePolicy(); - policy.setMaxMergeAtOnceExplicit(42); - assertEquals(42, policy.forcedMergePolicy.getMaxMergeAtOnceExplicit()); - } - public void testSetSegmentsPerTier() { OpenSearchTieredMergePolicy policy = new OpenSearchTieredMergePolicy(); policy.setSegmentsPerTier(42); diff --git a/server/src/test/java/org/opensearch/index/SearchSlowLogTests.java b/server/src/test/java/org/opensearch/index/SearchSlowLogTests.java index b81e194ca4282..ae159092a4833 100644 --- a/server/src/test/java/org/opensearch/index/SearchSlowLogTests.java +++ b/server/src/test/java/org/opensearch/index/SearchSlowLogTests.java @@ -96,7 +96,7 @@ protected SearchContext createSearchContext(IndexService indexService) { protected SearchContext createSearchContext(IndexService indexService, String... 
groupStats) { BigArrays bigArrays = indexService.getBigArrays(); - final ShardSearchRequest request = new ShardSearchRequest(new ShardId(indexService.index(), 0), new String[0], 0L, null); + final ShardSearchRequest request = new ShardSearchRequest(new ShardId(indexService.index(), 0), 0L, null); return new TestSearchContext(bigArrays, indexService) { @Override public List<String> groupStats() { @@ -258,30 +258,12 @@ public void testSlowLogHasJsonFields() throws IOException { assertThat(p.getValueFor("took"), equalTo("10nanos")); assertThat(p.getValueFor("took_millis"), equalTo("0")); assertThat(p.getValueFor("total_hits"), equalTo("-1")); - assertThat(p.getValueFor("types"), equalTo("[]")); assertThat(p.getValueFor("stats"), equalTo("[]")); assertThat(p.getValueFor("search_type"), Matchers.nullValue()); assertThat(p.getValueFor("total_shards"), equalTo("1")); assertThat(p.getValueFor("source"), equalTo("{\\\"query\\\":{\\\"match_all\\\":{\\\"boost\\\":1.0}}}")); } - public void testSlowLogWithTypes() throws IOException { - IndexService index = createIndex("foo"); - SearchContext searchContext = searchContextWithSourceAndTask(index); - searchContext.getQueryShardContext().setTypes("type1", "type2"); - SearchSlowLog.SearchSlowLogMessage p = new SearchSlowLog.SearchSlowLogMessage(searchContext, 10); - - assertThat(p.getValueFor("types"), equalTo("[\\\"type1\\\", \\\"type2\\\"]")); - - searchContext.getQueryShardContext().setTypes("type1"); - p = new SearchSlowLog.SearchSlowLogMessage(searchContext, 10); - assertThat(p.getValueFor("types"), equalTo("[\\\"type1\\\"]")); - - searchContext.getQueryShardContext().setTypes(); - p = new SearchSlowLog.SearchSlowLogMessage(searchContext, 10); - assertThat(p.getValueFor("types"), equalTo("[]")); - } - public void testSlowLogsWithStats() throws IOException { IndexService index = createIndex("foo"); SearchContext searchContext = createSearchContext(index, "group1"); diff --git a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java index b589ef4cccaef..cb8cc7ef6ab03 100644 --- a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java @@ -55,7 +55,6 @@ import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.LiveIndexWriterConfig; -import org.apache.lucene.index.LogByteSizeMergePolicy; import org.apache.lucene.index.LogDocMergePolicy; import org.apache.lucene.index.MergePolicy; import org.apache.lucene.index.NoMergePolicy; @@ -82,6 +81,7 @@ import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.FixedBitSet; +import org.apache.lucene.util.SetOnce; import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.action.ActionListener; @@ -98,7 +98,6 @@ import org.opensearch.common.Strings; import org.opensearch.common.TriFunction; import org.opensearch.common.UUIDs; -import org.opensearch.common.breaker.CircuitBreaker; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.collect.Tuple; @@ -115,6 +114,7 @@ import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.common.util.concurrent.ReleasableLock; import
org.opensearch.common.xcontent.XContentType; import org.opensearch.core.internal.io.IOUtils; import org.opensearch.index.IndexSettings; @@ -165,7 +165,6 @@ import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; -import java.util.Base64; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; @@ -313,202 +312,6 @@ public void testVersionMapAfterAutoIDDocument() throws IOException { } } - public void testSegmentsWithoutSoftDeletes() throws Exception { - Settings settings = Settings.builder() - .put(defaultSettings.getSettings()) - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false) - .build(); - IndexSettings indexSettings = IndexSettingsModule.newIndexSettings( - IndexMetadata.builder(defaultSettings.getIndexMetadata()).settings(settings).build() - ); - try ( - Store store = createStore(); - InternalEngine engine = createEngine(config(indexSettings, store, createTempDir(), NoMergePolicy.INSTANCE, null)) - ) { - List segments = engine.segments(false); - assertThat(segments.isEmpty(), equalTo(true)); - assertThat(engine.segmentsStats(false, false).getCount(), equalTo(0L)); - assertThat(engine.segmentsStats(false, false).getMemoryInBytes(), equalTo(0L)); - - // create two docs and refresh - ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), B_1, null); - Engine.Index first = indexForDoc(doc); - Engine.IndexResult firstResult = engine.index(first); - ParsedDocument doc2 = testParsedDocument("2", null, testDocumentWithTextField(), B_2, null); - Engine.Index second = indexForDoc(doc2); - Engine.IndexResult secondResult = engine.index(second); - assertThat(secondResult.getTranslogLocation(), greaterThan(firstResult.getTranslogLocation())); - engine.refresh("test"); - - segments = engine.segments(false); - assertThat(segments.size(), equalTo(1)); - SegmentsStats stats = engine.segmentsStats(false, false); - assertThat(stats.getCount(), equalTo(1L)); - assertThat(stats.getTermsMemoryInBytes(), greaterThan(0L)); - assertThat(stats.getStoredFieldsMemoryInBytes(), greaterThan(0L)); - assertThat(stats.getTermVectorsMemoryInBytes(), equalTo(0L)); - assertThat(stats.getNormsMemoryInBytes(), greaterThan(0L)); - assertThat(stats.getDocValuesMemoryInBytes(), greaterThan(0L)); - assertThat(segments.get(0).isCommitted(), equalTo(false)); - assertThat(segments.get(0).isSearch(), equalTo(true)); - assertThat(segments.get(0).getNumDocs(), equalTo(2)); - assertThat(segments.get(0).getDeletedDocs(), equalTo(0)); - assertThat(segments.get(0).isCompound(), equalTo(true)); - assertThat(segments.get(0).ramTree, nullValue()); - engine.flush(); - - segments = engine.segments(false); - assertThat(segments.size(), equalTo(1)); - assertThat(engine.segmentsStats(false, false).getCount(), equalTo(1L)); - assertThat(segments.get(0).isCommitted(), equalTo(true)); - assertThat(segments.get(0).isSearch(), equalTo(true)); - assertThat(segments.get(0).getNumDocs(), equalTo(2)); - assertThat(segments.get(0).getDeletedDocs(), equalTo(0)); - assertThat(segments.get(0).isCompound(), equalTo(true)); - - ParsedDocument doc3 = testParsedDocument("3", null, testDocumentWithTextField(), B_3, null); - engine.index(indexForDoc(doc3)); - engine.refresh("test"); - - segments = engine.segments(false); - assertThat(segments.size(), equalTo(2)); - assertThat(engine.segmentsStats(false, false).getCount(), equalTo(2L)); - assertThat(engine.segmentsStats(false, false).getTermsMemoryInBytes(), greaterThan(stats.getTermsMemoryInBytes())); - assertThat( - 
engine.segmentsStats(false, false).getStoredFieldsMemoryInBytes(), - greaterThan(stats.getStoredFieldsMemoryInBytes()) - ); - assertThat(engine.segmentsStats(false, false).getTermVectorsMemoryInBytes(), equalTo(0L)); - assertThat(engine.segmentsStats(false, false).getNormsMemoryInBytes(), greaterThan(stats.getNormsMemoryInBytes())); - assertThat(engine.segmentsStats(false, false).getDocValuesMemoryInBytes(), greaterThan(stats.getDocValuesMemoryInBytes())); - assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true)); - assertThat(segments.get(0).isCommitted(), equalTo(true)); - assertThat(segments.get(0).isSearch(), equalTo(true)); - assertThat(segments.get(0).getNumDocs(), equalTo(2)); - assertThat(segments.get(0).getDeletedDocs(), equalTo(0)); - assertThat(segments.get(0).isCompound(), equalTo(true)); - - assertThat(segments.get(1).isCommitted(), equalTo(false)); - assertThat(segments.get(1).isSearch(), equalTo(true)); - assertThat(segments.get(1).getNumDocs(), equalTo(1)); - assertThat(segments.get(1).getDeletedDocs(), equalTo(0)); - assertThat(segments.get(1).isCompound(), equalTo(true)); - - engine.delete(new Engine.Delete("test", "1", newUid(doc), primaryTerm.get())); - engine.refresh("test"); - - segments = engine.segments(false); - assertThat(segments.size(), equalTo(2)); - assertThat(engine.segmentsStats(false, false).getCount(), equalTo(2L)); - assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true)); - assertThat(segments.get(0).isCommitted(), equalTo(true)); - assertThat(segments.get(0).isSearch(), equalTo(true)); - assertThat(segments.get(0).getNumDocs(), equalTo(1)); - assertThat(segments.get(0).getDeletedDocs(), equalTo(1)); - assertThat(segments.get(0).isCompound(), equalTo(true)); - - assertThat(segments.get(1).isCommitted(), equalTo(false)); - assertThat(segments.get(1).isSearch(), equalTo(true)); - assertThat(segments.get(1).getNumDocs(), equalTo(1)); - assertThat(segments.get(1).getDeletedDocs(), equalTo(0)); - assertThat(segments.get(1).isCompound(), equalTo(true)); - - engine.onSettingsChanged( - indexSettings.getTranslogRetentionAge(), - indexSettings.getTranslogRetentionSize(), - indexSettings.getSoftDeleteRetentionOperations() - ); - ParsedDocument doc4 = testParsedDocument("4", null, testDocumentWithTextField(), B_3, null); - engine.index(indexForDoc(doc4)); - engine.refresh("test"); - - segments = engine.segments(false); - assertThat(segments.size(), equalTo(3)); - assertThat(engine.segmentsStats(false, false).getCount(), equalTo(3L)); - assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true)); - assertThat(segments.get(0).isCommitted(), equalTo(true)); - assertThat(segments.get(0).isSearch(), equalTo(true)); - assertThat(segments.get(0).getNumDocs(), equalTo(1)); - assertThat(segments.get(0).getDeletedDocs(), equalTo(1)); - assertThat(segments.get(0).isCompound(), equalTo(true)); - - assertThat(segments.get(1).isCommitted(), equalTo(false)); - assertThat(segments.get(1).isSearch(), equalTo(true)); - assertThat(segments.get(1).getNumDocs(), equalTo(1)); - assertThat(segments.get(1).getDeletedDocs(), equalTo(0)); - assertThat(segments.get(1).isCompound(), equalTo(true)); - - assertThat(segments.get(2).isCommitted(), equalTo(false)); - assertThat(segments.get(2).isSearch(), equalTo(true)); - assertThat(segments.get(2).getNumDocs(), equalTo(1)); - assertThat(segments.get(2).getDeletedDocs(), equalTo(0)); - assertThat(segments.get(2).isCompound(), equalTo(true)); 
- - // internal refresh - lets make sure we see those segments in the stats - ParsedDocument doc5 = testParsedDocument("5", null, testDocumentWithTextField(), B_3, null); - engine.index(indexForDoc(doc5)); - engine.refresh("test", Engine.SearcherScope.INTERNAL, true); - - segments = engine.segments(false); - assertThat(segments.size(), equalTo(4)); - assertThat(engine.segmentsStats(false, false).getCount(), equalTo(4L)); - assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true)); - assertThat(segments.get(0).isCommitted(), equalTo(true)); - assertThat(segments.get(0).isSearch(), equalTo(true)); - assertThat(segments.get(0).getNumDocs(), equalTo(1)); - assertThat(segments.get(0).getDeletedDocs(), equalTo(1)); - assertThat(segments.get(0).isCompound(), equalTo(true)); - - assertThat(segments.get(1).isCommitted(), equalTo(false)); - assertThat(segments.get(1).isSearch(), equalTo(true)); - assertThat(segments.get(1).getNumDocs(), equalTo(1)); - assertThat(segments.get(1).getDeletedDocs(), equalTo(0)); - assertThat(segments.get(1).isCompound(), equalTo(true)); - - assertThat(segments.get(2).isCommitted(), equalTo(false)); - assertThat(segments.get(2).isSearch(), equalTo(true)); - assertThat(segments.get(2).getNumDocs(), equalTo(1)); - assertThat(segments.get(2).getDeletedDocs(), equalTo(0)); - assertThat(segments.get(2).isCompound(), equalTo(true)); - - assertThat(segments.get(3).isCommitted(), equalTo(false)); - assertThat(segments.get(3).isSearch(), equalTo(false)); - assertThat(segments.get(3).getNumDocs(), equalTo(1)); - assertThat(segments.get(3).getDeletedDocs(), equalTo(0)); - assertThat(segments.get(3).isCompound(), equalTo(true)); - - // now refresh the external searcher and make sure it has the new segment - engine.refresh("test"); - segments = engine.segments(false); - assertThat(segments.size(), equalTo(4)); - assertThat(engine.segmentsStats(false, false).getCount(), equalTo(4L)); - assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true)); - assertThat(segments.get(0).isCommitted(), equalTo(true)); - assertThat(segments.get(0).isSearch(), equalTo(true)); - assertThat(segments.get(0).getNumDocs(), equalTo(1)); - assertThat(segments.get(0).getDeletedDocs(), equalTo(1)); - assertThat(segments.get(0).isCompound(), equalTo(true)); - - assertThat(segments.get(1).isCommitted(), equalTo(false)); - assertThat(segments.get(1).isSearch(), equalTo(true)); - assertThat(segments.get(1).getNumDocs(), equalTo(1)); - assertThat(segments.get(1).getDeletedDocs(), equalTo(0)); - assertThat(segments.get(1).isCompound(), equalTo(true)); - - assertThat(segments.get(2).isCommitted(), equalTo(false)); - assertThat(segments.get(2).isSearch(), equalTo(true)); - assertThat(segments.get(2).getNumDocs(), equalTo(1)); - assertThat(segments.get(2).getDeletedDocs(), equalTo(0)); - assertThat(segments.get(2).isCompound(), equalTo(true)); - - assertThat(segments.get(3).isCommitted(), equalTo(false)); - assertThat(segments.get(3).isSearch(), equalTo(true)); - assertThat(segments.get(3).getNumDocs(), equalTo(1)); - assertThat(segments.get(3).getDeletedDocs(), equalTo(0)); - assertThat(segments.get(3).isCompound(), equalTo(true)); - } - } - public void testVerboseSegments() throws Exception { try (Store store = createStore(); Engine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE)) { List segments = engine.segments(true); @@ -642,17 +445,12 @@ public void testSegmentsStatsIncludingFileSizes() throws Exception { 
} } - public void testSegmentsWithSoftDeletes() throws Exception { - Settings.Builder settings = Settings.builder() - .put(defaultSettings.getSettings()) - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true); - final IndexMetadata indexMetadata = IndexMetadata.builder(defaultSettings.getIndexMetadata()).settings(settings).build(); - final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetadata); + public void testSegments() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); try ( Store store = createStore(); InternalEngine engine = createEngine( - config(indexSettings, store, createTempDir(), NoMergePolicy.INSTANCE, null, null, globalCheckpoint::get) + config(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE, null, null, globalCheckpoint::get) ) ) { assertThat(engine.segments(false), empty()); @@ -749,10 +547,9 @@ public long getProcessedCheckpoint() { : randomIntBetween(0, (int) localCheckpoint.get()) ); - final Engine.CommitId commitId = engine.flush(true, true); + engine.flush(true, true); CommitStats stats2 = engine.commitStats(); - assertThat(stats2.getRawCommitId(), equalTo(commitId)); assertThat(stats2.getGeneration(), greaterThan(stats1.getGeneration())); assertThat(stats2.getId(), notNullValue()); assertThat(stats2.getId(), not(equalTo(stats1.getId()))); @@ -861,9 +658,9 @@ public void testTranslogRecoveryDoesNotReplayIntoTranslog() throws IOException { recoveringEngine = new InternalEngine(initialEngine.config()) { @Override - protected void commitIndexWriter(IndexWriter writer, Translog translog, String syncId) throws IOException { + protected void commitIndexWriter(IndexWriter writer, Translog translog) throws IOException { committed.set(true); - super.commitIndexWriter(writer, translog, syncId); + super.commitIndexWriter(writer, translog); } }; assertThat(getTranslog(recoveringEngine).stats().getUncommittedOperations(), equalTo(docs)); @@ -1317,137 +1114,31 @@ public void testSyncTranslogConcurrently() throws Exception { checker.run(); } - public void testSyncedFlush() throws IOException { - try ( - Store store = createStore(); - Engine engine = createEngine(defaultSettings, store, createTempDir(), new LogByteSizeMergePolicy(), null) - ) { - final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20); - ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), B_1, null); - engine.index(indexForDoc(doc)); - Engine.CommitId commitID = engine.flush(); - assertThat(commitID, equalTo(new Engine.CommitId(store.readLastCommittedSegmentsInfo().getId()))); - byte[] wrongBytes = Base64.getDecoder().decode(commitID.toString()); - wrongBytes[0] = (byte) ~wrongBytes[0]; - Engine.CommitId wrongId = new Engine.CommitId(wrongBytes); - assertEquals( - "should fail to sync flush with wrong id (but no docs)", - engine.syncFlush(syncId + "1", wrongId), - Engine.SyncedFlushResult.COMMIT_MISMATCH - ); - engine.index(indexForDoc(doc)); - assertEquals( - "should fail to sync flush with right id but pending doc", - engine.syncFlush(syncId + "2", commitID), - Engine.SyncedFlushResult.PENDING_OPERATIONS - ); - commitID = engine.flush(); - assertEquals( - "should succeed to flush commit with right id and no pending doc", - engine.syncFlush(syncId, commitID), - Engine.SyncedFlushResult.SUCCESS - ); - assertEquals(store.readLastCommittedSegmentsInfo().getUserData().get(Engine.SYNC_COMMIT_ID), syncId); - 
assertEquals(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), syncId); - } - } - - public void testRenewSyncFlush() throws Exception { - final int iters = randomIntBetween(2, 5); // run this a couple of times to get some coverage - for (int i = 0; i < iters; i++) { - try ( - Store store = createStore(); - InternalEngine engine = createEngine(config(defaultSettings, store, createTempDir(), new LogDocMergePolicy(), null)) - ) { - final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20); - Engine.Index doc1 = indexForDoc(testParsedDocument("1", null, testDocumentWithTextField(), B_1, null)); - engine.index(doc1); - assertEquals(engine.getLastWriteNanos(), doc1.startTime()); - engine.flush(); - Engine.Index doc2 = indexForDoc(testParsedDocument("2", null, testDocumentWithTextField(), B_1, null)); - engine.index(doc2); - assertEquals(engine.getLastWriteNanos(), doc2.startTime()); - engine.flush(); - final boolean forceMergeFlushes = randomBoolean(); - final ParsedDocument parsedDoc3 = testParsedDocument("3", null, testDocumentWithTextField(), B_1, null); - if (forceMergeFlushes) { - engine.index( - new Engine.Index( - newUid(parsedDoc3), - parsedDoc3, - UNASSIGNED_SEQ_NO, - 0, - Versions.MATCH_ANY, - VersionType.INTERNAL, - Engine.Operation.Origin.PRIMARY, - System.nanoTime() - engine.engineConfig.getFlushMergesAfter().nanos(), - -1, - false, - UNASSIGNED_SEQ_NO, - 0 - ) - ); - } else { - engine.index(indexForDoc(parsedDoc3)); - } - Engine.CommitId commitID = engine.flush(); - assertEquals( - "should succeed to flush commit with right id and no pending doc", - engine.syncFlush(syncId, commitID), - Engine.SyncedFlushResult.SUCCESS - ); - assertEquals(3, engine.segments(false).size()); - - engine.forceMerge(forceMergeFlushes, 1, false, false, false, UUIDs.randomBase64UUID()); - if (forceMergeFlushes == false) { - engine.refresh("make all segments visible"); - assertEquals(4, engine.segments(false).size()); - assertEquals(store.readLastCommittedSegmentsInfo().getUserData().get(Engine.SYNC_COMMIT_ID), syncId); - assertEquals(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), syncId); - assertTrue(engine.tryRenewSyncCommit()); - assertEquals(1, engine.segments(false).size()); - } else { - engine.refresh("test"); - assertBusy(() -> assertEquals(1, engine.segments(false).size())); - } - assertEquals(store.readLastCommittedSegmentsInfo().getUserData().get(Engine.SYNC_COMMIT_ID), syncId); - assertEquals(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), syncId); - - if (randomBoolean()) { - Engine.Index doc4 = indexForDoc(testParsedDocument("4", null, testDocumentWithTextField(), B_1, null)); - engine.index(doc4); - assertEquals(engine.getLastWriteNanos(), doc4.startTime()); - } else { - Engine.Delete delete = new Engine.Delete(doc1.type(), doc1.id(), doc1.uid(), primaryTerm.get()); - engine.delete(delete); - assertEquals(engine.getLastWriteNanos(), delete.startTime()); - } - assertFalse(engine.tryRenewSyncCommit()); - // we might hit a concurrent flush from a finishing merge here - just wait if ongoing... 
- engine.flush(false, true); - assertNull(store.readLastCommittedSegmentsInfo().getUserData().get(Engine.SYNC_COMMIT_ID)); - assertNull(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID)); - } - } - } - public void testSyncedFlushSurvivesEngineRestart() throws IOException { final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); IOUtils.close(store, engine); + SetOnce<IndexWriter> indexWriterHolder = new SetOnce<>(); + IndexWriterFactory indexWriterFactory = (directory, iwc) -> { + indexWriterHolder.set(new IndexWriter(directory, iwc)); + return indexWriterHolder.get(); + }; store = createStore(); - engine = createEngine(store, primaryTranslogDir, globalCheckpoint::get); + engine = createEngine( + defaultSettings, + store, + primaryTranslogDir, + newMergePolicy(), + indexWriterFactory, + null, + globalCheckpoint::get + ); final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20); ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), new BytesArray("{}"), null); engine.index(indexForDoc(doc)); globalCheckpoint.set(0L); - final Engine.CommitId commitID = engine.flush(); - assertEquals( - "should succeed to flush commit with right id and no pending doc", - engine.syncFlush(syncId, commitID), - Engine.SyncedFlushResult.SUCCESS - ); + engine.flush(); + syncFlush(indexWriterHolder.get(), engine, syncId); assertEquals(store.readLastCommittedSegmentsInfo().getUserData().get(Engine.SYNC_COMMIT_ID), syncId); - assertEquals(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), syncId); EngineConfig config = engine.config(); if (randomBoolean()) { engine.close(); @@ -1469,17 +1160,30 @@ } public void testSyncedFlushVanishesOnReplay() throws IOException { + IOUtils.close(store, engine); + SetOnce<IndexWriter> indexWriterHolder = new SetOnce<>(); + IndexWriterFactory indexWriterFactory = (directory, iwc) -> { + indexWriterHolder.set(new IndexWriter(directory, iwc)); + return indexWriterHolder.get(); + }; + store = createStore(); + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + engine = createEngine( + defaultSettings, + store, + primaryTranslogDir, + newMergePolicy(), + indexWriterFactory, + null, + globalCheckpoint::get + ); final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20); ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), new BytesArray("{}"), null); + globalCheckpoint.set(engine.getProcessedLocalCheckpoint()); engine.index(indexForDoc(doc)); - final Engine.CommitId commitID = engine.flush(); - assertEquals( - "should succeed to flush commit with right id and no pending doc", - engine.syncFlush(syncId, commitID), - Engine.SyncedFlushResult.SUCCESS - ); + engine.flush(); + syncFlush(indexWriterHolder.get(), engine, syncId); assertEquals(store.readLastCommittedSegmentsInfo().getUserData().get(Engine.SYNC_COMMIT_ID), syncId); - assertEquals(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), syncId); doc = testParsedDocument("2", null, testDocumentWithTextField(), new BytesArray("{}"), null); engine.index(indexForDoc(doc)); EngineConfig config = engine.config(); @@ -1492,6 +1196,16 @@ public void testSyncedFlushVanishesOnReplay() throws IOException { ); } + void syncFlush(IndexWriter writer, InternalEngine engine, String syncId) throws IOException { + try (ReleasableLock ignored = engine.writeLock.acquire()) { + Map<String, String> userData
= new HashMap<>(); + writer.getLiveCommitData().forEach(e -> userData.put(e.getKey(), e.getValue())); + userData.put(Engine.SYNC_COMMIT_ID, syncId); + writer.setLiveCommitData(userData.entrySet()); + writer.commit(); + } + } + public void testVersioningNewCreate() throws IOException { ParsedDocument doc = testParsedDocument("1", null, testDocument(), B_1, null); Engine.Index create = new Engine.Index(newUid(doc), primaryTerm.get(), doc, Versions.MATCH_DELETED); @@ -1591,7 +1305,7 @@ public void testVersionedUpdate() throws IOException { Engine.Index create = new Engine.Index(newUid(doc), primaryTerm.get(), doc, Versions.MATCH_DELETED); Engine.IndexResult indexResult = engine.index(create); assertThat(indexResult.getVersion(), equalTo(1L)); - try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), create.uid()), searcherFactory)) { + try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.id(), create.uid()), searcherFactory)) { assertEquals(1, get.version()); } @@ -1599,7 +1313,7 @@ public void testVersionedUpdate() throws IOException { Engine.IndexResult update_1_result = engine.index(update_1); assertThat(update_1_result.getVersion(), equalTo(2L)); - try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), create.uid()), searcherFactory)) { + try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.id(), create.uid()), searcherFactory)) { assertEquals(2, get.version()); } @@ -1607,7 +1321,7 @@ public void testVersionedUpdate() throws IOException { Engine.IndexResult update_2_result = engine.index(update_2); assertThat(update_2_result.getVersion(), equalTo(3L)); - try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), create.uid()), searcherFactory)) { + try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.id(), create.uid()), searcherFactory)) { assertEquals(3, get.version()); } @@ -1627,8 +1341,7 @@ public void testGetIfSeqNoIfPrimaryTerm() throws IOException { } try ( Engine.GetResult get = engine.get( - new Engine.Get(true, true, doc.type(), doc.id(), create.uid()).setIfSeqNo(indexResult.getSeqNo()) - .setIfPrimaryTerm(primaryTerm.get()), + new Engine.Get(true, true, doc.id(), create.uid()).setIfSeqNo(indexResult.getSeqNo()).setIfPrimaryTerm(primaryTerm.get()), searcherFactory ) ) { @@ -1638,7 +1351,7 @@ public void testGetIfSeqNoIfPrimaryTerm() throws IOException { expectThrows( VersionConflictEngineException.class, () -> engine.get( - new Engine.Get(true, false, doc.type(), doc.id(), create.uid()).setIfSeqNo(indexResult.getSeqNo() + 1) + new Engine.Get(true, false, doc.id(), create.uid()).setIfSeqNo(indexResult.getSeqNo() + 1) .setIfPrimaryTerm(primaryTerm.get()), searcherFactory ) @@ -1647,7 +1360,7 @@ public void testGetIfSeqNoIfPrimaryTerm() throws IOException { expectThrows( VersionConflictEngineException.class, () -> engine.get( - new Engine.Get(true, false, doc.type(), doc.id(), create.uid()).setIfSeqNo(indexResult.getSeqNo()) + new Engine.Get(true, false, doc.id(), create.uid()).setIfSeqNo(indexResult.getSeqNo()) .setIfPrimaryTerm(primaryTerm.get() + 1), searcherFactory ) @@ -1656,7 +1369,7 @@ public void testGetIfSeqNoIfPrimaryTerm() throws IOException { final VersionConflictEngineException versionConflictEngineException = expectThrows( VersionConflictEngineException.class, () -> engine.get( - new Engine.Get(true, false, doc.type(), doc.id(), create.uid()).setIfSeqNo(indexResult.getSeqNo() + 1) + new Engine.Get(true, false, 
doc.id(), create.uid()).setIfSeqNo(indexResult.getSeqNo() + 1) .setIfPrimaryTerm(primaryTerm.get() + 1), searcherFactory ) @@ -1688,59 +1401,6 @@ public void testVersioningNewIndex() throws IOException { assertThat(indexResult.getVersion(), equalTo(1L)); } - public void testForceMergeWithoutSoftDeletes() throws IOException { - Settings settings = Settings.builder() - .put(defaultSettings.getSettings()) - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false) - .build(); - IndexMetadata indexMetadata = IndexMetadata.builder(defaultSettings.getIndexMetadata()).settings(settings).build(); - try ( - Store store = createStore(); - Engine engine = createEngine( - config(IndexSettingsModule.newIndexSettings(indexMetadata), store, createTempDir(), new LogByteSizeMergePolicy(), null) - ) - ) { // use log MP here we test some behavior in ESMP - int numDocs = randomIntBetween(10, 100); - for (int i = 0; i < numDocs; i++) { - ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), B_1, null); - Engine.Index index = indexForDoc(doc); - engine.index(index); - engine.refresh("test"); - } - try (Engine.Searcher test = engine.acquireSearcher("test")) { - assertEquals(numDocs, test.getIndexReader().numDocs()); - } - engine.forceMerge(true, 1, false, false, false, UUIDs.randomBase64UUID()); - engine.refresh("test"); - assertEquals(engine.segments(true).size(), 1); - - ParsedDocument doc = testParsedDocument(Integer.toString(0), null, testDocument(), B_1, null); - Engine.Index index = indexForDoc(doc); - engine.delete(new Engine.Delete(index.type(), index.id(), index.uid(), primaryTerm.get())); - // expunge deletes - engine.forceMerge(true, 10, true, false, false, UUIDs.randomBase64UUID()); - engine.refresh("test"); - - assertEquals(engine.segments(true).size(), 1); - try (Engine.Searcher test = engine.acquireSearcher("test")) { - assertEquals(numDocs - 1, test.getIndexReader().numDocs()); - assertEquals(engine.config().getMergePolicy().toString(), numDocs - 1, test.getIndexReader().maxDoc()); - } - - doc = testParsedDocument(Integer.toString(1), null, testDocument(), B_1, null); - index = indexForDoc(doc); - engine.delete(new Engine.Delete(index.type(), index.id(), index.uid(), primaryTerm.get())); - // expunge deletes - engine.forceMerge(true, 10, false, false, false, UUIDs.randomBase64UUID()); - engine.refresh("test"); - assertEquals(engine.segments(true).size(), 1); - try (Engine.Searcher test = engine.acquireSearcher("test")) { - assertEquals(numDocs - 2, test.getIndexReader().numDocs()); - assertEquals(numDocs - 1, test.getIndexReader().maxDoc()); - } - } - } - /* * we are testing an edge case here where we have a fully deleted segment that is retained but has all it's IDs pruned away. 
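The syncFlush helper introduced in this patch replaces the removed Engine.syncFlush API by stamping the sync id directly into the Lucene commit user data under the engine's write lock. A minimal standalone sketch of that same pattern against plain Lucene follows; the class name and the literal "sync_id" key are illustrative (the tests use the Engine.SYNC_COMMIT_ID constant), and this is a sketch of the technique, not code from the PR:

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

import java.util.HashMap;
import java.util.Map;

public class CommitUserDataSketch {
    public static void main(String[] args) throws Exception {
        try (Directory dir = new ByteBuffersDirectory();
             IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
            writer.addDocument(new Document());

            // Copy the live commit data, add the marker, and commit -- the same steps as syncFlush above.
            Map<String, String> userData = new HashMap<>();
            Iterable<Map.Entry<String, String>> live = writer.getLiveCommitData();
            if (live != null) {
                live.forEach(e -> userData.put(e.getKey(), e.getValue()));
            }
            userData.put("sync_id", "my-sync-id"); // stands in for Engine.SYNC_COMMIT_ID
            writer.setLiveCommitData(userData.entrySet());
            writer.commit();

            // The marker is durable: it comes back in the user data of the last commit point,
            // which is exactly what the assertions on readLastCommittedSegmentsInfo() check above.
            try (DirectoryReader reader = DirectoryReader.open(dir)) {
                System.out.println(reader.getIndexCommit().getUserData().get("sync_id"));
            }
        }
    }
}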
*/ @@ -1779,18 +1439,12 @@ public void testLookupVersionWithPrunedAwayIds() throws IOException { } public void testUpdateWithFullyDeletedSegments() throws IOException { - Settings.Builder settings = Settings.builder() - .put(defaultSettings.getSettings()) - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) - .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), Integer.MAX_VALUE); - final IndexMetadata indexMetadata = IndexMetadata.builder(defaultSettings.getIndexMetadata()).settings(settings).build(); - final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetadata); final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); final Set liveDocs = new HashSet<>(); try ( Store store = createStore(); InternalEngine engine = createEngine( - config(indexSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get) + config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get) ) ) { int numDocs = scaledRandomIntBetween(10, 100); @@ -1812,7 +1466,6 @@ public void testForceMergeWithSoftDeletesRetention() throws Exception { final long retainedExtraOps = randomLongBetween(0, 10); Settings.Builder settings = Settings.builder() .put(defaultSettings.getSettings()) - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), retainedExtraOps); final IndexMetadata indexMetadata = IndexMetadata.builder(defaultSettings.getIndexMetadata()).settings(settings).build(); final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetadata); @@ -1892,7 +1545,6 @@ public void testForceMergeWithSoftDeletesRetentionAndRecoverySource() throws Exc final long retainedExtraOps = randomLongBetween(0, 10); Settings.Builder settings = Settings.builder() .put(defaultSettings.getSettings()) - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), retainedExtraOps); final IndexMetadata indexMetadata = IndexMetadata.builder(defaultSettings.getIndexMetadata()).settings(settings).build(); final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetadata); @@ -2578,7 +2230,7 @@ public void testVersioningPromotedReplica() throws IOException { final int opsOnPrimary = assertOpsOnPrimary(primaryOps, finalReplicaVersion, deletedOnReplica, replicaEngine); final long currentSeqNo = getSequenceID( replicaEngine, - new Engine.Get(false, false, "type", lastReplicaOp.uid().text(), lastReplicaOp.uid()) + new Engine.Get(false, false, lastReplicaOp.uid().text(), lastReplicaOp.uid()) ).v1(); try (Engine.Searcher searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { final TotalHitCountCollector collector = new TotalHitCountCollector(); @@ -2644,7 +2296,7 @@ class OpAndVersion { throw new AssertionError(e); } for (int op = 0; op < opsPerThread; op++) { - try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), uidTerm), searcherFactory)) { + try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.id(), uidTerm), searcherFactory)) { FieldsVisitor visitor = new FieldsVisitor(true); get.docIdAndVersion().reader.document(get.docIdAndVersion().docId, visitor); List values = new ArrayList<>(Strings.commaDelimitedListToSet(visitor.source().utf8ToString())); @@ -2700,7 +2352,7 @@ class OpAndVersion { assertTrue(op.added + " should not 
exist", exists); } - try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), uidTerm), searcherFactory)) { + try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.id(), uidTerm), searcherFactory)) { FieldsVisitor visitor = new FieldsVisitor(true); get.docIdAndVersion().reader.document(get.docIdAndVersion().docId, visitor); List values = Arrays.asList(Strings.commaDelimitedListToStringArray(visitor.source().utf8ToString())); @@ -3207,7 +2859,7 @@ public void testEnableGcDeletes() throws Exception { ); // Get should not find the document (we never indexed uid=2): - getResult = engine.get(new Engine.Get(true, false, "type", "2", newUid("2")), searcherFactory); + getResult = engine.get(new Engine.Get(true, false, "2", newUid("2")), searcherFactory); assertThat(getResult.exists(), equalTo(false)); // Try to index uid=1 with a too-old version, should fail: @@ -3511,8 +3163,8 @@ public void testTranslogCleanUpPostCommitCrash() throws Exception { ) { @Override - protected void commitIndexWriter(IndexWriter writer, Translog translog, String syncId) throws IOException { - super.commitIndexWriter(writer, translog, syncId); + protected void commitIndexWriter(IndexWriter writer, Translog translog) throws IOException { + super.commitIndexWriter(writer, translog); if (throwErrorOnCommit.get()) { throw new RuntimeException("power's out"); } @@ -4850,7 +4502,7 @@ public void afterRefresh(boolean didRefresh) throws IOException { } public void testSequenceIDs() throws Exception { - Tuple seqID = getSequenceID(engine, new Engine.Get(false, false, "type", "2", newUid("1"))); + Tuple seqID = getSequenceID(engine, new Engine.Get(false, false, "2", newUid("1"))); // Non-existent doc returns no seqnum and no primary term assertThat(seqID.v1(), equalTo(UNASSIGNED_SEQ_NO)); assertThat(seqID.v2(), equalTo(0L)); @@ -4967,15 +4619,10 @@ public void testLookupSeqNoByIdInLucene() throws Exception { } } Randomness.shuffle(operations); - Settings.Builder settings = Settings.builder() - .put(defaultSettings.getSettings()) - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true); - final IndexMetadata indexMetadata = IndexMetadata.builder(defaultSettings.getIndexMetadata()).settings(settings).build(); - final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetadata); Map latestOps = new HashMap<>(); // id -> latest seq_no try ( Store store = createStore(); - InternalEngine engine = createEngine(config(indexSettings, store, createTempDir(), newMergePolicy(), null)) + InternalEngine engine = createEngine(config(defaultSettings, store, createTempDir(), newMergePolicy(), null)) ) { CheckedRunnable lookupAndCheck = () -> { try (Engine.Searcher searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { @@ -5208,7 +4855,7 @@ public void testOutOfOrderSequenceNumbersWithVersionConflict() throws IOExceptio } assertThat(engine.getProcessedLocalCheckpoint(), equalTo(expectedLocalCheckpoint)); - try (Engine.GetResult result = engine.get(new Engine.Get(true, false, "type", "2", uid), searcherFactory)) { + try (Engine.GetResult result = engine.get(new Engine.Get(true, false, "2", uid), searcherFactory)) { assertThat(result.exists(), equalTo(exists)); } } @@ -5857,14 +5504,14 @@ public void testKeepTranslogAfterGlobalCheckpoint() throws Exception { final AtomicLong lastSyncedGlobalCheckpointBeforeCommit = new AtomicLong(Translog.readGlobalCheckpoint(translogPath, translogUUID)); try (InternalEngine engine = new InternalEngine(engineConfig) { 
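                // Hook the commit to observe checkpoint lag: the override below captures the translog's
                // persisted global checkpoint just before each commit, and rarely advances the in-memory
                // global checkpoint first, so a commit can run with a persisted value that trails the
                // in-memory one (the situation the deletion-policy assertions in this test exercise).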
@Override - protected void commitIndexWriter(IndexWriter writer, Translog translog, String syncId) throws IOException { + protected void commitIndexWriter(IndexWriter writer, Translog translog) throws IOException { lastSyncedGlobalCheckpointBeforeCommit.set(Translog.readGlobalCheckpoint(translogPath, translogUUID)); // Advance the global checkpoint during the flush to create a lag between a persisted global checkpoint in the translog // (this value is visible to the deletion policy) and an in memory global checkpoint in the SequenceNumbersService. if (rarely()) { globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), getPersistedLocalCheckpoint())); } - super.commitIndexWriter(writer, translog, syncId); + super.commitIndexWriter(writer, translog); } }) { engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); @@ -6191,12 +5838,10 @@ public void testShouldPeriodicallyFlushAfterMerge() throws Exception { assertThat(engine.getTranslog().stats().getUncommittedOperations(), equalTo(2)); engine.refresh("test"); engine.forceMerge(false, 1, false, false, false, UUIDs.randomBase64UUID()); - assertBusy( - () -> { - // the merge listner runs concurrently after the force merge returned - assertThat(engine.shouldPeriodicallyFlush(), equalTo(true)); - } - ); + assertBusy(() -> { + // the merge listener runs concurrently after the force merge returned + assertThat(engine.shouldPeriodicallyFlush(), equalTo(true)); + }); engine.flush(); assertThat(engine.shouldPeriodicallyFlush(), equalTo(false)); } @@ -6335,22 +5980,14 @@ public void testStressUpdateSameDocWhileGettingIt() throws IOException, Interrup Thread thread = new Thread(() -> { awaitStarted.countDown(); try ( - Engine.GetResult getResult = engine.get( - new Engine.Get(true, false, doc3.type(), doc3.id(), doc3.uid()), - engine::acquireSearcher - ) + Engine.GetResult getResult = engine.get(new Engine.Get(true, false, doc3.id(), doc3.uid()), engine::acquireSearcher) ) { assertTrue(getResult.exists()); } }); thread.start(); awaitStarted.await(); - try ( - Engine.GetResult getResult = engine.get( - new Engine.Get(true, false, doc.type(), doc.id(), doc.uid()), - engine::acquireSearcher - ) - ) { + try (Engine.GetResult getResult = engine.get(new Engine.Get(true, false, doc.id(), doc.uid()), engine::acquireSearcher)) { assertFalse(getResult.exists()); } thread.join(); @@ -6499,7 +6136,6 @@ public void testHistoryBasedOnSource() throws Exception { ); Settings.Builder settings = Settings.builder() .put(defaultSettings.getSettings()) - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), randomLongBetween(0, 10)); final IndexMetadata indexMetadata = IndexMetadata.builder(defaultSettings.getIndexMetadata()).settings(settings).build(); final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetadata); @@ -6529,10 +6165,8 @@ public void testHistoryBasedOnSource() throws Exception { } } MapperService mapperService = createMapperService("test"); - List luceneOps = readAllOperationsBasedOnSource(engine, Engine.HistorySource.INDEX, mapperService); - List translogOps = readAllOperationsBasedOnSource(engine, Engine.HistorySource.TRANSLOG, mapperService); + List luceneOps = readAllOperationsBasedOnSource(engine, mapperService); assertThat(luceneOps.stream().map(o -> o.seqNo()).collect(Collectors.toList()), containsInAnyOrder(expectedSeqNos.toArray())); - assertThat(translogOps.stream().map(o -> o.seqNo()).collect(Collectors.toList()),
containsInAnyOrder(expectedSeqNos.toArray())); } } @@ -6569,7 +6203,6 @@ private void assertOperationHistoryInLucene(List operations) t ); Settings.Builder settings = Settings.builder() .put(defaultSettings.getSettings()) - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), randomLongBetween(0, 10)); final IndexMetadata indexMetadata = IndexMetadata.builder(defaultSettings.getIndexMetadata()).settings(settings).build(); final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetadata); @@ -6609,7 +6242,6 @@ public void testKeepMinRetainedSeqNoByMergePolicy() throws IOException { IOUtils.close(engine, store); Settings.Builder settings = Settings.builder() .put(defaultSettings.getSettings()) - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), randomLongBetween(0, 10)); final IndexMetadata indexMetadata = IndexMetadata.builder(defaultSettings.getIndexMetadata()).settings(settings).build(); final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetadata); @@ -6685,7 +6317,7 @@ public void testKeepMinRetainedSeqNoByMergePolicy() throws IOException { if (rarely()) { engine.forceMerge(randomBoolean(), 1, false, false, false, UUIDs.randomBase64UUID()); } - try (Closeable ignored = engine.acquireHistoryRetentionLock(Engine.HistorySource.INDEX)) { + try (Closeable ignored = engine.acquireHistoryRetentionLock()) { long minRetainSeqNos = engine.getMinRetainedSeqNo(); assertThat(minRetainSeqNos, lessThanOrEqualTo(globalCheckpoint.get() + 1)); Long[] expectedOps = existingSeqNos.stream().filter(seqno -> seqno >= minRetainSeqNos).toArray(Long[]::new); @@ -6741,17 +6373,10 @@ public void testLuceneSnapshotRefreshesOnlyOnce() throws Exception { final MapperService mapperService = createMapperService("test"); final long maxSeqNo = randomLongBetween(10, 50); final AtomicLong refreshCounter = new AtomicLong(); - final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings( - IndexMetadata.builder(defaultSettings.getIndexMetadata()) - .settings( - Settings.builder().put(defaultSettings.getSettings()).put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) - ) - .build() - ); try ( Store store = createStore(); InternalEngine engine = createEngine( - config(indexSettings, store, createTempDir(), newMergePolicy(), null, new ReferenceManager.RefreshListener() { + config(defaultSettings, store, createTempDir(), newMergePolicy(), null, new ReferenceManager.RefreshListener() { @Override public void beforeRefresh() { refreshCounter.incrementAndGet(); @@ -6812,17 +6437,9 @@ public void testAcquireSearcherOnClosingEngine() throws Exception { public void testNoOpOnClosingEngine() throws Exception { engine.close(); - Settings settings = Settings.builder() - .put(defaultSettings.getSettings()) - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) - .build(); - IndexSettings indexSettings = IndexSettingsModule.newIndexSettings( - IndexMetadata.builder(defaultSettings.getIndexMetadata()).settings(settings).build() - ); - assertTrue(indexSettings.isSoftDeleteEnabled()); try ( Store store = createStore(); - InternalEngine engine = createEngine(config(indexSettings, store, createTempDir(), NoMergePolicy.INSTANCE, null)) + InternalEngine engine = createEngine(config(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE, null)) ) { engine.close(); expectThrows( @@ -6834,17 
+6451,9 @@ public void testNoOpOnClosingEngine() throws Exception { public void testSoftDeleteOnClosingEngine() throws Exception { engine.close(); - Settings settings = Settings.builder() - .put(defaultSettings.getSettings()) - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) - .build(); - IndexSettings indexSettings = IndexSettingsModule.newIndexSettings( - IndexMetadata.builder(defaultSettings.getIndexMetadata()).settings(settings).build() - ); - assertTrue(indexSettings.isSoftDeleteEnabled()); try ( Store store = createStore(); - InternalEngine engine = createEngine(config(indexSettings, store, createTempDir(), NoMergePolicy.INSTANCE, null)) + InternalEngine engine = createEngine(config(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE, null)) ) { engine.close(); expectThrows(AlreadyClosedException.class, () -> engine.delete(replicaDeleteForDoc("test", 42, 7, System.nanoTime()))); @@ -6888,19 +6497,13 @@ public void testTrackMaxSeqNoOfUpdatesOrDeletesOnPrimary() throws Exception { } public void testRebuildLocalCheckpointTrackerAndVersionMap() throws Exception { - Settings.Builder settings = Settings.builder() - .put(defaultSettings.getSettings()) - .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 10000) - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true); - final IndexMetadata indexMetadata = IndexMetadata.builder(defaultSettings.getIndexMetadata()).settings(settings).build(); - final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetadata); final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); Path translogPath = createTempDir(); List operations = generateHistoryOnReplica(between(1, 500), randomBoolean(), randomBoolean(), randomBoolean()); List> commits = new ArrayList<>(); commits.add(new ArrayList<>()); try (Store store = createStore()) { - EngineConfig config = config(indexSettings, store, translogPath, NoMergePolicy.INSTANCE, null, null, globalCheckpoint::get); + EngineConfig config = config(defaultSettings, store, translogPath, NoMergePolicy.INSTANCE, null, null, globalCheckpoint::get); final List docs; try (InternalEngine engine = createEngine(config)) { List flushedOperations = new ArrayList<>(); @@ -6974,63 +6577,6 @@ public void testRebuildLocalCheckpointTrackerAndVersionMap() throws Exception { } } - public void testOpenSoftDeletesIndexWithSoftDeletesDisabled() throws Exception { - try (Store store = createStore()) { - Path translogPath = createTempDir(); - final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); - final IndexSettings softDeletesEnabled = IndexSettingsModule.newIndexSettings( - IndexMetadata.builder(defaultSettings.getIndexMetadata()) - .settings( - Settings.builder().put(defaultSettings.getSettings()).put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) - ) - .build() - ); - final List docs; - try ( - InternalEngine engine = createEngine( - config(softDeletesEnabled, store, translogPath, newMergePolicy(), null, null, globalCheckpoint::get) - ) - ) { - List ops = generateHistoryOnReplica(between(1, 100), randomBoolean(), randomBoolean(), randomBoolean()); - applyOperations(engine, ops); - engine.syncTranslog(); // to advance persisted checkpoint - globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getPersistedLocalCheckpoint())); - engine.flush(); - docs = getDocIds(engine, true); - } - final IndexSettings softDeletesDisabled = IndexSettingsModule.newIndexSettings( - 
IndexMetadata.builder(defaultSettings.getIndexMetadata()) - .settings( - Settings.builder().put(defaultSettings.getSettings()).put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false) - ) - .build() - ); - EngineConfig config = config(softDeletesDisabled, store, translogPath, newMergePolicy(), null, null, globalCheckpoint::get); - try (InternalEngine engine = createEngine(config)) { - assertThat(getDocIds(engine, true), equalTo(docs)); - } - } - } - - public void testRequireSoftDeletesWhenAccessingChangesSnapshot() throws Exception { - try (Store store = createStore()) { - final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings( - IndexMetadata.builder(defaultSettings.getIndexMetadata()) - .settings( - Settings.builder().put(defaultSettings.getSettings()).put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false) - ) - .build() - ); - try (InternalEngine engine = createEngine(config(indexSettings, store, createTempDir(), newMergePolicy(), null))) { - AssertionError error = expectThrows( - AssertionError.class, - () -> engine.newChangesSnapshot("test", createMapperService("test"), 0, randomNonNegativeLong(), randomBoolean()) - ); - assertThat(error.getMessage(), containsString("does not have soft-deletes enabled")); - } - } - } - void assertLuceneOperations(InternalEngine engine, long expectedAppends, long expectedUpdates, long expectedDeletes) { String message = "Lucene operations mismatched;" + " appends [actual:" @@ -7154,22 +6700,14 @@ public void testRefreshAndCloseEngineConcurrently() throws Exception { indexer.join(); refresher.join(); } - assertThat(engine.config().getCircuitBreakerService().getBreaker(CircuitBreaker.ACCOUNTING).getUsed(), equalTo(0L)); } public void testPruneAwayDeletedButRetainedIds() throws Exception { IOUtils.close(engine, store); - Settings settings = Settings.builder() - .put(defaultSettings.getSettings()) - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) - .build(); - IndexSettings indexSettings = IndexSettingsModule.newIndexSettings( - IndexMetadata.builder(defaultSettings.getIndexMetadata()).settings(settings).build() - ); - store = createStore(indexSettings, newDirectory()); + store = createStore(defaultSettings, newDirectory()); LogDocMergePolicy policy = new LogDocMergePolicy(); policy.setMinMergeDocs(10000); - try (InternalEngine engine = createEngine(indexSettings, store, createTempDir(), policy)) { + try (InternalEngine engine = createEngine(defaultSettings, store, createTempDir(), policy)) { int numDocs = between(1, 20); for (int i = 0; i < numDocs; i++) { index(engine, i); @@ -7346,13 +6884,6 @@ public void testAlwaysRecordReplicaOrPeerRecoveryOperationsToTranslog() throws E public void testNoOpFailure() throws IOException { engine.close(); - final Settings settings = Settings.builder() - .put(defaultSettings.getSettings()) - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) - .build(); - final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings( - IndexMetadata.builder(defaultSettings.getIndexMetadata()).settings(settings).build() - ); try (Store store = createStore(); Engine engine = createEngine((dir, iwc) -> new IndexWriter(dir, iwc) { @Override @@ -7360,7 +6891,7 @@ public long addDocument(Iterable doc) throws IOExcepti throw new IllegalArgumentException("fatal"); } - }, null, null, config(indexSettings, store, createTempDir(), NoMergePolicy.INSTANCE, null))) { + }, null, null, config(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE, null))) { final Engine.NoOp op 
= new Engine.NoOp(0, 0, PRIMARY, System.currentTimeMillis(), "test"); final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> engine.noOp(op)); assertThat(e.getMessage(), equalTo("fatal")); @@ -7371,35 +6902,17 @@ public long addDocument(Iterable doc) throws IOExcepti } } - public void testDeleteFailureSoftDeletesEnabledDocAlreadyDeleted() throws IOException { - runTestDeleteFailure(true, InternalEngine::delete); - } - - public void testDeleteFailureSoftDeletesEnabled() throws IOException { - runTestDeleteFailure(true, (engine, op) -> {}); - } - - public void testDeleteFailureSoftDeletesDisabled() throws IOException { - runTestDeleteFailure(false, (engine, op) -> {}); + public void testDeleteFailureDocAlreadyDeleted() throws IOException { + runTestDeleteFailure(InternalEngine::delete); } - private void runTestDeleteFailure( - final boolean softDeletesEnabled, - final CheckedBiConsumer consumer - ) throws IOException { + private void runTestDeleteFailure(final CheckedBiConsumer consumer) throws IOException { engine.close(); - final Settings settings = Settings.builder() - .put(defaultSettings.getSettings()) - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), softDeletesEnabled) - .build(); - final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings( - IndexMetadata.builder(defaultSettings.getIndexMetadata()).settings(settings).build() - ); final AtomicReference iw = new AtomicReference<>(); try (Store store = createStore(); InternalEngine engine = createEngine((dir, iwc) -> { iw.set(new ThrowingIndexWriter(dir, iwc)); return iw.get(); - }, null, null, config(indexSettings, store, createTempDir(), NoMergePolicy.INSTANCE, null))) { + }, null, null, config(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE, null))) { engine.index(new Engine.Index(newUid("0"), primaryTerm.get(), InternalEngineTests.createParsedDoc("0", null))); final Engine.Delete op = new Engine.Delete("_doc", "0", newUid("0"), primaryTerm.get()); consumer.accept(engine, op); @@ -7661,7 +7174,6 @@ public void testProducesStoredFieldsReader() throws Exception { public void testMaxDocsOnPrimary() throws Exception { engine.close(); - final boolean softDeleteEnabled = engine.config().getIndexSettings().isSoftDeleteEnabled(); int maxDocs = randomIntBetween(1, 100); IndexWriterMaxDocsChanger.setMaxDocs(maxDocs); try { @@ -7670,7 +7182,7 @@ public void testMaxDocsOnPrimary() throws Exception { List operations = new ArrayList<>(numDocs); for (int i = 0; i < numDocs; i++) { final String id; - if (softDeleteEnabled == false || randomBoolean()) { + if (randomBoolean()) { id = Integer.toString(randomInt(numDocs)); operations.add(indexForDoc(createParsedDoc(id, null))); } else { @@ -7703,10 +7215,6 @@ public void testMaxDocsOnPrimary() throws Exception { } public void testMaxDocsOnReplica() throws Exception { - assumeTrue( - "Deletes do not add documents to Lucene with soft-deletes disabled", - engine.config().getIndexSettings().isSoftDeleteEnabled() - ); engine.close(); int maxDocs = randomIntBetween(1, 100); IndexWriterMaxDocsChanger.setMaxDocs(maxDocs); diff --git a/server/src/test/java/org/opensearch/index/engine/NoOpEngineTests.java b/server/src/test/java/org/opensearch/index/engine/NoOpEngineTests.java index d46563ff09ccb..65b8a81b029c0 100644 --- a/server/src/test/java/org/opensearch/index/engine/NoOpEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/NoOpEngineTests.java @@ -182,7 +182,6 @@ public void testNoOpEngineStats() throws Exception { ); 
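        // Only the per-file size map is asserted from here on: Segment.memoryInBytes and the
        // getMemoryInBytes() accessor are removed by this PR (see the assertion dropped just below
        // and the SegmentTests hunks that follow), along with the ACCOUNTING circuit-breaker usage
        // check removed from InternalEngineTests above.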
assertEquals(0, noOpEngine.segmentsStats(includeFileSize, false).getFileSizes().size()); - assertEquals(0, noOpEngine.segmentsStats(includeFileSize, false).getMemoryInBytes()); } catch (AssertionError e) { logger.error(config.getMergePolicy()); throw e; diff --git a/server/src/test/java/org/opensearch/index/engine/ReadOnlyEngineTests.java b/server/src/test/java/org/opensearch/index/engine/ReadOnlyEngineTests.java index 956e136575a69..609e972b2c026 100644 --- a/server/src/test/java/org/opensearch/index/engine/ReadOnlyEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/ReadOnlyEngineTests.java @@ -150,50 +150,6 @@ public void testReadOnlyEngine() throws Exception { } } - public void testFlushes() throws IOException { - IOUtils.close(engine, store); - Engine readOnlyEngine = null; - final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); - try (Store store = createStore()) { - EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get); - int numDocs = scaledRandomIntBetween(10, 1000); - try (InternalEngine engine = createEngine(config)) { - for (int i = 0; i < numDocs; i++) { - ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null); - engine.index( - new Engine.Index( - newUid(doc), - doc, - i, - primaryTerm.get(), - 1, - null, - Engine.Operation.Origin.REPLICA, - System.nanoTime(), - -1, - false, - SequenceNumbers.UNASSIGNED_SEQ_NO, - 0 - ) - ); - if (rarely()) { - engine.flush(); - } - engine.syncTranslog(); // advance persisted local checkpoint - globalCheckpoint.set(engine.getPersistedLocalCheckpoint()); - } - globalCheckpoint.set(engine.getPersistedLocalCheckpoint()); - engine.syncTranslog(); - engine.flushAndClose(); - readOnlyEngine = new ReadOnlyEngine(engine.engineConfig, null, null, true, Function.identity(), true); - Engine.CommitId flush = readOnlyEngine.flush(randomBoolean(), true); - assertEquals(flush, readOnlyEngine.flush(randomBoolean(), true)); - } finally { - IOUtils.close(readOnlyEngine); - } - } - } - public void testEnsureMaxSeqNoIsEqualToGlobalCheckpoint() throws IOException { IOUtils.close(engine, store); final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); @@ -263,7 +219,6 @@ public void testReadOnly() throws IOException { expectThrows(expectedException, () -> readOnlyEngine.index(null)); expectThrows(expectedException, () -> readOnlyEngine.delete(null)); expectThrows(expectedException, () -> readOnlyEngine.noOp(null)); - expectThrows(UnsupportedOperationException.class, () -> readOnlyEngine.syncFlush(null, null)); } } } diff --git a/server/src/test/java/org/opensearch/index/engine/SegmentTests.java b/server/src/test/java/org/opensearch/index/engine/SegmentTests.java index 103e71c5ddc75..744b0d0cb4733 100644 --- a/server/src/test/java/org/opensearch/index/engine/SegmentTests.java +++ b/server/src/test/java/org/opensearch/index/engine/SegmentTests.java @@ -95,7 +95,6 @@ static Segment randomSegment() { segment.version = Version.LUCENE_7_0_0; segment.compound = randomBoolean(); segment.mergeId = randomAlphaOfLengthBetween(1, 10); - segment.memoryInBytes = randomNonNegativeLong(); segment.segmentSort = randomIndexSort(); if (randomBoolean()) { segment.attributes = Collections.singletonMap("foo", "bar"); @@ -123,7 +122,6 @@ static boolean isSegmentEquals(Segment seg1, Segment seg2) { && Objects.equals(seg1.version, seg2.version) && Objects.equals(seg1.compound, 
seg2.compound) && seg1.sizeInBytes == seg2.sizeInBytes - && seg1.memoryInBytes == seg2.memoryInBytes && seg1.getGeneration() == seg2.getGeneration() && seg1.getName().equals(seg2.getName()) && seg1.getMergeId().equals(seg2.getMergeId()) diff --git a/server/src/test/java/org/opensearch/index/fielddata/AbstractStringFieldDataTestCase.java b/server/src/test/java/org/opensearch/index/fielddata/AbstractStringFieldDataTestCase.java index 5fb55366556aa..33813580468aa 100644 --- a/server/src/test/java/org/opensearch/index/fielddata/AbstractStringFieldDataTestCase.java +++ b/server/src/test/java/org/opensearch/index/fielddata/AbstractStringFieldDataTestCase.java @@ -225,7 +225,7 @@ protected void fillExtendedMvSet() throws Exception { writer.addDocument(d); d = new Document(); - d.add(new StringField("_id", "6", Field.Store.NO)); + addField(d, "_id", "6"); writer.addDocument(d); d = new Document(); diff --git a/server/src/test/java/org/opensearch/index/fielddata/IndexFieldDataServiceTests.java b/server/src/test/java/org/opensearch/index/fielddata/IndexFieldDataServiceTests.java index aeaef91fd020e..d2f9125be996a 100644 --- a/server/src/test/java/org/opensearch/index/fielddata/IndexFieldDataServiceTests.java +++ b/server/src/test/java/org/opensearch/index/fielddata/IndexFieldDataServiceTests.java @@ -143,7 +143,7 @@ public void testGetForFieldRuntimeField() { searchLookupSetOnce.set(searchLookup); return (IndexFieldData.Builder) (cache, breakerService) -> null; }); - SearchLookup searchLookup = new SearchLookup(null, null, null); + SearchLookup searchLookup = new SearchLookup(null, null); ifdService.getForField(ft, "qualified", () -> searchLookup); assertSame(searchLookup, searchLookupSetOnce.get().get()); } diff --git a/server/src/test/java/org/opensearch/index/get/GetResultTests.java b/server/src/test/java/org/opensearch/index/get/GetResultTests.java index 2c9cbe5edbd77..9519b83fa54b1 100644 --- a/server/src/test/java/org/opensearch/index/get/GetResultTests.java +++ b/server/src/test/java/org/opensearch/index/get/GetResultTests.java @@ -97,7 +97,6 @@ public void testToXContent() throws IOException { { GetResult getResult = new GetResult( "index", - "type", "id", 0, 1, @@ -109,16 +108,16 @@ public void testToXContent() throws IOException { ); String output = Strings.toString(getResult); assertEquals( - "{\"_index\":\"index\",\"_type\":\"type\",\"_id\":\"id\",\"_version\":1,\"_seq_no\":0,\"_primary_term\":1," + "{\"_index\":\"index\",\"_id\":\"id\",\"_version\":1,\"_seq_no\":0,\"_primary_term\":1," + "\"metafield\":\"metavalue\",\"found\":true,\"_source\":{ \"field1\" : \"value1\", \"field2\":\"value2\"}," + "\"fields\":{\"field1\":[\"value1\"]}}", output ); } { - GetResult getResult = new GetResult("index", "type", "id", UNASSIGNED_SEQ_NO, 0, 1, false, null, null, null); + GetResult getResult = new GetResult("index", "id", UNASSIGNED_SEQ_NO, 0, 1, false, null, null, null); String output = Strings.toString(getResult); - assertEquals("{\"_index\":\"index\",\"_type\":\"type\",\"_id\":\"id\",\"found\":false}", output); + assertEquals("{\"_index\":\"index\",\"_id\":\"id\",\"found\":false}", output); } } @@ -129,7 +128,6 @@ public void testToAndFromXContentEmbedded() throws Exception { // We don't expect to retrieve the index/type/id of the GetResult because they are not rendered // by the toXContentEmbedded method. 
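The GetResultTests hunks above pin down the typeless wire format: the type argument is gone from the GetResult constructor and no "_type" key is rendered. A small sketch of the post-PR constructor shape, assuming the OpenSearch server artifact on the classpath (the class name here is illustrative, the constructor signature and expected output are taken from the assertions in this diff):

import org.opensearch.common.Strings;
import org.opensearch.index.get.GetResult;

import static org.opensearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO;

public class TypelessGetResultSketch {
    public static void main(String[] args) {
        // Post-PR constructor: index, id, seqNo, primaryTerm, version, exists, source, docFields, metaFields.
        GetResult missing = new GetResult("index", "id", UNASSIGNED_SEQ_NO, 0, 1, false, null, null, null);
        // Renders without any "_type" key, matching the updated testToXContent expectation.
        System.out.println(Strings.toString(missing)); // {"_index":"index","_id":"id","found":false}
    }
}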
GetResult expectedGetResult = new GetResult( - null, null, null, tuple.v2().getSeqNo(), @@ -166,7 +164,6 @@ public void testToXContentEmbedded() throws IOException { GetResult getResult = new GetResult( "index", - "type", "id", 0, 1, @@ -186,7 +183,7 @@ public void testToXContentEmbedded() throws IOException { } public void testToXContentEmbeddedNotFound() throws IOException { - GetResult getResult = new GetResult("index", "type", "id", UNASSIGNED_SEQ_NO, 0, 1, false, null, null, null); + GetResult getResult = new GetResult("index", "id", UNASSIGNED_SEQ_NO, 0, 1, false, null, null, null); BytesReference originalBytes = toXContentEmbedded(getResult, XContentType.JSON, false); assertEquals("{\"found\":false}", originalBytes.utf8ToString()); @@ -194,7 +191,7 @@ public void testToXContentEmbeddedNotFound() throws IOException { public void testSerializationNotFound() throws IOException { // serializes and deserializes with streamable, then prints back to xcontent - GetResult getResult = new GetResult("index", "type", "id", UNASSIGNED_SEQ_NO, 0, 1, false, null, null, null); + GetResult getResult = new GetResult("index", "id", UNASSIGNED_SEQ_NO, 0, 1, false, null, null, null); BytesStreamOutput out = new BytesStreamOutput(); getResult.writeTo(out); @@ -222,7 +219,6 @@ public void testEqualsAndHashcode() { public static GetResult copyGetResult(GetResult getResult) { return new GetResult( getResult.getIndex(), - getResult.getType(), getResult.getId(), getResult.getSeqNo(), getResult.getPrimaryTerm(), @@ -238,21 +234,6 @@ public static GetResult mutateGetResult(GetResult getResult) { List> mutations = new ArrayList<>(); mutations.add( () -> new GetResult( - randomUnicodeOfLength(15), - getResult.getType(), - getResult.getId(), - getResult.getSeqNo(), - getResult.getPrimaryTerm(), - getResult.getVersion(), - getResult.isExists(), - getResult.internalSourceRef(), - getResult.getFields(), - null - ) - ); - mutations.add( - () -> new GetResult( - getResult.getIndex(), randomUnicodeOfLength(15), getResult.getId(), getResult.getSeqNo(), @@ -267,7 +248,6 @@ public static GetResult mutateGetResult(GetResult getResult) { mutations.add( () -> new GetResult( getResult.getIndex(), - getResult.getType(), randomUnicodeOfLength(15), getResult.getSeqNo(), getResult.getPrimaryTerm(), @@ -281,7 +261,6 @@ public static GetResult mutateGetResult(GetResult getResult) { mutations.add( () -> new GetResult( getResult.getIndex(), - getResult.getType(), getResult.getId(), getResult.getSeqNo(), getResult.getPrimaryTerm(), @@ -295,7 +274,6 @@ public static GetResult mutateGetResult(GetResult getResult) { mutations.add( () -> new GetResult( getResult.getIndex(), - getResult.getType(), getResult.getId(), getResult.isExists() ? UNASSIGNED_SEQ_NO : getResult.getSeqNo(), getResult.isExists() ? 
0 : getResult.getPrimaryTerm(), @@ -309,7 +287,6 @@ public static GetResult mutateGetResult(GetResult getResult) { mutations.add( () -> new GetResult( getResult.getIndex(), - getResult.getType(), getResult.getId(), getResult.getSeqNo(), getResult.getPrimaryTerm(), @@ -323,7 +300,6 @@ public static GetResult mutateGetResult(GetResult getResult) { mutations.add( () -> new GetResult( getResult.getIndex(), - getResult.getType(), getResult.getId(), getResult.getSeqNo(), getResult.getPrimaryTerm(), @@ -373,10 +349,9 @@ public static Tuple randomGetResult(XContentType xContentT version = -1; exists = false; } - GetResult getResult = new GetResult(index, type, id, seqNo, primaryTerm, version, exists, source, docFields, metaFields); + GetResult getResult = new GetResult(index, id, seqNo, primaryTerm, version, exists, source, docFields, metaFields); GetResult expectedGetResult = new GetResult( index, - type, id, seqNo, primaryTerm, diff --git a/server/src/test/java/org/opensearch/index/mapper/DocumentFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/DocumentFieldMapperTests.java index c088fb4b08117..3d2b8c4457c17 100644 --- a/server/src/test/java/org/opensearch/index/mapper/DocumentFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/DocumentFieldMapperTests.java @@ -39,6 +39,7 @@ import org.apache.lucene.util.LuceneTestCase; import org.opensearch.index.analysis.AnalyzerScope; import org.opensearch.index.analysis.NamedAnalyzer; +import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.lookup.SearchLookup; import java.io.IOException; @@ -84,7 +85,7 @@ private FakeFieldType(String name) { } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup searchLookup, String format) { + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { throw new UnsupportedOperationException(); } diff --git a/server/src/test/java/org/opensearch/index/mapper/DocumentParserTests.java b/server/src/test/java/org/opensearch/index/mapper/DocumentParserTests.java index 2e2e4990f2e65..9b355a8064660 100644 --- a/server/src/test/java/org/opensearch/index/mapper/DocumentParserTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/DocumentParserTests.java @@ -1058,33 +1058,36 @@ public void testSimpleMapper() throws Exception { public void testParseToJsonAndParse() throws Exception { String mapping = copyToStringFromClasspath("/org/opensearch/index/mapper/simple/test-mapping.json"); MapperService mapperService = createMapperService(mapping(b -> {})); - merge("person", mapperService, mapping); + merge(MapperService.SINGLE_MAPPING_NAME, mapperService, mapping); String builtMapping = mapperService.documentMapper().mappingSource().string(); // reparse it - DocumentMapper builtDocMapper = createDocumentMapper("_doc", builtMapping); + DocumentMapper builtDocMapper = createDocumentMapper(MapperService.SINGLE_MAPPING_NAME, builtMapping); BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/opensearch/index/mapper/simple/test1.json")); - Document doc = builtDocMapper.parse(new SourceToParse("test", "_doc", "1", json, XContentType.JSON)).rootDoc(); + Document doc = builtDocMapper.parse(new SourceToParse("test", MapperService.SINGLE_MAPPING_NAME, "1", json, XContentType.JSON)) + .rootDoc(); assertThat(doc.getBinaryValue(builtDocMapper.idFieldMapper().name()), equalTo(Uid.encodeId("1"))); assertThat(doc.get(builtDocMapper.mappers().getMapper("name.first").name()), 
equalTo("fred")); } public void testSimpleParser() throws Exception { String mapping = copyToStringFromClasspath("/org/opensearch/index/mapper/simple/test-mapping.json"); - DocumentMapper docMapper = createDocumentMapper("person", mapping); + DocumentMapper docMapper = createDocumentMapper(MapperService.SINGLE_MAPPING_NAME, mapping); assertThat((String) docMapper.meta().get("param1"), equalTo("value1")); BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/opensearch/index/mapper/simple/test1.json")); - Document doc = docMapper.parse(new SourceToParse("test", "_doc", "1", json, XContentType.JSON)).rootDoc(); + Document doc = docMapper.parse(new SourceToParse("test", MapperService.SINGLE_MAPPING_NAME, "1", json, XContentType.JSON)) + .rootDoc(); assertThat(doc.getBinaryValue(docMapper.idFieldMapper().name()), equalTo(Uid.encodeId("1"))); assertThat(doc.get(docMapper.mappers().getMapper("name.first").name()), equalTo("fred")); } public void testSimpleParserNoTypeNoId() throws Exception { String mapping = copyToStringFromClasspath("/org/opensearch/index/mapper/simple/test-mapping.json"); - DocumentMapper docMapper = createDocumentMapper("person", mapping); + DocumentMapper docMapper = createDocumentMapper(MapperService.SINGLE_MAPPING_NAME, mapping); BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/opensearch/index/mapper/simple/test1-notype-noid.json")); - Document doc = docMapper.parse(new SourceToParse("test", "_doc", "1", json, XContentType.JSON)).rootDoc(); + Document doc = docMapper.parse(new SourceToParse("test", MapperService.SINGLE_MAPPING_NAME, "1", json, XContentType.JSON)) + .rootDoc(); assertThat(doc.getBinaryValue(docMapper.idFieldMapper().name()), equalTo(Uid.encodeId("1"))); assertThat(doc.get(docMapper.mappers().getMapper("name.first").name()), equalTo("fred")); } @@ -1092,12 +1095,12 @@ public void testSimpleParserNoTypeNoId() throws Exception { public void testAttributes() throws Exception { String mapping = copyToStringFromClasspath("/org/opensearch/index/mapper/simple/test-mapping.json"); - DocumentMapper docMapper = createDocumentMapper("person", mapping); + DocumentMapper docMapper = createDocumentMapper(MapperService.SINGLE_MAPPING_NAME, mapping); assertThat((String) docMapper.meta().get("param1"), equalTo("value1")); String builtMapping = docMapper.mappingSource().string(); - DocumentMapper builtDocMapper = createDocumentMapper("_doc", builtMapping); + DocumentMapper builtDocMapper = createDocumentMapper(MapperService.SINGLE_MAPPING_NAME, builtMapping); assertThat((String) builtDocMapper.meta().get("param1"), equalTo("value1")); } @@ -1106,7 +1109,7 @@ public void testNoDocumentSent() throws Exception { BytesReference json = new BytesArray("".getBytes(StandardCharsets.UTF_8)); MapperParsingException e = expectThrows( MapperParsingException.class, - () -> docMapper.parse(new SourceToParse("test", "_doc", "1", json, XContentType.JSON)) + () -> docMapper.parse(new SourceToParse("test", MapperService.SINGLE_MAPPING_NAME, "1", json, XContentType.JSON)) ); assertThat(e.getMessage(), equalTo("failed to parse, document is empty")); } @@ -1472,7 +1475,7 @@ public void testTypeless() throws IOException { String mapping = Strings.toString( XContentFactory.jsonBuilder() .startObject() - .startObject("type") + .startObject(MapperService.SINGLE_MAPPING_NAME) .startObject("properties") .startObject("foo") .field("type", "keyword") @@ -1481,7 +1484,7 @@ public void testTypeless() throws IOException { .endObject() .endObject() ); - DocumentMapper 
mapper = createDocumentMapper("type", mapping); + DocumentMapper mapper = createDocumentMapper(MapperService.SINGLE_MAPPING_NAME, mapping); ParsedDocument doc = mapper.parse(source(b -> b.field("foo", "1234"))); assertNull(doc.dynamicMappingsUpdate()); // no update since we reused the existing type diff --git a/server/src/test/java/org/opensearch/index/mapper/DynamicTemplatesTests.java b/server/src/test/java/org/opensearch/index/mapper/DynamicTemplatesTests.java index f5e4ea8b2aaa8..70b58525e2772 100644 --- a/server/src/test/java/org/opensearch/index/mapper/DynamicTemplatesTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/DynamicTemplatesTests.java @@ -76,7 +76,7 @@ public void testMatchTypeOnly() throws Exception { public void testSimple() throws Exception { String mapping = copyToStringFromClasspath("/org/opensearch/index/mapper/dynamictemplate/simple/test-mapping.json"); - MapperService mapperService = createMapperService("person", mapping); + MapperService mapperService = createMapperService("_doc", mapping); String docJson = copyToStringFromClasspath("/org/opensearch/index/mapper/dynamictemplate/simple/test-data.json"); ParsedDocument parsedDoc = mapperService.documentMapper().parse(source(docJson)); @@ -131,7 +131,7 @@ public void testSimple() throws Exception { public void testSimpleWithXContentTraverse() throws Exception { String mapping = copyToStringFromClasspath("/org/opensearch/index/mapper/dynamictemplate/simple/test-mapping.json"); - MapperService mapperService = createMapperService("person", mapping); + MapperService mapperService = createMapperService("_doc", mapping); String docJson = copyToStringFromClasspath("/org/opensearch/index/mapper/dynamictemplate/simple/test-data.json"); ParsedDocument parsedDoc = mapperService.documentMapper().parse(source(docJson)); diff --git a/server/src/test/java/org/opensearch/index/mapper/ExternalMapper.java b/server/src/test/java/org/opensearch/index/mapper/ExternalMapper.java index 9a4af3081426d..49b38f828f837 100644 --- a/server/src/test/java/org/opensearch/index/mapper/ExternalMapper.java +++ b/server/src/test/java/org/opensearch/index/mapper/ExternalMapper.java @@ -40,6 +40,7 @@ import org.opensearch.index.analysis.AnalyzerScope; import org.opensearch.index.analysis.IndexAnalyzers; import org.opensearch.index.analysis.NamedAnalyzer; +import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.lookup.SearchLookup; import java.io.IOException; @@ -137,8 +138,8 @@ public String typeName() { } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup searchLookup, String format) { - return SourceValueFetcher.identity(name(), mapperService, format); + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { + return SourceValueFetcher.identity(name(), context, format); } } diff --git a/server/src/test/java/org/opensearch/index/mapper/FakeStringFieldMapper.java b/server/src/test/java/org/opensearch/index/mapper/FakeStringFieldMapper.java index b0f17f41c4c85..9da53b36e11d6 100644 --- a/server/src/test/java/org/opensearch/index/mapper/FakeStringFieldMapper.java +++ b/server/src/test/java/org/opensearch/index/mapper/FakeStringFieldMapper.java @@ -36,6 +36,7 @@ import org.apache.lucene.document.FieldType; import org.apache.lucene.index.IndexOptions; import org.opensearch.common.lucene.Lucene; +import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.lookup.SearchLookup; import java.io.IOException; @@ -95,8 
+96,8 @@ public String typeName() { } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup searchLookup, String format) { - return SourceValueFetcher.toString(name(), mapperService, format); + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { + return SourceValueFetcher.toString(name(), context, format); } } diff --git a/server/src/test/java/org/opensearch/index/mapper/IdFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/IdFieldTypeTests.java index f755fbcda54cc..8a37a72ab7be4 100644 --- a/server/src/test/java/org/opensearch/index/mapper/IdFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/IdFieldTypeTests.java @@ -42,9 +42,6 @@ import org.opensearch.test.OpenSearchTestCase; import org.mockito.Mockito; -import java.util.Collection; -import java.util.Collections; - public class IdFieldTypeTests extends OpenSearchTestCase { public void testRangeQuery() { @@ -70,16 +67,12 @@ public void testTermsQuery() { Mockito.when(context.indexVersionCreated()).thenReturn(indexSettings.getAsVersion(IndexMetadata.SETTING_VERSION_CREATED, null)); MapperService mapperService = Mockito.mock(MapperService.class); - Collection types = Collections.emptySet(); - Mockito.when(context.queryTypes()).thenReturn(types); Mockito.when(context.getMapperService()).thenReturn(mapperService); MappedFieldType ft = new IdFieldMapper.IdFieldType(() -> false); Query query = ft.termQuery("id", context); assertEquals(new TermInSetQuery("_id", Uid.encodeId("id")), query); - types = Collections.singleton("type"); - Mockito.when(context.queryTypes()).thenReturn(types); query = ft.termQuery("id", context); assertEquals(new TermInSetQuery("_id", Uid.encodeId("id")), query); } diff --git a/server/src/test/java/org/opensearch/index/mapper/IpRangeFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/IpRangeFieldTypeTests.java index 98edd61e2fc1f..c2c6293eec4bd 100644 --- a/server/src/test/java/org/opensearch/index/mapper/IpRangeFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/IpRangeFieldTypeTests.java @@ -46,7 +46,7 @@ public void testFetchSourceValue() throws IOException { Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT.id).build(); Mapper.BuilderContext context = new Mapper.BuilderContext(settings, new ContentPath()); - RangeFieldMapper mapper = new RangeFieldMapper.Builder("field", RangeType.IP, true).build(context); + RangeFieldMapper mapper = new RangeFieldMapper.Builder("field", RangeType.IP, true, Version.V_EMPTY).build(context); Map range = org.opensearch.common.collect.Map.of("gte", "2001:db8:0:0:0:0:2:1"); assertEquals( Collections.singletonList(org.opensearch.common.collect.Map.of("gte", "2001:db8::2:1")), diff --git a/server/src/test/java/org/opensearch/index/mapper/MapperServiceTests.java b/server/src/test/java/org/opensearch/index/mapper/MapperServiceTests.java index 376a5b6360d00..3543fca856a20 100644 --- a/server/src/test/java/org/opensearch/index/mapper/MapperServiceTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/MapperServiceTests.java @@ -33,7 +33,6 @@ package org.opensearch.index.mapper; import org.apache.lucene.analysis.TokenStream; -import org.opensearch.ExceptionsHelper; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Strings; import org.opensearch.common.bytes.BytesReference; @@ -66,7 +65,6 @@ import java.util.Collection; import 
java.util.Collections; import java.util.Map; -import java.util.concurrent.ExecutionException; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.instanceOf; @@ -116,34 +114,6 @@ public void testTypeValidation() { MapperService.validateTypeName("_doc"); // no exception } - public void testIndexIntoDefaultMapping() throws Throwable { - // 1. test implicit index creation - ExecutionException e = expectThrows( - ExecutionException.class, - () -> client().prepareIndex("index1", MapperService.DEFAULT_MAPPING, "1").setSource("{}", XContentType.JSON).execute().get() - ); - Throwable throwable = ExceptionsHelper.unwrapCause(e.getCause()); - if (throwable instanceof IllegalArgumentException) { - assertEquals("It is forbidden to index into the default mapping [_default_]", throwable.getMessage()); - } else { - throw e; - } - - // 2. already existing index - IndexService indexService = createIndex("index2"); - e = expectThrows( - ExecutionException.class, - () -> { client().prepareIndex("index1", MapperService.DEFAULT_MAPPING, "2").setSource().execute().get(); } - ); - throwable = ExceptionsHelper.unwrapCause(e.getCause()); - if (throwable instanceof IllegalArgumentException) { - assertEquals("It is forbidden to index into the default mapping [_default_]", throwable.getMessage()); - } else { - throw e; - } - assertNull(indexService.mapperService().documentMapper(MapperService.DEFAULT_MAPPING)); - } - public void testPreflightUpdateDoesNotChangeMapping() throws Throwable { final MapperService mapperService = createIndex("test1").mapperService(); final CompressedXContent mapping = createMappingSpecifyingNumberOfFields(1); @@ -359,16 +329,6 @@ public void testTotalFieldsLimitWithFieldAlias() throws Throwable { assertEquals("Limit of total fields [" + numberOfNonAliasFields + "] has been exceeded", e.getMessage()); } - public void testDefaultMappingIsRejectedOn7() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_default_").endObject().endObject()); - MapperService mapperService = createIndex("test").mapperService(); - IllegalArgumentException e = expectThrows( - IllegalArgumentException.class, - () -> mapperService.merge("_default_", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE) - ); - assertEquals(MapperService.DEFAULT_MAPPING_ERROR_MESSAGE, e.getMessage()); - } - public void testFieldNameLengthLimit() throws Throwable { int maxFieldNameLength = randomIntBetween(25, 30); String testString = new String(new char[maxFieldNameLength + 1]).replace("\0", "a"); diff --git a/server/src/test/java/org/opensearch/index/mapper/NestedObjectMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/NestedObjectMapperTests.java index c456e3ee11e3e..045cc97275eb7 100644 --- a/server/src/test/java/org/opensearch/index/mapper/NestedObjectMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/NestedObjectMapperTests.java @@ -1148,7 +1148,7 @@ public void testMergeNestedMappings() throws IOException { String mapping1 = Strings.toString( XContentFactory.jsonBuilder() .startObject() - .startObject("type") + .startObject(MapperService.SINGLE_MAPPING_NAME) .startObject("properties") .startObject("nested1") .field("type", "nested") @@ -1162,14 +1162,14 @@ public void testMergeNestedMappings() throws IOException { // cannot update `include_in_parent` dynamically MapperException e1 = expectThrows( MapperException.class, - () -> mapperService.merge("type", new 
CompressedXContent(mapping1), MergeReason.MAPPING_UPDATE) + () -> mapperService.merge(MapperService.SINGLE_MAPPING_NAME, new CompressedXContent(mapping1), MergeReason.MAPPING_UPDATE) ); assertEquals("the [include_in_parent] parameter can't be updated on a nested object mapping", e1.getMessage()); String mapping2 = Strings.toString( XContentFactory.jsonBuilder() .startObject() - .startObject("type") + .startObject(MapperService.SINGLE_MAPPING_NAME) .startObject("properties") .startObject("nested1") .field("type", "nested") @@ -1183,7 +1183,7 @@ public void testMergeNestedMappings() throws IOException { // cannot update `include_in_root` dynamically MapperException e2 = expectThrows( MapperException.class, - () -> mapperService.merge("type", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE) + () -> mapperService.merge(MapperService.SINGLE_MAPPING_NAME, new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE) ); assertEquals("the [include_in_root] parameter can't be updated on a nested object mapping", e2.getMessage()); } diff --git a/server/src/test/java/org/opensearch/index/mapper/RangeFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/RangeFieldMapperTests.java index 31c90d380537d..0353173e25696 100644 --- a/server/src/test/java/org/opensearch/index/mapper/RangeFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/RangeFieldMapperTests.java @@ -41,6 +41,7 @@ import org.opensearch.common.xcontent.ToXContent; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.index.mapper.MapperService.MergeReason; import java.io.IOException; import java.net.InetAddress; @@ -374,4 +375,12 @@ public void testIllegalFormatField() throws Exception { assertThat(e.getMessage(), containsString("Invalid format: [[test_format]]: Unknown pattern letter: t")); } + public void testUpdatesWithSameMappings() throws Exception { + for (final String type : types()) { + final DocumentMapper mapper = createDocumentMapper(rangeFieldMapping(type, b -> { b.field("store", true); })); + + final Mapping mapping = mapper.mapping(); + mapper.merge(mapping, MergeReason.MAPPING_UPDATE); + } + } } diff --git a/server/src/test/java/org/opensearch/index/mapper/RangeFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/RangeFieldTypeTests.java index c35830c5089ae..d4772f24cca93 100644 --- a/server/src/test/java/org/opensearch/index/mapper/RangeFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/RangeFieldTypeTests.java @@ -536,16 +536,17 @@ public void testFetchSourceValue() throws IOException { Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT.id).build(); Mapper.BuilderContext context = new Mapper.BuilderContext(settings, new ContentPath()); - MappedFieldType longMapper = new RangeFieldMapper.Builder("field", RangeType.LONG, true).build(context).fieldType(); + MappedFieldType longMapper = new RangeFieldMapper.Builder("field", RangeType.LONG, true, Version.V_EMPTY).build(context) + .fieldType(); Map longRange = org.opensearch.common.collect.Map.of("gte", 3.14, "lt", "42.9"); assertEquals( Collections.singletonList(org.opensearch.common.collect.Map.of("gte", 3L, "lt", 42L)), fetchSourceValue(longMapper, longRange) ); - MappedFieldType dateMapper = new RangeFieldMapper.Builder("field", RangeType.DATE, true).format("yyyy/MM/dd||epoch_millis") - .build(context) - .fieldType(); + MappedFieldType dateMapper = new 
RangeFieldMapper.Builder("field", RangeType.DATE, true, Version.V_EMPTY).format( + "yyyy/MM/dd||epoch_millis" + ).build(context).fieldType(); Map dateRange = org.opensearch.common.collect.Map.of("lt", "1990/12/29", "gte", 597429487111L); assertEquals( Collections.singletonList(org.opensearch.common.collect.Map.of("lt", "1990/12/29", "gte", "1988/12/06")), @@ -557,14 +558,15 @@ public void testParseSourceValueWithFormat() throws IOException { Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT.id).build(); Mapper.BuilderContext context = new Mapper.BuilderContext(settings, new ContentPath()); - MappedFieldType longMapper = new RangeFieldMapper.Builder("field", RangeType.LONG, true).build(context).fieldType(); + MappedFieldType longMapper = new RangeFieldMapper.Builder("field", RangeType.LONG, true, Version.V_EMPTY).build(context) + .fieldType(); Map longRange = org.opensearch.common.collect.Map.of("gte", 3.14, "lt", "42.9"); assertEquals( Collections.singletonList(org.opensearch.common.collect.Map.of("gte", 3L, "lt", 42L)), fetchSourceValue(longMapper, longRange) ); - MappedFieldType dateMapper = new RangeFieldMapper.Builder("field", RangeType.DATE, true).format("strict_date_time") + MappedFieldType dateMapper = new RangeFieldMapper.Builder("field", RangeType.DATE, true, Version.V_EMPTY).format("strict_date_time") .build(context) .fieldType(); Map dateRange = org.opensearch.common.collect.Map.of("lt", "1990-12-29T00:00:00.000Z"); diff --git a/server/src/test/java/org/opensearch/index/query/CombineIntervalsSourceProviderTests.java b/server/src/test/java/org/opensearch/index/query/CombineIntervalsSourceProviderTests.java index ddc985356f268..4d5380221aa82 100644 --- a/server/src/test/java/org/opensearch/index/query/CombineIntervalsSourceProviderTests.java +++ b/server/src/test/java/org/opensearch/index/query/CombineIntervalsSourceProviderTests.java @@ -53,7 +53,7 @@ protected Combine createTestInstance() { @Override protected Combine mutateInstance(Combine instance) throws IOException { List subSources = instance.getSubSources(); - boolean ordered = instance.isOrdered(); + IntervalMode mode = instance.getMode(); int maxGaps = instance.getMaxGaps(); IntervalsSourceProvider.IntervalFilter filter = instance.getFilter(); switch (between(0, 3)) { @@ -63,7 +63,13 @@ protected Combine mutateInstance(Combine instance) throws IOException { : null; break; case 1: - ordered = !ordered; + if (mode == IntervalMode.ORDERED) { + mode = randomBoolean() ? IntervalMode.UNORDERED : IntervalMode.UNORDERED_NO_OVERLAP; + } else if (mode == IntervalMode.UNORDERED) { + mode = randomBoolean() ? IntervalMode.ORDERED : IntervalMode.UNORDERED_NO_OVERLAP; + } else { + mode = randomBoolean() ? 
IntervalMode.UNORDERED : IntervalMode.ORDERED; + } break; case 2: maxGaps++; @@ -76,7 +82,7 @@ protected Combine mutateInstance(Combine instance) throws IOException { default: throw new AssertionError("Illegal randomisation branch"); } - return new Combine(subSources, ordered, maxGaps, filter); + return new Combine(subSources, mode, maxGaps, filter); } @Override diff --git a/server/src/test/java/org/opensearch/index/query/CommonTermsQueryParserTests.java b/server/src/test/java/org/opensearch/index/query/CommonTermsQueryParserTests.java index 1723f402e6774..1dcda74308f45 100644 --- a/server/src/test/java/org/opensearch/index/query/CommonTermsQueryParserTests.java +++ b/server/src/test/java/org/opensearch/index/query/CommonTermsQueryParserTests.java @@ -45,7 +45,7 @@ public void testWhenParsedQueryIsNullNoNullPointerExceptionIsThrown() { CommonTermsQueryBuilder commonTermsQueryBuilder = new CommonTermsQueryBuilder("name", "the").queryName("query-name"); // the named query parses to null; we are testing this does not cause a NullPointerException - SearchResponse response = client().prepareSearch(index).setTypes(type).setQuery(commonTermsQueryBuilder).execute().actionGet(); + SearchResponse response = client().prepareSearch(index).setQuery(commonTermsQueryBuilder).execute().actionGet(); assertNotNull(response); assertEquals(response.getHits().getHits().length, 0); diff --git a/server/src/test/java/org/opensearch/index/query/GeoShapeQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/GeoShapeQueryBuilderTests.java index 9aac0e033dcef..3eab92d7e2112 100644 --- a/server/src/test/java/org/opensearch/index/query/GeoShapeQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/GeoShapeQueryBuilderTests.java @@ -51,7 +51,6 @@ import org.opensearch.common.xcontent.XContentParser; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.index.get.GetResult; -import org.opensearch.index.mapper.MapperService; import org.opensearch.test.AbstractQueryTestCase; import org.opensearch.test.VersionUtils; import org.opensearch.test.geo.RandomShapeGenerator; @@ -94,12 +93,9 @@ protected GeoShapeQueryBuilder doCreateTestQueryBuilder() { @Override protected GetResponse executeGet(GetRequest getRequest) { - String indexedType = indexedShapeType != null ? indexedShapeType : MapperService.SINGLE_MAPPING_NAME; - assertThat(indexedShapeToReturn, notNullValue()); assertThat(indexedShapeId, notNullValue()); assertThat(getRequest.id(), equalTo(indexedShapeId)); - assertThat(getRequest.type(), equalTo(indexedType)); assertThat(getRequest.routing(), equalTo(indexedShapeRouting)); String expectedShapeIndex = indexedShapeIndex == null ? 
GeoShapeQueryBuilder.DEFAULT_SHAPE_INDEX_NAME : indexedShapeIndex; assertThat(getRequest.index(), equalTo(expectedShapeIndex)); @@ -116,9 +112,7 @@ protected GetResponse executeGet(GetRequest getRequest) { } catch (IOException ex) { throw new OpenSearchException("boom", ex); } - return new GetResponse( - new GetResult(indexedShapeIndex, indexedType, indexedShapeId, 0, 1, 0, true, new BytesArray(json), null, null) - ); + return new GetResponse(new GetResult(indexedShapeIndex, indexedShapeId, 0, 1, 0, true, new BytesArray(json), null, null)); } @After diff --git a/server/src/test/java/org/opensearch/index/query/IdsQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/IdsQueryBuilderTests.java index e92395a1b27f2..6e03acb68e204 100644 --- a/server/src/test/java/org/opensearch/index/query/IdsQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/IdsQueryBuilderTests.java @@ -35,16 +35,11 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermInSetQuery; -import org.opensearch.cluster.metadata.Metadata; import org.opensearch.common.ParsingException; import org.opensearch.common.xcontent.XContentParser; -import org.opensearch.index.mapper.IdFieldMapper; import org.opensearch.test.AbstractQueryTestCase; import java.io.IOException; -import java.util.Arrays; -import java.util.HashSet; -import java.util.Set; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.Matchers.contains; @@ -52,47 +47,19 @@ public class IdsQueryBuilderTests extends AbstractQueryTestCase { - private Set assertedWarnings = new HashSet<>(); - @Override protected IdsQueryBuilder doCreateTestQueryBuilder() { - final String type; - if (randomBoolean()) { - if (frequently()) { - type = "_doc"; - } else { - type = randomAlphaOfLengthBetween(1, 10); - } - } else if (randomBoolean()) { - type = Metadata.ALL; - } else { - type = null; - } int numberOfIds = randomIntBetween(0, 10); String[] ids = new String[numberOfIds]; for (int i = 0; i < numberOfIds; i++) { ids[i] = randomAlphaOfLengthBetween(1, 10); } - IdsQueryBuilder query; - if (type != null && randomBoolean()) { - query = new IdsQueryBuilder().types(type); - query.addIds(ids); - } else { - query = new IdsQueryBuilder(); - query.addIds(ids); - } - return query; + return new IdsQueryBuilder().addIds(ids); } @Override protected void doAssertLuceneQuery(IdsQueryBuilder queryBuilder, Query query, QueryShardContext context) throws IOException { - boolean allTypes = queryBuilder.types().length == 0 || queryBuilder.types().length == 1 && "_all".equals(queryBuilder.types()[0]); - if (queryBuilder.ids().size() == 0 - // no types - || context.fieldMapper(IdFieldMapper.NAME) == null - // there are types, but disjoint from the query - || (allTypes == false - && Arrays.asList(queryBuilder.types()).indexOf(context.getMapperService().documentMapper().type()) == -1)) { + if (queryBuilder.ids().size() == 0) { assertThat(query, instanceOf(MatchNoDocsQuery.class)); } else { assertThat(query, instanceOf(TermInSetQuery.class)); @@ -100,11 +67,8 @@ protected void doAssertLuceneQuery(IdsQueryBuilder queryBuilder, Query query, Qu } public void testIllegalArguments() { - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new IdsQueryBuilder().types((String[]) null)); - assertEquals("[ids] types cannot be null", e.getMessage()); - IdsQueryBuilder idsQueryBuilder = new IdsQueryBuilder(); - e = expectThrows(IllegalArgumentException.class, () 
-> idsQueryBuilder.addIds((String[]) null)); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> idsQueryBuilder.addIds((String[]) null)); assertEquals("[ids] ids cannot be null", e.getMessage()); } @@ -116,59 +80,21 @@ public void testIdsQueryWithInvalidValues() throws Exception { } public void testFromJson() throws IOException { - String json = "{\n" - + " \"ids\" : {\n" - + " \"type\" : [ \"my_type\" ],\n" - + " \"values\" : [ \"1\", \"100\", \"4\" ],\n" - + " \"boost\" : 1.0\n" - + " }\n" - + "}"; + String json = "{\n" + " \"ids\" : {\n" + " \"values\" : [ \"1\", \"100\", \"4\" ],\n" + " \"boost\" : 1.0\n" + " }\n" + "}"; IdsQueryBuilder parsed = (IdsQueryBuilder) parseQuery(json); checkGeneratedJson(json, parsed); assertThat(parsed.ids(), contains("1", "100", "4")); - assertEquals(json, "my_type", parsed.types()[0]); // check that type that is not an array and also ids that are numbers are parsed - json = "{\n" - + " \"ids\" : {\n" - + " \"type\" : \"my_type\",\n" - + " \"values\" : [ 1, 100, 4 ],\n" - + " \"boost\" : 1.0\n" - + " }\n" - + "}"; + json = "{\n" + " \"ids\" : {\n" + " \"values\" : [ 1, 100, 4 ],\n" + " \"boost\" : 1.0\n" + " }\n" + "}"; parsed = (IdsQueryBuilder) parseQuery(json); assertThat(parsed.ids(), contains("1", "100", "4")); - assertEquals(json, "my_type", parsed.types()[0]); - - // check with empty type array - json = "{\n" - + " \"ids\" : {\n" - + " \"type\" : [ ],\n" - + " \"values\" : [ \"1\", \"100\", \"4\" ],\n" - + " \"boost\" : 1.0\n" - + " }\n" - + "}"; - parsed = (IdsQueryBuilder) parseQuery(json); - assertThat(parsed.ids(), contains("1", "100", "4")); - assertEquals(json, 0, parsed.types().length); - - // check without type - json = "{\n" + " \"ids\" : {\n" + " \"values\" : [ \"1\", \"100\", \"4\" ],\n" + " \"boost\" : 1.0\n" + " }\n" + "}"; - parsed = (IdsQueryBuilder) parseQuery(json); - assertThat(parsed.ids(), contains("1", "100", "4")); - assertEquals(json, 0, parsed.types().length); } @Override protected QueryBuilder parseQuery(XContentParser parser) throws IOException { QueryBuilder query = super.parseQuery(parser); assertThat(query, instanceOf(IdsQueryBuilder.class)); - - IdsQueryBuilder idsQuery = (IdsQueryBuilder) query; - if (idsQuery.types().length > 0 && !assertedWarnings.contains(IdsQueryBuilder.TYPES_DEPRECATION_MESSAGE)) { - assertWarnings(IdsQueryBuilder.TYPES_DEPRECATION_MESSAGE); - assertedWarnings.add(IdsQueryBuilder.TYPES_DEPRECATION_MESSAGE); - } return query; } diff --git a/server/src/test/java/org/opensearch/index/query/IntervalBuilderTests.java b/server/src/test/java/org/opensearch/index/query/IntervalBuilderTests.java index f2e42e82fc964..9cb8108818705 100644 --- a/server/src/test/java/org/opensearch/index/query/IntervalBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/IntervalBuilderTests.java @@ -78,6 +78,19 @@ public void testUnordered() throws IOException { } + public void testUnorderedNoOverlap() throws IOException { + + CannedTokenStream ts = new CannedTokenStream(new Token("term1", 1, 2), new Token("term2", 3, 4), new Token("term3", 5, 6)); + + IntervalsSource source = BUILDER.analyzeText(new CachingTokenFilter(ts), -1, IntervalMode.UNORDERED_NO_OVERLAP); + IntervalsSource expected = Intervals.unorderedNoOverlaps( + Intervals.unorderedNoOverlaps(Intervals.term("term1"), Intervals.term("term2")), + Intervals.term("term3") + ); + + assertEquals(expected, source); + } + public void testPhrase() throws IOException { CannedTokenStream ts = new CannedTokenStream(new 
Token("term1", 1, 2), new Token("term2", 3, 4), new Token("term3", 5, 6)); diff --git a/server/src/test/java/org/opensearch/index/query/IntervalQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/IntervalQueryBuilderTests.java index b0a7ffc94a1aa..d7f57eef5e039 100644 --- a/server/src/test/java/org/opensearch/index/query/IntervalQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/IntervalQueryBuilderTests.java @@ -36,11 +36,14 @@ import org.apache.lucene.queries.intervals.IntervalQuery; import org.apache.lucene.queries.intervals.Intervals; import org.apache.lucene.queries.intervals.IntervalsSource; +import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.automaton.CompiledAutomaton; +import org.apache.lucene.util.automaton.RegExp; import org.opensearch.common.ParsingException; import org.opensearch.common.Strings; import org.opensearch.common.compress.CompressedXContent; @@ -134,10 +137,24 @@ static IntervalsSourceProvider.Disjunction createRandomDisjunction(int depth, bo static IntervalsSourceProvider.Combine createRandomCombine(int depth, boolean useScripts) { int count = randomInt(5) + 1; List subSources = createRandomSourceList(depth, useScripts, count); - boolean ordered = randomBoolean(); + IntervalMode mode; + switch (randomIntBetween(0, 2)) { + case 0: + mode = IntervalMode.ORDERED; + break; + case 1: + mode = IntervalMode.UNORDERED; + break; + case 2: + mode = IntervalMode.UNORDERED_NO_OVERLAP; + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + int maxGaps = randomInt(5) - 1; IntervalsSourceProvider.IntervalFilter filter = createRandomFilter(depth + 1, useScripts); - return new IntervalsSourceProvider.Combine(subSources, ordered, maxGaps, filter); + return new IntervalsSourceProvider.Combine(subSources, mode, maxGaps, filter); } static List createRandomSourceList(int depth, boolean useScripts, int count) { @@ -170,10 +187,23 @@ static IntervalsSourceProvider.Match createRandomMatch(int depth, boolean useScr words.add(randomRealisticUnicodeOfLengthBetween(4, 20)); } String text = String.join(" ", words); - boolean mOrdered = randomBoolean(); + IntervalMode mMode; + switch (randomIntBetween(0, 2)) { + case 0: + mMode = IntervalMode.ORDERED; + break; + case 1: + mMode = IntervalMode.UNORDERED; + break; + case 2: + mMode = IntervalMode.UNORDERED_NO_OVERLAP; + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } int maxMGaps = randomInt(5) - 1; String analyzer = randomFrom("simple", "keyword", "whitespace"); - return new IntervalsSourceProvider.Match(text, maxMGaps, mOrdered, analyzer, createRandomFilter(depth + 1, useScripts), useField); + return new IntervalsSourceProvider.Match(text, maxMGaps, mMode, analyzer, createRandomFilter(depth + 1, useScripts), useField); } @Override @@ -187,7 +217,7 @@ public void testCacheability() throws IOException { IntervalsSourceProvider.IntervalFilter scriptFilter = new IntervalsSourceProvider.IntervalFilter( new Script(ScriptType.INLINE, "mockscript", "1", Collections.emptyMap()) ); - IntervalsSourceProvider source = new IntervalsSourceProvider.Match("text", 0, true, "simple", scriptFilter, null); + IntervalsSourceProvider source = new IntervalsSourceProvider.Match("text", 0, IntervalMode.ORDERED, "simple", 
scriptFilter, null); queryBuilder = new IntervalQueryBuilder(TEXT_FIELD_NAME, source); rewriteQuery = rewriteQuery(queryBuilder, new QueryShardContext(context)); assertNotNull(rewriteQuery.toQuery(context)); @@ -240,7 +270,7 @@ public void testMatchInterval() throws IOException { + "\" : { " + " \"match\" : { " + " \"query\" : \"Hello world\"," - + " \"ordered\" : true }," + + " \"mode\" : \"ordered\" }," + " \"boost\" : 2 } } }"; builder = (IntervalQueryBuilder) parseQuery(json); @@ -250,6 +280,90 @@ public void testMatchInterval() throws IOException { ); assertEquals(expected, builder.toQuery(createShardContext())); + json = "{ \"intervals\" : " + + "{ \"" + + TEXT_FIELD_NAME + + "\" : { " + + " \"match\" : { " + + " \"query\" : \"Hello world\"," + + " \"mode\" : \"unordered_no_overlap\" }," + + " \"boost\" : 2 } } }"; + + builder = (IntervalQueryBuilder) parseQuery(json); + expected = new BoostQuery( + new IntervalQuery(TEXT_FIELD_NAME, Intervals.unorderedNoOverlaps(Intervals.term("hello"), Intervals.term("world"))), + 2 + ); + assertEquals(expected, builder.toQuery(createShardContext())); + + json = "{ \"intervals\" : " + + "{ \"" + + TEXT_FIELD_NAME + + "\" : { " + + " \"match\" : { " + + " \"query\" : \"Hello world\"," + + " \"mode\" : \"unordered_no_overlap\"," + + " \"max_gaps\" : 11 }," + + " \"boost\" : 2 } } }"; + + builder = (IntervalQueryBuilder) parseQuery(json); + expected = new BoostQuery( + new IntervalQuery( + TEXT_FIELD_NAME, + Intervals.maxgaps(11, Intervals.unorderedNoOverlaps(Intervals.term("hello"), Intervals.term("world"))) + ), + 2 + ); + assertEquals(expected, builder.toQuery(createShardContext())); + + json = "{ \"intervals\" : " + + "{ \"" + + TEXT_FIELD_NAME + + "\" : { " + + " \"match\" : { " + + " \"query\" : \"Hello Open Search\"," + + " \"mode\" : \"unordered_no_overlap\" }," + + " \"boost\" : 3 } } }"; + + builder = (IntervalQueryBuilder) parseQuery(json); + expected = new BoostQuery( + new IntervalQuery( + TEXT_FIELD_NAME, + Intervals.unorderedNoOverlaps( + Intervals.unorderedNoOverlaps(Intervals.term("hello"), Intervals.term("open")), + Intervals.term("search") + ) + ), + 3 + ); + assertEquals(expected, builder.toQuery(createShardContext())); + + json = "{ \"intervals\" : " + + "{ \"" + + TEXT_FIELD_NAME + + "\" : { " + + " \"match\" : { " + + " \"query\" : \"Hello Open Search\"," + + " \"mode\" : \"unordered_no_overlap\"," + + " \"max_gaps\": 12 }," + + " \"boost\" : 3 } } }"; + + builder = (IntervalQueryBuilder) parseQuery(json); + expected = new BoostQuery( + new IntervalQuery( + TEXT_FIELD_NAME, + Intervals.maxgaps( + 12, + Intervals.unorderedNoOverlaps( + Intervals.maxgaps(12, Intervals.unorderedNoOverlaps(Intervals.term("hello"), Intervals.term("open"))), + Intervals.term("search") + ) + ) + ), + 3 + ); + assertEquals(expected, builder.toQuery(createShardContext())); + json = "{ \"intervals\" : " + "{ \"" + TEXT_FIELD_NAME @@ -258,7 +372,7 @@ public void testMatchInterval() throws IOException { + " \"query\" : \"Hello world\"," + " \"max_gaps\" : 10," + " \"analyzer\" : \"whitespace\"," - + " \"ordered\" : true } } } }"; + + " \"mode\" : \"ordered\" } } } }"; builder = (IntervalQueryBuilder) parseQuery(json); expected = new IntervalQuery( @@ -278,7 +392,7 @@ public void testMatchInterval() throws IOException { + " \"use_field\" : \"" + MASKED_FIELD + "\"," - + " \"ordered\" : true } } } }"; + + " \"mode\" : \"ordered\" } } } }"; builder = (IntervalQueryBuilder) parseQuery(json); expected = new IntervalQuery( @@ -295,7 +409,7 @@ public void 
testMatchInterval() throws IOException { + " \"query\" : \"Hello world\"," + " \"max_gaps\" : 10," + " \"analyzer\" : \"whitespace\"," - + " \"ordered\" : true," + + " \"mode\" : \"ordered\"," + " \"filter\" : {" + " \"containing\" : {" + " \"match\" : { \"query\" : \"blah\" } } } } } } }"; @@ -347,11 +461,11 @@ public void testCombineInterval() throws IOException { + TEXT_FIELD_NAME + "\": {" + " \"all_of\" : {" - + " \"ordered\" : true," + + " \"mode\" : \"ordered\"," + " \"intervals\" : [" + " { \"match\" : { \"query\" : \"one\" } }," + " { \"all_of\" : { " - + " \"ordered\" : false," + + " \"mode\" : \"unordered\"," + " \"intervals\" : [" + " { \"match\" : { \"query\" : \"two\" } }," + " { \"match\" : { \"query\" : \"three\" } } ] } } ]," @@ -378,6 +492,52 @@ public void testCombineInterval() throws IOException { ); assertEquals(expected, builder.toQuery(createShardContext())); + json = "{ \"intervals\" : { \"" + + TEXT_FIELD_NAME + + "\": {" + + " \"all_of\" : {" + + " \"mode\" : \"unordered_no_overlap\"," + + " \"intervals\" : [" + + " { \"match\" : { \"query\" : \"one\" } }," + + " { \"match\" : { \"query\" : \"two\" } } ]," + + " \"max_gaps\" : 30 }," + + " \"boost\" : 1.5 } } }"; + builder = (IntervalQueryBuilder) parseQuery(json); + expected = new BoostQuery( + new IntervalQuery( + TEXT_FIELD_NAME, + Intervals.maxgaps(30, Intervals.unorderedNoOverlaps(Intervals.term("one"), Intervals.term("two"))) + ), + 1.5f + ); + assertEquals(expected, builder.toQuery(createShardContext())); + + json = "{ \"intervals\" : { \"" + + TEXT_FIELD_NAME + + "\": {" + + " \"all_of\" : {" + + " \"mode\" : \"unordered_no_overlap\"," + + " \"intervals\" : [" + + " { \"match\" : { \"query\" : \"one\" } }," + + " { \"match\" : { \"query\" : \"two\" } }," + + " { \"match\" : { \"query\" : \"three\" } } ]," + + " \"max_gaps\" : 3 }," + + " \"boost\" : 3.5 } } }"; + builder = (IntervalQueryBuilder) parseQuery(json); + expected = new BoostQuery( + new IntervalQuery( + TEXT_FIELD_NAME, + Intervals.maxgaps( + 3, + Intervals.unorderedNoOverlaps( + Intervals.maxgaps(3, Intervals.unorderedNoOverlaps(Intervals.term("one"), Intervals.term("two"))), + Intervals.term("three") + ) + ) + ), + 3.5f + ); + assertEquals(expected, builder.toQuery(createShardContext())); } public void testCombineDisjunctionInterval() throws IOException { @@ -386,7 +546,7 @@ public void testCombineDisjunctionInterval() throws IOException { + TEXT_FIELD_NAME + "\": { " + " \"all_of\" : {" - + " \"ordered\" : true," + + " \"mode\" : \"ordered\"," + " \"intervals\" : [" + " { \"match\" : { \"query\" : \"atmosphere\" } }," + " { \"any_of\" : {" @@ -413,7 +573,7 @@ public void testCombineDisjunctionInterval() throws IOException { } public void testNonIndexedFields() throws IOException { - IntervalsSourceProvider provider = new IntervalsSourceProvider.Match("test", 0, true, null, null, null); + IntervalsSourceProvider provider = new IntervalsSourceProvider.Match("test", 0, IntervalMode.ORDERED, null, null, null); IntervalQueryBuilder b = new IntervalQueryBuilder("no_such_field", provider); assertThat(b.toQuery(createShardContext()), equalTo(new MatchNoDocsQuery())); @@ -443,7 +603,7 @@ public void testNonIndexedFields() throws IOException { + " \"use_field\" : \"" + NO_POSITIONS_FIELD + "\"," - + " \"ordered\" : true } } } }"; + + " \"mode\" : \"ordered\" } } } }"; e = expectThrows(IllegalArgumentException.class, () -> { IntervalQueryBuilder builder = (IntervalQueryBuilder) parseQuery(json); @@ -654,6 +814,156 @@ public void testWildcard() throws 
IOException { builder = (IntervalQueryBuilder) parseQuery(fixed_field_analyzer_json); expected = new IntervalQuery(TEXT_FIELD_NAME, Intervals.fixField(MASKED_FIELD, Intervals.wildcard(new BytesRef("Te?m")))); assertEquals(expected, builder.toQuery(createShardContext())); + + String wildcard_max_expand_json = "{ \"intervals\" : { \"" + + TEXT_FIELD_NAME + + "\": { " + + "\"wildcard\" : { \"pattern\" : \"Te?m\", \"max_expansions\" : 500 } } } }"; + + builder = (IntervalQueryBuilder) parseQuery(wildcard_max_expand_json); + expected = new IntervalQuery(TEXT_FIELD_NAME, Intervals.wildcard(new BytesRef("te?m"), 500)); + assertEquals(expected, builder.toQuery(createShardContext())); + + String wildcard_neg_max_expand_json = "{ \"intervals\" : { \"" + + TEXT_FIELD_NAME + + "\": { " + + "\"wildcard\" : { \"pattern\" : \"Te?m\", \"max_expansions\" : -20 } } } }"; + + builder = (IntervalQueryBuilder) parseQuery(wildcard_neg_max_expand_json); + expected = new IntervalQuery(TEXT_FIELD_NAME, Intervals.wildcard(new BytesRef("te?m"))); // max expansions use default + assertEquals(expected, builder.toQuery(createShardContext())); + + String wildcard_over_max_expand_json = "{ \"intervals\" : { \"" + + TEXT_FIELD_NAME + + "\": { " + + "\"wildcard\" : { \"pattern\" : \"Te?m\", \"max_expansions\" : " + + (BooleanQuery.getMaxClauseCount() + 1) + + " } } } }"; + expectThrows(IllegalArgumentException.class, () -> { + IntervalQueryBuilder builder1 = (IntervalQueryBuilder) parseQuery(wildcard_over_max_expand_json); + builder1.toQuery(createShardContext()); + }); + } + + private static IntervalsSource buildRegexpSource(String pattern, int flags, Integer maxExpansions) { + return buildRegexpSource(pattern, flags, 0, maxExpansions); + } + + private static IntervalsSource buildRegexpSource(String pattern, int flags, int matchFlags, Integer maxExpansions) { + final RegExp regexp = new RegExp(pattern, flags, matchFlags); + CompiledAutomaton automaton = new CompiledAutomaton(regexp.toAutomaton()); + + if (maxExpansions != null) { + return Intervals.multiterm(automaton, maxExpansions, regexp.toString()); + } else { + return Intervals.multiterm(automaton, regexp.toString()); + } + } + + public void testRegexp() throws IOException { + final int DEFAULT_FLAGS = RegexpFlag.ALL.value(); + String json = "{ \"intervals\" : { \"" + TEXT_FIELD_NAME + "\": { " + "\"regexp\" : { \"pattern\" : \"te.m\" } } } }"; + + IntervalQueryBuilder builder = (IntervalQueryBuilder) parseQuery(json); + Query expected = new IntervalQuery(TEXT_FIELD_NAME, buildRegexpSource("te.m", DEFAULT_FLAGS, null)); + assertEquals(expected, builder.toQuery(createShardContext())); + + String no_positions_json = "{ \"intervals\" : { \"" + + NO_POSITIONS_FIELD + + "\": { " + + "\"regexp\" : { \"pattern\" : \"[Tt]erm\" } } } }"; + expectThrows(IllegalArgumentException.class, () -> { + IntervalQueryBuilder builder1 = (IntervalQueryBuilder) parseQuery(no_positions_json); + builder1.toQuery(createShardContext()); + }); + + String fixed_field_json = "{ \"intervals\" : { \"" + + TEXT_FIELD_NAME + + "\": { " + + "\"regexp\" : { \"pattern\" : \"te.m\", \"use_field\" : \"masked_field\" } } } }"; + + builder = (IntervalQueryBuilder) parseQuery(fixed_field_json); + expected = new IntervalQuery(TEXT_FIELD_NAME, Intervals.fixField(MASKED_FIELD, buildRegexpSource("te.m", DEFAULT_FLAGS, null))); + assertEquals(expected, builder.toQuery(createShardContext())); + + String fixed_field_json_no_positions = "{ \"intervals\" : { \"" + + TEXT_FIELD_NAME + + "\": { " + + "\"regexp\" : { 
\"pattern\" : \"te.m\", \"use_field\" : \"" + + NO_POSITIONS_FIELD + + "\" } } } }"; + expectThrows(IllegalArgumentException.class, () -> { + IntervalQueryBuilder builder1 = (IntervalQueryBuilder) parseQuery(fixed_field_json_no_positions); + builder1.toQuery(createShardContext()); + }); + + String flags_json = "{ \"intervals\" : { \"" + + TEXT_FIELD_NAME + + "\": { " + + "\"regexp\" : { \"pattern\" : \"te.m\", \"flags\" : \"NONE\" } } } }"; + + builder = (IntervalQueryBuilder) parseQuery(flags_json); + expected = new IntervalQuery(TEXT_FIELD_NAME, buildRegexpSource("te.m", RegexpFlag.NONE.value(), null)); + assertEquals(expected, builder.toQuery(createShardContext())); + + String flags_value_json = "{ \"intervals\" : { \"" + + TEXT_FIELD_NAME + + "\": { " + + "\"regexp\" : { \"pattern\" : \"te.m\", \"flags_value\" : \"" + + RegexpFlag.ANYSTRING.value() + + "\" } } } }"; + + builder = (IntervalQueryBuilder) parseQuery(flags_value_json); + expected = new IntervalQuery(TEXT_FIELD_NAME, buildRegexpSource("te.m", RegexpFlag.ANYSTRING.value(), null)); + assertEquals(expected, builder.toQuery(createShardContext())); + + String regexp_max_expand_json = "{ \"intervals\" : { \"" + + TEXT_FIELD_NAME + + "\": { " + + "\"regexp\" : { \"pattern\" : \"te.m\", \"max_expansions\" : 500 } } } }"; + + builder = (IntervalQueryBuilder) parseQuery(regexp_max_expand_json); + expected = new IntervalQuery(TEXT_FIELD_NAME, buildRegexpSource("te.m", DEFAULT_FLAGS, 500)); + assertEquals(expected, builder.toQuery(createShardContext())); + + String regexp_case_insensitive_json = "{ \"intervals\" : { \"" + + TEXT_FIELD_NAME + + "\": { " + + "\"regexp\" : { \"pattern\" : \"TE.M\", \"case_insensitive\" : true } } } }"; + + builder = (IntervalQueryBuilder) parseQuery(regexp_case_insensitive_json); + expected = new IntervalQuery(TEXT_FIELD_NAME, buildRegexpSource("TE.M", DEFAULT_FLAGS, RegExp.ASCII_CASE_INSENSITIVE, null)); + assertEquals(expected, builder.toQuery(createShardContext())); + + String regexp_neg_max_expand_json = "{ \"intervals\" : { \"" + + TEXT_FIELD_NAME + + "\": { " + + "\"regexp\" : { \"pattern\" : \"te.m\", \"max_expansions\" : -20 } } } }"; + + builder = (IntervalQueryBuilder) parseQuery(regexp_neg_max_expand_json); + // max expansions use default + expected = new IntervalQuery(TEXT_FIELD_NAME, buildRegexpSource("te.m", DEFAULT_FLAGS, null)); + assertEquals(expected, builder.toQuery(createShardContext())); + + String regexp_over_max_expand_json = "{ \"intervals\" : { \"" + + TEXT_FIELD_NAME + + "\": { " + + "\"regexp\" : { \"pattern\" : \"te.m\", \"max_expansions\" : " + + (BooleanQuery.getMaxClauseCount() + 1) + + " } } } }"; + expectThrows(IllegalArgumentException.class, () -> { + IntervalQueryBuilder builder1 = (IntervalQueryBuilder) parseQuery(regexp_over_max_expand_json); + builder1.toQuery(createShardContext()); + }); + + String regexp_max_expand_with_flags_json = "{ \"intervals\" : { \"" + + TEXT_FIELD_NAME + + "\": { " + + "\"regexp\" : { \"pattern\" : \"te.m\", \"flags\": \"NONE\", \"max_expansions\" : 500 } } } }"; + + builder = (IntervalQueryBuilder) parseQuery(regexp_max_expand_with_flags_json); + expected = new IntervalQuery(TEXT_FIELD_NAME, buildRegexpSource("te.m", RegexpFlag.NONE.value(), 500)); + assertEquals(expected, builder.toQuery(createShardContext())); } private static IntervalsSource buildFuzzySource(String term, String label, int prefixLength, boolean transpositions, int editDistance) { diff --git 
a/server/src/test/java/org/opensearch/index/query/MatchIntervalsSourceProviderTests.java b/server/src/test/java/org/opensearch/index/query/MatchIntervalsSourceProviderTests.java index 2725df7dd06d9..3f9d075d72532 100644 --- a/server/src/test/java/org/opensearch/index/query/MatchIntervalsSourceProviderTests.java +++ b/server/src/test/java/org/opensearch/index/query/MatchIntervalsSourceProviderTests.java @@ -53,7 +53,7 @@ protected Match createTestInstance() { protected Match mutateInstance(Match instance) throws IOException { String query = instance.getQuery(); int maxGaps = instance.getMaxGaps(); - boolean isOrdered = instance.isOrdered(); + IntervalMode mode = instance.getMode(); String analyzer = instance.getAnalyzer(); IntervalsSourceProvider.IntervalFilter filter = instance.getFilter(); String useField = instance.getUseField(); @@ -65,7 +65,13 @@ protected Match mutateInstance(Match instance) throws IOException { maxGaps++; break; case 2: - isOrdered = !isOrdered; + if (mode == IntervalMode.ORDERED) { + mode = randomBoolean() ? IntervalMode.UNORDERED : IntervalMode.UNORDERED_NO_OVERLAP; + } else if (mode == IntervalMode.UNORDERED) { + mode = randomBoolean() ? IntervalMode.ORDERED : IntervalMode.UNORDERED_NO_OVERLAP; + } else { + mode = randomBoolean() ? IntervalMode.UNORDERED : IntervalMode.ORDERED; + } break; case 3: analyzer = analyzer == null ? randomAlphaOfLength(5) : null; @@ -81,7 +87,7 @@ protected Match mutateInstance(Match instance) throws IOException { default: throw new AssertionError("Illegal randomisation branch"); } - return new Match(query, maxGaps, isOrdered, analyzer, filter, useField); + return new Match(query, maxGaps, mode, analyzer, filter, useField); } @Override diff --git a/server/src/test/java/org/opensearch/index/query/MoreLikeThisQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/MoreLikeThisQueryBuilderTests.java index 0b75e9eb32314..2061378c3f54f 100644 --- a/server/src/test/java/org/opensearch/index/query/MoreLikeThisQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/MoreLikeThisQueryBuilderTests.java @@ -112,14 +112,7 @@ private static String[] randomStringFields() { private Item generateRandomItem() { String index = randomBoolean() ? getIndex().getName() : null; // indexed item or artificial document - Item item; - - if (randomBoolean()) { - item = randomBoolean() ? new Item(index, randomAlphaOfLength(10)) : new Item(index, randomArtificialDoc()); - } else { - String type = "doc"; - item = randomBoolean() ? new Item(index, type, randomAlphaOfLength(10)) : new Item(index, type, randomArtificialDoc()); - } + Item item = randomBoolean() ? 
new Item(index, randomAlphaOfLength(10)) : new Item(index, randomArtificialDoc()); // if no field is specified MLT uses all mapped fields for this item if (randomBoolean()) { @@ -247,7 +240,7 @@ protected MultiTermVectorsResponse executeMultiTermVectors(MultiTermVectorsReque MultiTermVectorsItemResponse[] responses = new MultiTermVectorsItemResponse[mtvRequest.size()]; int i = 0; for (TermVectorsRequest request : mtvRequest) { - TermVectorsResponse response = new TermVectorsResponse(request.index(), request.type(), request.id()); + TermVectorsResponse response = new TermVectorsResponse(request.index(), request.id()); response.setExists(true); Fields generatedFields; if (request.doc() != null) { @@ -449,11 +442,9 @@ public void testFromJson() throws IOException { + " \"fields\" : [ \"title\", \"description\" ],\n" + " \"like\" : [ \"and potentially some more text here as well\", {\n" + " \"_index\" : \"imdb\",\n" - + " \"_type\" : \"movies\",\n" + " \"_id\" : \"1\"\n" + " }, {\n" + " \"_index\" : \"imdb\",\n" - + " \"_type\" : \"movies\",\n" + " \"_id\" : \"2\"\n" + " } ],\n" + " \"max_query_terms\" : 12,\n" @@ -481,12 +472,6 @@ public void testFromJson() throws IOException { protected QueryBuilder parseQuery(XContentParser parser) throws IOException { QueryBuilder query = super.parseQuery(parser); assertThat(query, instanceOf(MoreLikeThisQueryBuilder.class)); - - MoreLikeThisQueryBuilder mltQuery = (MoreLikeThisQueryBuilder) query; - if (mltQuery.isTypeless() == false && !assertedWarnings.contains(MoreLikeThisQueryBuilder.TYPES_DEPRECATION_MESSAGE)) { - assertWarnings(MoreLikeThisQueryBuilder.TYPES_DEPRECATION_MESSAGE); - assertedWarnings.add(MoreLikeThisQueryBuilder.TYPES_DEPRECATION_MESSAGE); - } return query; } diff --git a/server/src/test/java/org/opensearch/index/query/QueryShardContextTests.java b/server/src/test/java/org/opensearch/index/query/QueryShardContextTests.java index f9db638c8245b..b803e7b5686dc 100644 --- a/server/src/test/java/org/opensearch/index/query/QueryShardContextTests.java +++ b/server/src/test/java/org/opensearch/index/query/QueryShardContextTests.java @@ -209,14 +209,10 @@ public void testIndexSortedOnField() { } public void testFielddataLookupSelfReference() { - QueryShardContext queryShardContext = createQueryShardContext( - "uuid", - null, - (field, leafLookup, docId) -> { - // simulate a runtime field that depends on itself e.g. field: doc['field'] - return leafLookup.doc().get(field).toString(); - } - ); + QueryShardContext queryShardContext = createQueryShardContext("uuid", null, (field, leafLookup, docId) -> { + // simulate a runtime field that depends on itself e.g. 
field: doc['field']
+            return leafLookup.doc().get(field).toString();
+        });
         IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> collect("field", queryShardContext));
         assertEquals("Cyclic dependency detected while resolving runtime fields: field -> field", iae.getMessage());
     }
diff --git a/server/src/test/java/org/opensearch/index/query/RandomQueryBuilder.java b/server/src/test/java/org/opensearch/index/query/RandomQueryBuilder.java
index 8efff55a65d1a..03930edbf89d6 100644
--- a/server/src/test/java/org/opensearch/index/query/RandomQueryBuilder.java
+++ b/server/src/test/java/org/opensearch/index/query/RandomQueryBuilder.java
@@ -34,7 +34,6 @@
 import com.carrotsearch.randomizedtesting.generators.RandomNumbers;
 import com.carrotsearch.randomizedtesting.generators.RandomStrings;
-import org.opensearch.common.Strings;
 
 import java.util.Random;
 
@@ -63,7 +62,7 @@ public static QueryBuilder createQuery(Random r) {
             case 2:
                 // We make sure this query has no types to avoid deprecation warnings in the
                 // tests that use this method.
-                return new IdsQueryBuilderTests().createTestQueryBuilder().types(Strings.EMPTY_ARRAY);
+                return new IdsQueryBuilderTests().createTestQueryBuilder();
             case 3:
                 return createMultiTermQuery(r);
             default:
diff --git a/server/src/test/java/org/opensearch/index/query/RegexpIntervalsSourceProviderTests.java b/server/src/test/java/org/opensearch/index/query/RegexpIntervalsSourceProviderTests.java
new file mode 100644
index 0000000000000..bf6809e5cb446
--- /dev/null
+++ b/server/src/test/java/org/opensearch/index/query/RegexpIntervalsSourceProviderTests.java
@@ -0,0 +1,85 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.query;
+
+import static org.opensearch.index.query.IntervalsSourceProvider.Regexp;
+import static org.opensearch.index.query.IntervalsSourceProvider.fromXContent;
+
+import org.opensearch.common.io.stream.Writeable;
+import org.opensearch.common.xcontent.XContentParser;
+import org.opensearch.test.AbstractSerializingTestCase;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+
+public class RegexpIntervalsSourceProviderTests extends AbstractSerializingTestCase<Regexp> {
+
+    private static final List<String> FLAGS = Arrays.asList("INTERSECTION", "COMPLEMENT", "EMPTY", "ANYSTRING", "INTERVAL", "NONE");
+
+    @Override
+    protected Regexp createTestInstance() {
+        return createRandomRegexp();
+    }
+
+    static Regexp createRandomRegexp() {
+        return new Regexp(
+            randomAlphaOfLengthBetween(0, 3) + (randomBoolean() ? ".*?" : "." + randomAlphaOfLength(4)) + randomAlphaOfLengthBetween(0, 5),
+            randomBoolean() ? RegexpFlag.resolveValue(randomFrom(FLAGS)) : RegexpFlag.ALL.value(),
+            randomBoolean() ? randomAlphaOfLength(10) : null,
+            randomBoolean() ? randomIntBetween(-1, Integer.MAX_VALUE) : null,
+            randomBoolean()
+        );
+    }
+
+    @Override
+    protected Regexp mutateInstance(Regexp instance) throws IOException {
+        String pattern = instance.getPattern();
+        int flags = instance.getFlags();
+        String useField = instance.getUseField();
+        Integer maxExpansions = instance.getMaxExpansions();
+        boolean caseInsensitive = instance.isCaseInsensitive();
+
+        int ran = between(0, 4);
+        switch (ran) {
+            case 0:
+                pattern += randomBoolean() ? ".*?" : randomAlphaOfLength(5);
+                break;
+            case 1:
+                flags = (flags == RegexpFlag.ALL.value()) ? RegexpFlag.resolveValue(randomFrom(FLAGS)) : RegexpFlag.ALL.value();
+                break;
+            case 2:
+                useField = useField == null ? randomAlphaOfLength(5) : null;
+                break;
+            case 3:
+                maxExpansions = maxExpansions == null ? randomIntBetween(1, Integer.MAX_VALUE) : null;
+                break;
+            case 4:
+                caseInsensitive = !caseInsensitive;
+                break;
+            default:
+                throw new AssertionError("Illegal randomisation branch");
+        }
+        return new Regexp(pattern, flags, useField, maxExpansions, caseInsensitive);
+    }
+
+    @Override
+    protected Writeable.Reader<Regexp> instanceReader() {
+        return Regexp::new;
+    }
+
+    @Override
+    protected Regexp doParseInstance(XContentParser parser) throws IOException {
+        if (parser.nextToken() == XContentParser.Token.START_OBJECT) {
+            parser.nextToken();
+        }
+        Regexp regexp = (Regexp) fromXContent(parser);
+        assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken());
+        return regexp;
+    }
+}
diff --git a/server/src/test/java/org/opensearch/index/query/TermsQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/TermsQueryBuilderTests.java
index 9e6c496c1a0ca..e37b4f1a1c39f 100644
--- a/server/src/test/java/org/opensearch/index/query/TermsQueryBuilderTests.java
+++ b/server/src/test/java/org/opensearch/index/query/TermsQueryBuilderTests.java
@@ -241,9 +241,7 @@ public GetResponse executeGet(GetRequest getRequest) {
         } catch (IOException ex) {
             throw new OpenSearchException("boom", ex);
         }
-        return new GetResponse(
-            new GetResult(getRequest.index(), getRequest.type(), getRequest.id(), 0, 1, 0, true, new BytesArray(json), null, null)
-        );
+        return new GetResponse(new GetResult(getRequest.index(), getRequest.id(), 0, 1, 0, true, new BytesArray(json), null, null));
     }
 
     public void testNumeric() throws IOException {
diff --git a/server/src/test/java/org/opensearch/index/query/WildcardIntervalsSourceProviderTests.java b/server/src/test/java/org/opensearch/index/query/WildcardIntervalsSourceProviderTests.java
index 88623ab4d83b8..784159005e1e3 100644
--- a/server/src/test/java/org/opensearch/index/query/WildcardIntervalsSourceProviderTests.java
+++ b/server/src/test/java/org/opensearch/index/query/WildcardIntervalsSourceProviderTests.java
@@ -51,7 +51,8 @@ static Wildcard createRandomWildcard() {
         return new Wildcard(
             randomAlphaOfLength(10),
             randomBoolean() ? randomAlphaOfLength(10) : null,
-            randomBoolean() ? randomAlphaOfLength(10) : null
+            randomBoolean() ? randomAlphaOfLength(10) : null,
+            randomBoolean() ? randomIntBetween(-1, Integer.MAX_VALUE) : null
         );
     }
 
@@ -60,7 +61,8 @@ protected Wildcard mutateInstance(Wildcard instance) throws IOException {
         String wildcard = instance.getPattern();
        String analyzer = instance.getAnalyzer();
         String useField = instance.getUseField();
-        switch (between(0, 2)) {
+        Integer maxExpansions = instance.getMaxExpansions();
+        switch (between(0, 3)) {
             case 0:
                 wildcard += "a";
                 break;
@@ -70,10 +72,13 @@ protected Wildcard mutateInstance(Wildcard instance) throws IOException {
             case 2:
                 useField = useField == null ? randomAlphaOfLength(5) : null;
                 break;
+            case 3:
+                maxExpansions = maxExpansions == null ?
randomIntBetween(1, Integer.MAX_VALUE) : null; + break; default: throw new AssertionError("Illegal randomisation branch"); } - return new Wildcard(wildcard, analyzer, useField); + return new Wildcard(wildcard, analyzer, useField, maxExpansions); } @Override diff --git a/server/src/test/java/org/opensearch/index/reindex/BulkByScrollResponseTests.java b/server/src/test/java/org/opensearch/index/reindex/BulkByScrollResponseTests.java index eaf283ae56f38..6ee3f3c0bced4 100644 --- a/server/src/test/java/org/opensearch/index/reindex/BulkByScrollResponseTests.java +++ b/server/src/test/java/org/opensearch/index/reindex/BulkByScrollResponseTests.java @@ -81,14 +81,7 @@ public void testRountTrip() throws IOException { private List randomIndexingFailures() { return usually() ? emptyList() - : singletonList( - new Failure( - randomSimpleString(random()), - randomSimpleString(random()), - randomSimpleString(random()), - new IllegalArgumentException("test") - ) - ); + : singletonList(new Failure(randomSimpleString(random()), randomSimpleString(random()), new IllegalArgumentException("test"))); } private List randomSearchFailures() { @@ -119,7 +112,6 @@ private void assertResponseEquals(BulkByScrollResponse expected, BulkByScrollRes Failure expectedFailure = expected.getBulkFailures().get(i); Failure actualFailure = actual.getBulkFailures().get(i); assertEquals(expectedFailure.getIndex(), actualFailure.getIndex()); - assertEquals(expectedFailure.getType(), actualFailure.getType()); assertEquals(expectedFailure.getId(), actualFailure.getId()); assertEquals(expectedFailure.getMessage(), actualFailure.getMessage()); assertEquals(expectedFailure.getStatus(), actualFailure.getStatus()); @@ -150,7 +142,6 @@ public static void assertEqualBulkResponse( Failure expectedFailure = expected.getBulkFailures().get(i); Failure actualFailure = actual.getBulkFailures().get(i); assertEquals(expectedFailure.getIndex(), actualFailure.getIndex()); - assertEquals(expectedFailure.getType(), actualFailure.getType()); assertEquals(expectedFailure.getId(), actualFailure.getId()); assertEquals(expectedFailure.getStatus(), actualFailure.getStatus()); } diff --git a/server/src/test/java/org/opensearch/index/reindex/DeleteByQueryRequestTests.java b/server/src/test/java/org/opensearch/index/reindex/DeleteByQueryRequestTests.java index f88b257d76a2f..3ff4b3ec26231 100644 --- a/server/src/test/java/org/opensearch/index/reindex/DeleteByQueryRequestTests.java +++ b/server/src/test/java/org/opensearch/index/reindex/DeleteByQueryRequestTests.java @@ -95,30 +95,6 @@ protected void extraForSliceAssertions(DeleteByQueryRequest original, DeleteByQu // No extra assertions needed } - public void testTypesGetter() { - int numTypes = between(1, 50); - String[] types = new String[numTypes]; - for (int i = 0; i < numTypes; i++) { - types[i] = randomSimpleString(random(), 1, 30); - } - SearchRequest searchRequest = new SearchRequest(); - searchRequest.types(types); - DeleteByQueryRequest request = new DeleteByQueryRequest(searchRequest); - assertArrayEquals(request.types(), types); - } - - public void testTypesSetter() { - int numTypes = between(1, 50); - String[] types = new String[numTypes]; - for (int i = 0; i < numTypes; i++) { - types[i] = randomSimpleString(random(), 1, 30); - } - SearchRequest searchRequest = new SearchRequest(); - DeleteByQueryRequest request = new DeleteByQueryRequest(searchRequest); - request.types(types); - assertArrayEquals(request.types(), types); - } - public void testValidateGivenNoQuery() { SearchRequest searchRequest 
= new SearchRequest(); DeleteByQueryRequest deleteByQueryRequest = new DeleteByQueryRequest(searchRequest); diff --git a/server/src/test/java/org/opensearch/index/reindex/ReindexRequestTests.java b/server/src/test/java/org/opensearch/index/reindex/ReindexRequestTests.java index ac999d34785ea..6fe277ad2751b 100644 --- a/server/src/test/java/org/opensearch/index/reindex/ReindexRequestTests.java +++ b/server/src/test/java/org/opensearch/index/reindex/ReindexRequestTests.java @@ -112,9 +112,6 @@ protected ReindexRequest createTestInstance() { if (randomBoolean()) { reindexRequest.setSourceBatchSize(randomInt(100)); } - if (randomBoolean()) { - reindexRequest.setDestDocType("type"); - } if (randomBoolean()) { reindexRequest.setDestOpType("create"); } @@ -160,7 +157,6 @@ protected void assertEqualInstances(ReindexRequest expectedInstance, ReindexRequ assertEquals(expectedInstance.getDestination().getPipeline(), newInstance.getDestination().getPipeline()); assertEquals(expectedInstance.getDestination().routing(), newInstance.getDestination().routing()); assertEquals(expectedInstance.getDestination().opType(), newInstance.getDestination().opType()); - assertEquals(expectedInstance.getDestination().type(), newInstance.getDestination().type()); } public void testReindexFromRemoteDoesNotSupportSearchQuery() { diff --git a/server/src/test/java/org/opensearch/index/replication/IndexLevelReplicationTests.java b/server/src/test/java/org/opensearch/index/replication/IndexLevelReplicationTests.java index 5a366574fd397..6e2efe56a69d7 100644 --- a/server/src/test/java/org/opensearch/index/replication/IndexLevelReplicationTests.java +++ b/server/src/test/java/org/opensearch/index/replication/IndexLevelReplicationTests.java @@ -173,7 +173,7 @@ public void cleanFiles( public void testRetryAppendOnlyAfterRecovering() throws Exception { try (ReplicationGroup shards = createGroup(0)) { shards.startAll(); - final IndexRequest originalRequest = new IndexRequest(index.getName(), "type").source("{}", XContentType.JSON); + final IndexRequest originalRequest = new IndexRequest(index.getName()).source("{}", XContentType.JSON); originalRequest.process(Version.CURRENT, null, index.getName()); final IndexRequest retryRequest = copyIndexRequest(originalRequest); retryRequest.onRetry(); @@ -214,7 +214,7 @@ public IndexResult index(Index op) throws IOException { }) { shards.startAll(); Thread thread = new Thread(() -> { - IndexRequest indexRequest = new IndexRequest(index.getName(), "type").source("{}", XContentType.JSON); + IndexRequest indexRequest = new IndexRequest(index.getName()).source("{}", XContentType.JSON); try { shards.index(indexRequest); } catch (Exception e) { @@ -244,7 +244,7 @@ public void prepareForTranslogOperations(int totalTranslogOps, ActionListener replicas = shards.getReplicas(); IndexShard replica1 = replicas.get(0); - IndexRequest indexRequest = new IndexRequest(index.getName(), "type", "1").source("{ \"f\": \"1\"}", XContentType.JSON); + IndexRequest indexRequest = new IndexRequest(index.getName()).id("1").source("{ \"f\": \"1\"}", XContentType.JSON); logger.info("--> isolated replica " + replica1.routingEntry()); BulkShardRequest replicationRequest = indexOnPrimary(indexRequest, shards.getPrimary()); for (int i = 1; i < replicas.size(); i++) { @@ -332,7 +332,7 @@ public void testConflictingOpsOnReplica() throws Exception { logger.info("--> promoting replica to primary " + replica1.routingEntry()); shards.promoteReplicaToPrimary(replica1).get(); - indexRequest = new IndexRequest(index.getName(), 
"type", "1").source("{ \"f\": \"2\"}", XContentType.JSON); + indexRequest = new IndexRequest(index.getName()).id("1").source("{ \"f\": \"2\"}", XContentType.JSON); shards.index(indexRequest); shards.refresh("test"); for (IndexShard shard : shards) { @@ -362,7 +362,7 @@ public void testReplicaTermIncrementWithConcurrentPrimaryPromotion() throws Exce assertEquals(primaryPrimaryTerm, replica2.getPendingPrimaryTerm()); - IndexRequest indexRequest = new IndexRequest(index.getName(), "type", "1").source("{ \"f\": \"1\"}", XContentType.JSON); + IndexRequest indexRequest = new IndexRequest(index.getName()).id("1").source("{ \"f\": \"1\"}", XContentType.JSON); BulkShardRequest replicationRequest = indexOnPrimary(indexRequest, replica1); CyclicBarrier barrier = new CyclicBarrier(2); @@ -405,7 +405,7 @@ public void testReplicaOperationWithConcurrentPrimaryPromotion() throws Exceptio try (ReplicationGroup shards = new ReplicationGroup(buildIndexMetadata(1, mappings))) { shards.startAll(); long primaryPrimaryTerm = shards.getPrimary().getPendingPrimaryTerm(); - IndexRequest indexRequest = new IndexRequest(index.getName(), "type", "1").source("{ \"f\": \"1\"}", XContentType.JSON); + IndexRequest indexRequest = new IndexRequest(index.getName()).id("1").source("{ \"f\": \"1\"}", XContentType.JSON); BulkShardRequest replicationRequest = indexOnPrimary(indexRequest, shards.getPrimary()); List replicas = shards.getReplicas(); @@ -485,7 +485,7 @@ protected EngineFactory getEngineFactory(ShardRouting routing) { shards.startPrimary(); long primaryTerm = shards.getPrimary().getPendingPrimaryTerm(); List expectedTranslogOps = new ArrayList<>(); - BulkItemResponse indexResp = shards.index(new IndexRequest(index.getName(), "type", "1").source("{}", XContentType.JSON)); + BulkItemResponse indexResp = shards.index(new IndexRequest(index.getName()).id("1").source("{}", XContentType.JSON)); assertThat(indexResp.isFailed(), equalTo(true)); assertThat(indexResp.getFailure().getCause(), equalTo(indexException)); expectedTranslogOps.add(new Translog.NoOp(0, primaryTerm, indexException.toString())); @@ -508,18 +508,12 @@ protected EngineFactory getEngineFactory(ShardRouting routing) { assertThat(snapshot.totalOperations(), equalTo(0)); } } - try ( - Translog.Snapshot snapshot = shard.getHistoryOperations( - "test", - shard.indexSettings().isSoftDeleteEnabled() ? Engine.HistorySource.INDEX : Engine.HistorySource.TRANSLOG, - 0 - ) - ) { + try (Translog.Snapshot snapshot = shard.newChangesSnapshot("test", 0, Long.MAX_VALUE, false)) { assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedTranslogOps)); } } // the failure replicated directly from the replication channel. - indexResp = shards.index(new IndexRequest(index.getName(), "type", "any").source("{}", XContentType.JSON)); + indexResp = shards.index(new IndexRequest(index.getName()).id("any").source("{}", XContentType.JSON)); assertThat(indexResp.getFailure().getCause(), equalTo(indexException)); Translog.NoOp noop2 = new Translog.NoOp(1, primaryTerm, indexException.toString()); expectedTranslogOps.add(noop2); @@ -532,13 +526,7 @@ protected EngineFactory getEngineFactory(ShardRouting routing) { assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(Collections.singletonList(noop2))); } } - try ( - Translog.Snapshot snapshot = shard.getHistoryOperations( - "test", - shard.indexSettings().isSoftDeleteEnabled() ? 
Engine.HistorySource.INDEX : Engine.HistorySource.TRANSLOG, - 0 - ) - ) { + try (Translog.Snapshot snapshot = shard.newChangesSnapshot("test", 0, Long.MAX_VALUE, false)) { assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedTranslogOps)); } } @@ -552,9 +540,7 @@ protected EngineFactory getEngineFactory(ShardRouting routing) { public void testRequestFailureReplication() throws Exception { try (ReplicationGroup shards = createGroup(0)) { shards.startAll(); - BulkItemResponse response = shards.index( - new IndexRequest(index.getName(), "type", "1").source("{}", XContentType.JSON).version(2) - ); + BulkItemResponse response = shards.index(new IndexRequest(index.getName()).id("1").source("{}", XContentType.JSON).version(2)); assertTrue(response.isFailed()); assertThat(response.getFailure().getCause(), instanceOf(VersionConflictEngineException.class)); shards.assertAllEqual(0); @@ -572,7 +558,7 @@ public void testRequestFailureReplication() throws Exception { shards.addReplica(); } shards.startReplicas(nReplica); - response = shards.index(new IndexRequest(index.getName(), "type", "1").source("{}", XContentType.JSON).version(2)); + response = shards.index(new IndexRequest(index.getName()).id("1").source("{}", XContentType.JSON).version(2)); assertTrue(response.isFailed()); assertThat(response.getFailure().getCause(), instanceOf(VersionConflictEngineException.class)); shards.assertAllEqual(0); @@ -605,7 +591,7 @@ public void testSeqNoCollision() throws Exception { shards.syncGlobalCheckpoint(); logger.info("--> Isolate replica1"); - IndexRequest indexDoc1 = new IndexRequest(index.getName(), "type", "d1").source("{}", XContentType.JSON); + IndexRequest indexDoc1 = new IndexRequest(index.getName()).id("d1").source("{}", XContentType.JSON); BulkShardRequest replicationRequest = indexOnPrimary(indexDoc1, shards.getPrimary()); indexOnReplica(replicationRequest, shards, replica2); @@ -625,7 +611,7 @@ public void testSeqNoCollision() throws Exception { } logger.info("--> Promote replica1 as the primary"); shards.promoteReplicaToPrimary(replica1).get(); // wait until resync completed. - shards.index(new IndexRequest(index.getName(), "type", "d2").source("{}", XContentType.JSON)); + shards.index(new IndexRequest(index.getName()).id("d2").source("{}", XContentType.JSON)); final Translog.Operation op2; try (Translog.Snapshot snapshot = getTranslog(replica2).newSnapshot()) { assertThat(snapshot.totalOperations(), equalTo(1)); @@ -675,10 +661,10 @@ public void testLateDeliveryAfterGCTriggeredOnReplica() throws Exception { updateGCDeleteCycle(replica, gcInterval); final BulkShardRequest indexRequest = indexOnPrimary( - new IndexRequest(index.getName(), "type", "d1").source("{}", XContentType.JSON), + new IndexRequest(index.getName()).id("d1").source("{}", XContentType.JSON), primary ); - final BulkShardRequest deleteRequest = deleteOnPrimary(new DeleteRequest(index.getName(), "type", "d1"), primary); + final BulkShardRequest deleteRequest = deleteOnPrimary(new DeleteRequest(index.getName()).id("d1"), primary); deleteOnReplica(deleteRequest, shards, replica); // delete arrives on replica first. 
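// recording when the delete was processed lets the test let the GC delete cycle
// (configured via updateGCDeleteCycle above) elapse before the index operation is delivered late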
final long deleteTimestamp = threadPool.relativeTimeInMillis(); replica.refresh("test"); @@ -712,11 +698,11 @@ public void testOutOfOrderDeliveryForAppendOnlyOperations() throws Exception { final IndexShard replica = shards.getReplicas().get(0); // Append-only request - without id final BulkShardRequest indexRequest = indexOnPrimary( - new IndexRequest(index.getName(), "type", null).source("{}", XContentType.JSON), + new IndexRequest(index.getName()).id(null).source("{}", XContentType.JSON), primary ); final String docId = Iterables.get(getShardDocUIDs(primary), 0); - final BulkShardRequest deleteRequest = deleteOnPrimary(new DeleteRequest(index.getName(), "type", docId), primary); + final BulkShardRequest deleteRequest = deleteOnPrimary(new DeleteRequest(index.getName()).id(docId), primary); deleteOnReplica(deleteRequest, shards, replica); indexOnReplica(indexRequest, shards, replica); shards.assertAllEqual(0); @@ -732,12 +718,12 @@ public void testIndexingOptimizationUsingSequenceNumbers() throws Exception { for (int i = 0; i < numDocs; i++) { String id = Integer.toString(randomIntBetween(1, 100)); if (randomBoolean()) { - group.index(new IndexRequest(index.getName(), "type", id).source("{}", XContentType.JSON)); + group.index(new IndexRequest(index.getName()).id(id).source("{}", XContentType.JSON)); if (liveDocs.add(id) == false) { versionLookups++; } } else { - group.delete(new DeleteRequest(index.getName(), "type", id)); + group.delete(new DeleteRequest(index.getName(), id)); liveDocs.remove(id); versionLookups++; } diff --git a/server/src/test/java/org/opensearch/index/replication/RecoveryDuringReplicationTests.java b/server/src/test/java/org/opensearch/index/replication/RecoveryDuringReplicationTests.java index 8023c19a9648c..cccb2f470195b 100644 --- a/server/src/test/java/org/opensearch/index/replication/RecoveryDuringReplicationTests.java +++ b/server/src/test/java/org/opensearch/index/replication/RecoveryDuringReplicationTests.java @@ -51,7 +51,6 @@ import org.opensearch.common.lucene.uid.Versions; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.internal.io.IOUtils; import org.opensearch.index.IndexSettings; import org.opensearch.index.VersionType; import org.opensearch.index.engine.DocIdSeqNoAndSource; @@ -134,74 +133,6 @@ public void testIndexingDuringFileRecovery() throws Exception { } } - public void testRecoveryOfDisconnectedReplica() throws Exception { - Settings settings = Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false).build(); - try (ReplicationGroup shards = createGroup(1, settings)) { - shards.startAll(); - int docs = shards.indexDocs(randomInt(50)); - shards.flush(); - final IndexShard originalReplica = shards.getReplicas().get(0); - for (int i = 0; i < randomInt(2); i++) { - final int indexedDocs = shards.indexDocs(randomInt(5)); - docs += indexedDocs; - - final boolean flush = randomBoolean(); - if (flush) { - originalReplica.flush(new FlushRequest()); - } - } - - // simulate a background global checkpoint sync at which point we expect the global checkpoint to advance on the replicas - shards.syncGlobalCheckpoint(); - long globalCheckpointOnReplica = originalReplica.getLastSyncedGlobalCheckpoint(); - Optional<SequenceNumbers.CommitInfo> safeCommitOnReplica = originalReplica.store() - .findSafeIndexCommit(globalCheckpointOnReplica); - assertTrue(safeCommitOnReplica.isPresent()); - shards.removeReplica(originalReplica); - - final int missingOnReplica = shards.indexDocs(randomInt(5)); 
- docs += missingOnReplica; - - final boolean translogTrimmed; - if (randomBoolean()) { - shards.flush(); - translogTrimmed = randomBoolean(); - if (translogTrimmed) { - final Translog translog = getTranslog(shards.getPrimary()); - translog.getDeletionPolicy().setRetentionAgeInMillis(0); - translog.trimUnreferencedReaders(); - } - } else { - translogTrimmed = false; - } - originalReplica.close("disconnected", false); - IOUtils.close(originalReplica.store()); - final IndexShard recoveredReplica = shards.addReplicaWithExistingPath( - originalReplica.shardPath(), - originalReplica.routingEntry().currentNodeId() - ); - shards.recoverReplica(recoveredReplica); - if (translogTrimmed && missingOnReplica > 0) { - // replica has something to catch up with, but since we trimmed the primary translog, we should fall back to full recovery - assertThat(recoveredReplica.recoveryState().getIndex().fileDetails(), not(empty())); - } else { - assertThat(recoveredReplica.recoveryState().getIndex().fileDetails(), empty()); - assertThat( - recoveredReplica.recoveryState().getTranslog().recoveredOperations(), - equalTo(Math.toIntExact(docs - 1 - safeCommitOnReplica.get().localCheckpoint)) - ); - assertThat( - recoveredReplica.recoveryState().getTranslog().totalLocal(), - equalTo(Math.toIntExact(globalCheckpointOnReplica - safeCommitOnReplica.get().localCheckpoint)) - ); - } - - docs += shards.indexDocs(randomInt(5)); - - shards.assertAllEqual(docs); - } - } - /* * Simulate a scenario with two replicas where one of the replicas receives an extra document, the other replica is promoted on primary * failure, the receiving replica misses the primary/replica re-sync and then recovers from the primary. We expect that a @@ -212,7 +143,7 @@ public void testRecoveryToReplicaThatReceivedExtraDocument() throws Exception { shards.startAll(); final int docs = randomIntBetween(0, 16); for (int i = 0; i < docs; i++) { - shards.index(new IndexRequest("index", "type", Integer.toString(i)).source("{}", XContentType.JSON)); + shards.index(new IndexRequest("index").id(Integer.toString(i)).source("{}", XContentType.JSON)); } shards.flush(); @@ -279,10 +210,7 @@ public void testRecoveryAfterPrimaryPromotion() throws Exception { final int rollbackDocs = randomIntBetween(1, 5); logger.info("--> indexing {} rollback docs", rollbackDocs); for (int i = 0; i < rollbackDocs; i++) { - final IndexRequest indexRequest = new IndexRequest(index.getName(), "type", "rollback_" + i).source( - "{}", - XContentType.JSON - ); + final IndexRequest indexRequest = new IndexRequest(index.getName()).id("rollback_" + i).source("{}", XContentType.JSON); final BulkShardRequest bulkShardRequest = indexOnPrimary(indexRequest, oldPrimary); indexOnReplica(bulkShardRequest, shards, replica); } @@ -400,7 +328,7 @@ public void testReplicaRollbackStaleDocumentsInPeerRecovery() throws Exception { int staleDocs = scaledRandomIntBetween(1, 10); logger.info("--> indexing {} stale docs", staleDocs); for (int i = 0; i < staleDocs; i++) { - final IndexRequest indexRequest = new IndexRequest(index.getName(), "type", "stale_" + i).source("{}", XContentType.JSON); + final IndexRequest indexRequest = new IndexRequest(index.getName()).id("stale_" + i).source("{}", XContentType.JSON); final BulkShardRequest bulkShardRequest = indexOnPrimary(indexRequest, oldPrimary); indexOnReplica(bulkShardRequest, shards, replica); } @@ -439,10 +367,8 @@ public void testResyncAfterPrimaryPromotion() throws Exception { int initialDocs = randomInt(10); for (int i = 0; i < initialDocs; i++) { - 
final IndexRequest indexRequest = new IndexRequest(index.getName(), "type", "initial_doc_" + i).source( - "{ \"f\": \"normal\"}", - XContentType.JSON - ); + final IndexRequest indexRequest = new IndexRequest(index.getName()).id("initial_doc_" + i) + .source("{ \"f\": \"normal\"}", XContentType.JSON); shards.index(indexRequest); } @@ -459,10 +385,8 @@ public void testResyncAfterPrimaryPromotion() throws Exception { final int extraDocs = randomInt(5); logger.info("--> indexing {} extra docs", extraDocs); for (int i = 0; i < extraDocs; i++) { - final IndexRequest indexRequest = new IndexRequest(index.getName(), "type", "extra_doc_" + i).source( - "{ \"f\": \"normal\"}", - XContentType.JSON - ); + final IndexRequest indexRequest = new IndexRequest(index.getName()).id("extra_doc_" + i) + .source("{ \"f\": \"normal\"}", XContentType.JSON); final BulkShardRequest bulkShardRequest = indexOnPrimary(indexRequest, oldPrimary); indexOnReplica(bulkShardRequest, shards, newPrimary); } @@ -470,10 +394,8 @@ public void testResyncAfterPrimaryPromotion() throws Exception { final int extraDocsToBeTrimmed = randomIntBetween(0, 10); logger.info("--> indexing {} extra docs to be trimmed", extraDocsToBeTrimmed); for (int i = 0; i < extraDocsToBeTrimmed; i++) { - final IndexRequest indexRequest = new IndexRequest(index.getName(), "type", "extra_trimmed_" + i).source( - "{ \"f\": \"trimmed\"}", - XContentType.JSON - ); + final IndexRequest indexRequest = new IndexRequest(index.getName()).id("extra_trimmed_" + i) + .source("{ \"f\": \"trimmed\"}", XContentType.JSON); final BulkShardRequest bulkShardRequest = indexOnPrimary(indexRequest, oldPrimary); // have to replicate to another replica != newPrimary one - the subject to trim indexOnReplica(bulkShardRequest, shards, justReplica); @@ -541,7 +463,7 @@ protected EngineFactory getEngineFactory(ShardRouting routing) { final String id = "pending_" + i; threadPool.generic().submit(() -> { try { - shards.index(new IndexRequest(index.getName(), "type", id).source("{}", XContentType.JSON)); + shards.index(new IndexRequest(index.getName()).id(id).source("{}", XContentType.JSON)); } catch (Exception e) { throw new AssertionError(e); } finally { @@ -632,7 +554,7 @@ public void indexTranslogOperations( replicaEngineFactory.latchIndexers(1); threadPool.generic().submit(() -> { try { - shards.index(new IndexRequest(index.getName(), "type", "pending").source("{}", XContentType.JSON)); + shards.index(new IndexRequest(index.getName()).id("pending").source("{}", XContentType.JSON)); } catch (final Exception e) { throw new RuntimeException(e); } finally { @@ -644,7 +566,7 @@ public void indexTranslogOperations( replicaEngineFactory.awaitIndexersLatch(); // unblock indexing for the next doc replicaEngineFactory.allowIndexing(); - shards.index(new IndexRequest(index.getName(), "type", "completed").source("{}", XContentType.JSON)); + shards.index(new IndexRequest(index.getName()).id("completed").source("{}", XContentType.JSON)); pendingDocActiveWithExtraDocIndexed.countDown(); } catch (final Exception e) { throw new AssertionError(e); @@ -684,7 +606,7 @@ public void indexTranslogOperations( // wait for the translog phase to complete and the recovery to block global checkpoint advancement assertBusy(() -> assertTrue(shards.getPrimary().pendingInSync())); { - shards.index(new IndexRequest(index.getName(), "type", "last").source("{}", XContentType.JSON)); + shards.index(new IndexRequest(index.getName()).id("last").source("{}", XContentType.JSON)); final long expectedDocs = docs + 3L; 
assertThat(shards.getPrimary().getLocalCheckpoint(), equalTo(expectedDocs - 1)); // recovery is now in the process of being completed, therefore the global checkpoint can not have advanced on the primary @@ -719,7 +641,7 @@ public void testTransferMaxSeenAutoIdTimestampOnResync() throws Exception { long maxTimestampOnReplica2 = -1; List<IndexRequest> replicationRequests = new ArrayList<>(); for (int numDocs = between(1, 10), i = 0; i < numDocs; i++) { - final IndexRequest indexRequest = new IndexRequest(index.getName(), "type").source("{}", XContentType.JSON); + final IndexRequest indexRequest = new IndexRequest(index.getName()).source("{}", XContentType.JSON); indexRequest.process(Version.CURRENT, null, index.getName()); final IndexRequest copyRequest; if (randomBoolean()) { @@ -777,13 +699,13 @@ public void testAddNewReplicas() throws Exception { int nextId = docId.incrementAndGet(); if (appendOnly) { String id = randomBoolean() ? Integer.toString(nextId) : null; - shards.index(new IndexRequest(index.getName(), "type", id).source("{}", XContentType.JSON)); + shards.index(new IndexRequest(index.getName()).id(id).source("{}", XContentType.JSON)); } else if (frequently()) { String id = Integer.toString(frequently() ? nextId : between(0, nextId)); - shards.index(new IndexRequest(index.getName(), "type", id).source("{}", XContentType.JSON)); + shards.index(new IndexRequest(index.getName()).id(id).source("{}", XContentType.JSON)); } else { String id = Integer.toString(between(0, nextId)); - shards.delete(new DeleteRequest(index.getName(), "type", id)); + shards.delete(new DeleteRequest(index.getName()).id(id)); } if (randomInt(100) < 10) { shards.getPrimary().flush(new FlushRequest()); @@ -818,7 +740,7 @@ public void testRollbackOnPromotion() throws Exception { int inFlightOps = scaledRandomIntBetween(10, 200); for (int i = 0; i < inFlightOps; i++) { String id = "extra-" + i; - IndexRequest primaryRequest = new IndexRequest(index.getName(), "type", id).source("{}", XContentType.JSON); + IndexRequest primaryRequest = new IndexRequest(index.getName()).id(id).source("{}", XContentType.JSON); BulkShardRequest replicationRequest = indexOnPrimary(primaryRequest, shards.getPrimary()); for (IndexShard replica : shards.getReplicas()) { if (randomBoolean()) { diff --git a/server/src/test/java/org/opensearch/index/replication/RetentionLeasesReplicationTests.java b/server/src/test/java/org/opensearch/index/replication/RetentionLeasesReplicationTests.java index abb2a7632e25b..143d11bbbf13f 100644 --- a/server/src/test/java/org/opensearch/index/replication/RetentionLeasesReplicationTests.java +++ b/server/src/test/java/org/opensearch/index/replication/RetentionLeasesReplicationTests.java @@ -187,14 +187,12 @@ public void testTurnOffTranslogRetentionAfterAllShardStarted() throws Exception } group.syncGlobalCheckpoint(); group.flush(); - assertBusy( - () -> { - // we turn off the translog retention policy using the generic threadPool - for (IndexShard shard : group) { - assertThat(shard.translogStats().estimatedNumberOfOperations(), equalTo(0)); - } + assertBusy(() -> { + // we turn off the translog retention policy using the generic threadPool + for (IndexShard shard : group) { + assertThat(shard.translogStats().estimatedNumberOfOperations(), equalTo(0)); } - ); + }); } } diff --git a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java index 50f0fa54e01c0..8fe8a13de9910 100644 --- 
a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java @@ -1160,23 +1160,21 @@ public void testPeerRecoveryRetentionLeaseCreationAndRenewal() { equalTo(expectedLeaseIds) ); - assertAsTimePasses.accept( - () -> { - // Leases still don't expire - assertThat( - tracker.getRetentionLeases().leases().stream().map(RetentionLease::id).collect(Collectors.toSet()), - equalTo(expectedLeaseIds) - ); + assertAsTimePasses.accept(() -> { + // Leases still don't expire + assertThat( + tracker.getRetentionLeases().leases().stream().map(RetentionLease::id).collect(Collectors.toSet()), + equalTo(expectedLeaseIds) + ); - // Also leases are renewed before reaching half the expiry time - // noinspection OptionalGetWithoutIsPresent - assertThat( - tracker.getRetentionLeases() + " renewed before too long", - tracker.getRetentionLeases().leases().stream().mapToLong(RetentionLease::timestamp).min().getAsLong(), - greaterThanOrEqualTo(currentTimeMillis.get() - peerRecoveryRetentionLeaseRenewalTimeMillis) - ); - } - ); + // Also leases are renewed before reaching half the expiry time + // noinspection OptionalGetWithoutIsPresent + assertThat( + tracker.getRetentionLeases() + " renewed before too long", + tracker.getRetentionLeases().leases().stream().mapToLong(RetentionLease::timestamp).min().getAsLong(), + greaterThanOrEqualTo(currentTimeMillis.get() - peerRecoveryRetentionLeaseRenewalTimeMillis) + ); + }); IndexShardRoutingTable.Builder routingTableBuilder = new IndexShardRoutingTable.Builder(routingTable); for (ShardRouting replicaShard : routingTable.replicaShards()) { @@ -1188,17 +1186,15 @@ public void testPeerRecoveryRetentionLeaseCreationAndRenewal() { tracker.updateFromMaster(initialClusterStateVersion + randomLongBetween(1, 10), ids(activeAllocationIds), routingTable); - assertAsTimePasses.accept( - () -> { - // Leases still don't expire - assertThat( - tracker.getRetentionLeases().leases().stream().map(RetentionLease::id).collect(Collectors.toSet()), - equalTo(expectedLeaseIds) - ); - // ... and any extra peer recovery retention leases are expired immediately since the shard is fully active - tracker.addPeerRecoveryRetentionLease(randomAlphaOfLength(10), randomNonNegativeLong(), ActionListener.wrap(() -> {})); - } - ); + assertAsTimePasses.accept(() -> { + // Leases still don't expire + assertThat( + tracker.getRetentionLeases().leases().stream().map(RetentionLease::id).collect(Collectors.toSet()), + equalTo(expectedLeaseIds) + ); + // ... 
and any extra peer recovery retention leases are expired immediately since the shard is fully active + tracker.addPeerRecoveryRetentionLease(randomAlphaOfLength(10), randomNonNegativeLong(), ActionListener.wrap(() -> {})); + }); tracker.renewPeerRecoveryRetentionLeases(); assertTrue("expired extra lease", tracker.getRetentionLeases(true).v1()); diff --git a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java index 5e9773b555f3a..54a88d57b2b69 100644 --- a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java @@ -131,21 +131,12 @@ public void testRetentionLeaseBackgroundSyncActionOnPrimary() throws Interrupted ); final CountDownLatch latch = new CountDownLatch(1); - action.shardOperationOnPrimary( - request, - indexShard, - new LatchedActionListener<>( - ActionTestUtils.assertNoFailureListener( - result -> { - // the retention leases on the shard should be persisted - verify(indexShard).persistRetentionLeases(); - // we should forward the request containing the current retention leases to the replica - assertThat(result.replicaRequest(), sameInstance(request)); - } - ), - latch - ) - ); + action.shardOperationOnPrimary(request, indexShard, new LatchedActionListener<>(ActionTestUtils.assertNoFailureListener(result -> { + // the retention leases on the shard should be persisted + verify(indexShard).persistRetentionLeases(); + // we should forward the request containing the current retention leases to the replica + assertThat(result.replicaRequest(), sameInstance(request)); + }), latch)); latch.await(); } diff --git a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java index d13b74f1988e2..60ee3360ff235 100644 --- a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java @@ -126,20 +126,14 @@ public void testRetentionLeaseSyncActionOnPrimary() { ); final RetentionLeases retentionLeases = mock(RetentionLeases.class); final RetentionLeaseSyncAction.Request request = new RetentionLeaseSyncAction.Request(indexShard.shardId(), retentionLeases); - action.dispatchedShardOperationOnPrimary( - request, - indexShard, - ActionTestUtils.assertNoFailureListener( - result -> { - // the retention leases on the shard should be persisted - verify(indexShard).persistRetentionLeases(); - // we should forward the request containing the current retention leases to the replica - assertThat(result.replicaRequest(), sameInstance(request)); - // we should start with an empty replication response - assertNull(result.finalResponseIfSuccessful.getShardInfo()); - } - ) - ); + action.dispatchedShardOperationOnPrimary(request, indexShard, ActionTestUtils.assertNoFailureListener(result -> { + // the retention leases on the shard should be persisted + verify(indexShard).persistRetentionLeases(); + // we should forward the request containing the current retention leases to the replica + assertThat(result.replicaRequest(), sameInstance(request)); + // we should start with an empty replication response + assertNull(result.finalResponseIfSuccessful.getShardInfo()); + })); } public void testRetentionLeaseSyncActionOnReplica() throws 
Exception { diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardRetentionLeaseTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardRetentionLeaseTests.java index 77093327dcc19..3de5175bac7c5 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardRetentionLeaseTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardRetentionLeaseTests.java @@ -337,40 +337,6 @@ public void testRetentionLeaseStats() throws IOException { } } - public void testRetentionLeasesActionsFailWithSoftDeletesDisabled() throws Exception { - IndexShard shard = newStartedShard(true, Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false).build()); - assertThat( - expectThrows( - AssertionError.class, - () -> shard.addRetentionLease( - randomAlphaOfLength(10), - randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE), - "test", - ActionListener.wrap(() -> {}) - ) - ).getMessage(), - equalTo("retention leases requires soft deletes but [index] does not have soft deletes enabled") - ); - assertThat( - expectThrows( - AssertionError.class, - () -> shard.renewRetentionLease( - randomAlphaOfLength(10), - randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE), - "test" - ) - ).getMessage(), - equalTo("retention leases requires soft deletes but [index] does not have soft deletes enabled") - ); - assertThat( - expectThrows(AssertionError.class, () -> shard.removeRetentionLease(randomAlphaOfLength(10), ActionListener.wrap(() -> {}))) - .getMessage(), - equalTo("retention leases requires soft deletes but [index] does not have soft deletes enabled") - ); - shard.syncRetentionLeases(); - closeShards(shard); - } - private void assertRetentionLeases( final IndexShard indexShard, final int size, diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index 0d9dd49d73232..af3e0a6111399 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -70,7 +70,6 @@ import org.opensearch.common.Randomness; import org.opensearch.common.Strings; import org.opensearch.common.UUIDs; -import org.opensearch.common.breaker.CircuitBreaker; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.collect.Tuple; import org.opensearch.common.io.stream.BytesStreamOutput; @@ -88,7 +87,6 @@ import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.internal.io.IOUtils; import org.opensearch.env.NodeEnvironment; import org.opensearch.index.IndexSettings; import org.opensearch.index.VersionType; @@ -103,8 +101,6 @@ import org.opensearch.index.engine.InternalEngine; import org.opensearch.index.engine.InternalEngineFactory; import org.opensearch.index.engine.ReadOnlyEngine; -import org.opensearch.index.engine.Segment; -import org.opensearch.index.engine.SegmentsStats; import org.opensearch.index.fielddata.FieldDataStats; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.IndexFieldDataCache; @@ -202,7 +198,6 @@ import static org.hamcrest.Matchers.in; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static 
org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; @@ -1741,9 +1736,7 @@ public void testRefreshMetric() throws IOException { long refreshCount = shard.refreshStats().getTotal(); indexDoc(shard, "_doc", "test"); try ( - Engine.GetResult ignored = shard.get( - new Engine.Get(true, false, "_doc", "test", new Term(IdFieldMapper.NAME, Uid.encodeId("test"))) - ) + Engine.GetResult ignored = shard.get(new Engine.Get(true, false, "test", new Term(IdFieldMapper.NAME, Uid.encodeId("test")))) ) { assertThat(shard.refreshStats().getTotal(), equalTo(refreshCount + 1)); } @@ -1769,9 +1762,7 @@ public void testExternalRefreshMetric() throws IOException { final long extraInternalRefreshes = shard.routingEntry().primary() || shard.indexSettings().isSoftDeleteEnabled() == false ? 0 : 1; indexDoc(shard, "_doc", "test"); try ( - Engine.GetResult ignored = shard.get( - new Engine.Get(true, false, "_doc", "test", new Term(IdFieldMapper.NAME, Uid.encodeId("test"))) - ) + Engine.GetResult ignored = shard.get(new Engine.Get(true, false, "test", new Term(IdFieldMapper.NAME, Uid.encodeId("test")))) ) { assertThat(shard.refreshStats().getExternalTotal(), equalTo(externalRefreshCount)); assertThat(shard.refreshStats().getExternalTotal(), equalTo(shard.refreshStats().getTotal() - 1 - extraInternalRefreshes)); @@ -2344,12 +2335,7 @@ public void testPrimaryHandOffUpdatesLocalCheckpoint() throws IOException { indexDoc(primarySource, "_doc", Integer.toString(i)); } IndexShardTestCase.updateRoutingEntry(primarySource, primarySource.routingEntry().relocate(randomAlphaOfLength(10), -1)); - final IndexShard primaryTarget = newShard( - primarySource.routingEntry().getTargetRelocatingShard(), - Settings.builder() - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), primarySource.indexSettings().isSoftDeleteEnabled()) - .build() - ); + final IndexShard primaryTarget = newShard(primarySource.routingEntry().getTargetRelocatingShard()); updateMappings(primaryTarget, primarySource.indexSettings().getIndexMetadata()); recoverReplica(primaryTarget, primarySource, true); @@ -2367,12 +2353,11 @@ public void testPrimaryHandOffUpdatesLocalCheckpoint() throws IOException { /* This test just verifies that we fill up local checkpoint up to max seen seqID on primary recovery */ public void testRecoverFromStoreWithNoOps() throws IOException { - final Settings settings = Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()).build(); - final IndexShard shard = newStartedShard(true, settings); + final IndexShard shard = newStartedShard(true); indexDoc(shard, "_doc", "0"); indexDoc(shard, "_doc", "1"); // start a replica shard and index the second doc - final IndexShard otherShard = newStartedShard(false, settings); + final IndexShard otherShard = newStartedShard(false); updateMappings(otherShard, shard.indexSettings().getIndexMetadata()); SourceToParse sourceToParse = new SourceToParse( shard.shardId().getIndexName(), @@ -2590,10 +2575,7 @@ public void testRecoveryFailsAfterMovingToRelocatedState() throws InterruptedExc public void testRestoreShard() throws IOException { final IndexShard source = newStartedShard(true); - IndexShard target = newStartedShard( - true, - Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), source.indexSettings().isSoftDeleteEnabled()).build() - ); + IndexShard target = newStartedShard(true); indexDoc(source, "_doc", "0"); EngineTestCase.generateNewSeqNo(source.getEngine()); // create a gap in the history @@ -2674,11 +2656,7 @@ 
public void testReaderWrapperIsUsed() throws IOException { indexDoc(shard, "_doc", "1", "{\"foobar\" : \"bar\"}"); shard.refresh("test"); - try ( - Engine.GetResult getResult = shard.get( - new Engine.Get(false, false, "_doc", "1", new Term(IdFieldMapper.NAME, Uid.encodeId("1"))) - ) - ) { + try (Engine.GetResult getResult = shard.get(new Engine.Get(false, false, "1", new Term(IdFieldMapper.NAME, Uid.encodeId("1"))))) { assertTrue(getResult.exists()); assertNotNull(getResult.searcher()); } @@ -2712,9 +2690,7 @@ public void testReaderWrapperIsUsed() throws IOException { assertEquals(search.totalHits.value, 1); } try ( - Engine.GetResult getResult = newShard.get( - new Engine.Get(false, false, "_doc", "1", new Term(IdFieldMapper.NAME, Uid.encodeId("1"))) - ) + Engine.GetResult getResult = newShard.get(new Engine.Get(false, false, "1", new Term(IdFieldMapper.NAME, Uid.encodeId("1")))) ) { assertTrue(getResult.exists()); assertNotNull(getResult.searcher()); // make sure get uses the wrapped reader @@ -3267,42 +3243,37 @@ public void testDocStats() throws Exception { indexDoc(indexShard, "_doc", id); } // Need to update and sync the global checkpoint and the retention leases for the soft-deletes retention MergePolicy. - if (indexShard.indexSettings.isSoftDeleteEnabled()) { - final long newGlobalCheckpoint = indexShard.getLocalCheckpoint(); - if (indexShard.routingEntry().primary()) { - indexShard.updateLocalCheckpointForShard( - indexShard.routingEntry().allocationId().getId(), - indexShard.getLocalCheckpoint() - ); - indexShard.updateGlobalCheckpointForShard( - indexShard.routingEntry().allocationId().getId(), - indexShard.getLocalCheckpoint() - ); - indexShard.syncRetentionLeases(); - } else { - indexShard.updateGlobalCheckpointOnReplica(newGlobalCheckpoint, "test"); - - final RetentionLeases retentionLeases = indexShard.getRetentionLeases(); - indexShard.updateRetentionLeasesOnReplica( - new RetentionLeases( - retentionLeases.primaryTerm(), - retentionLeases.version() + 1, - retentionLeases.leases() - .stream() - .map( - lease -> new RetentionLease( - lease.id(), - newGlobalCheckpoint + 1, - lease.timestamp(), - ReplicationTracker.PEER_RECOVERY_RETENTION_LEASE_SOURCE - ) + final long newGlobalCheckpoint = indexShard.getLocalCheckpoint(); + if (indexShard.routingEntry().primary()) { + indexShard.updateLocalCheckpointForShard(indexShard.routingEntry().allocationId().getId(), indexShard.getLocalCheckpoint()); + indexShard.updateGlobalCheckpointForShard( + indexShard.routingEntry().allocationId().getId(), + indexShard.getLocalCheckpoint() + ); + indexShard.syncRetentionLeases(); + } else { + indexShard.updateGlobalCheckpointOnReplica(newGlobalCheckpoint, "test"); + + final RetentionLeases retentionLeases = indexShard.getRetentionLeases(); + indexShard.updateRetentionLeasesOnReplica( + new RetentionLeases( + retentionLeases.primaryTerm(), + retentionLeases.version() + 1, + retentionLeases.leases() + .stream() + .map( + lease -> new RetentionLease( + lease.id(), + newGlobalCheckpoint + 1, + lease.timestamp(), + ReplicationTracker.PEER_RECOVERY_RETENTION_LEASE_SOURCE ) - .collect(Collectors.toList()) - ) - ); - } - indexShard.sync(); + ) + .collect(Collectors.toList()) + ) + ); } + indexShard.sync(); // flush the buffered deletes final FlushRequest flushRequest = new FlushRequest(); flushRequest.force(false); @@ -3885,7 +3856,7 @@ public void testScheduledRefresh() throws Exception { indexDoc(primary, "_doc", "2", "{\"foo\" : \"bar\"}"); assertFalse(primary.scheduledRefresh()); 
assertTrue(primary.isSearchIdle()); - primary.checkIdle(0); + primary.flushOnIdle(0); assertTrue(primary.scheduledRefresh()); // make sure we refresh once the shard is inactive try (Engine.Searcher searcher = primary.acquireSearcher("test")) { assertEquals(3, searcher.getIndexReader().numDocs()); @@ -3931,260 +3902,6 @@ public void testRefreshIsNeededWithRefreshListeners() throws IOException, Interr closeShards(primary); } - public void testSegmentMemoryTrackedInBreaker() throws Exception { - Settings settings = Settings.builder() - .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .build(); - IndexMetadata metadata = IndexMetadata.builder("test") - .putMapping("_doc", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}") - .settings(settings) - .primaryTerm(0, 1) - .build(); - IndexShard primary = newShard(new ShardId(metadata.getIndex(), 0), true, "n1", metadata, null); - recoverShardFromStore(primary); - indexDoc(primary, "_doc", "0", "{\"foo\" : \"foo\"}"); - primary.refresh("forced refresh"); - - SegmentsStats ss = primary.segmentStats(randomBoolean(), randomBoolean()); - CircuitBreaker breaker = primary.circuitBreakerService.getBreaker(CircuitBreaker.ACCOUNTING); - assertThat(ss.getMemoryInBytes(), equalTo(breaker.getUsed())); - final long preRefreshBytes = ss.getMemoryInBytes(); - - indexDoc(primary, "_doc", "1", "{\"foo\" : \"bar\"}"); - indexDoc(primary, "_doc", "2", "{\"foo\" : \"baz\"}"); - indexDoc(primary, "_doc", "3", "{\"foo\" : \"eggplant\"}"); - - ss = primary.segmentStats(randomBoolean(), randomBoolean()); - breaker = primary.circuitBreakerService.getBreaker(CircuitBreaker.ACCOUNTING); - assertThat(preRefreshBytes, equalTo(breaker.getUsed())); - - primary.refresh("refresh"); - - ss = primary.segmentStats(randomBoolean(), randomBoolean()); - breaker = primary.circuitBreakerService.getBreaker(CircuitBreaker.ACCOUNTING); - assertThat(breaker.getUsed(), equalTo(ss.getMemoryInBytes())); - assertThat(breaker.getUsed(), greaterThan(preRefreshBytes)); - - indexDoc(primary, "_doc", "4", "{\"foo\": \"potato\"}"); - indexDoc(primary, "_doc", "5", "{\"foo\": \"potato\"}"); - // Forces a refresh with the INTERNAL scope - ((InternalEngine) primary.getEngine()).writeIndexingBuffer(); - - ss = primary.segmentStats(randomBoolean(), randomBoolean()); - breaker = primary.circuitBreakerService.getBreaker(CircuitBreaker.ACCOUNTING); - assertThat(breaker.getUsed(), equalTo(ss.getMemoryInBytes())); - assertThat(breaker.getUsed(), greaterThan(preRefreshBytes)); - final long postRefreshBytes = ss.getMemoryInBytes(); - - // Deleting a doc causes its memory to be freed from the breaker - deleteDoc(primary, "_doc", "0"); - // Here we are testing that a fully deleted segment should be dropped and its memory usage is freed. - // In order to instruct the merge policy not to keep a fully deleted segment, - // we need to flush and make that commit safe so that the SoftDeletesPolicy can drop everything. 
- if (IndexSettings.INDEX_SOFT_DELETES_SETTING.get(settings)) { - primary.updateGlobalCheckpointForShard(primary.routingEntry().allocationId().getId(), primary.getLastSyncedGlobalCheckpoint()); - primary.syncRetentionLeases(); - primary.sync(); - flushShard(primary); - } - primary.refresh("force refresh"); - - ss = primary.segmentStats(randomBoolean(), randomBoolean()); - breaker = primary.circuitBreakerService.getBreaker(CircuitBreaker.ACCOUNTING); - assertThat(breaker.getUsed(), lessThan(postRefreshBytes)); - - closeShards(primary); - - breaker = primary.circuitBreakerService.getBreaker(CircuitBreaker.ACCOUNTING); - assertThat(breaker.getUsed(), equalTo(0L)); - } - - public void testSegmentMemoryTrackedWithRandomSearchers() throws Exception { - Settings settings = Settings.builder() - .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .build(); - IndexMetadata metadata = IndexMetadata.builder("test") - .putMapping("_doc", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}") - .settings(settings) - .primaryTerm(0, 1) - .build(); - IndexShard primary = newShard(new ShardId(metadata.getIndex(), 0), true, "n1", metadata, null); - recoverShardFromStore(primary); - - int threadCount = randomIntBetween(2, 4); - List<Thread> threads = new ArrayList<>(threadCount); - int iterations = randomIntBetween(10, 20); - List<Engine.Searcher> searchers = Collections.synchronizedList(new ArrayList<>()); - - logger.info("--> running with {} threads and {} iterations each", threadCount, iterations); - for (int threadId = 0; threadId < threadCount; threadId++) { - final String threadName = "thread-" + threadId; - Runnable r = () -> { - for (int i = 0; i < iterations; i++) { - try { - if (randomBoolean()) { - String id = "id-" + threadName + "-" + i; - logger.debug("--> {} indexing {}", threadName, id); - indexDoc(primary, "_doc", id, "{\"foo\" : \"" + randomAlphaOfLength(10) + "\"}"); - } - - if (randomBoolean() && i > 10) { - String id = "id-" + threadName + "-" + randomIntBetween(0, i - 1); - logger.debug("--> {}, deleting {}", threadName, id); - deleteDoc(primary, "_doc", id); - } - - if (randomBoolean()) { - logger.debug("--> {} refreshing", threadName); - primary.refresh("forced refresh"); - } - - if (randomBoolean()) { - String searcherName = "searcher-" + threadName + "-" + i; - logger.debug("--> {} acquiring new searcher {}", threadName, searcherName); - // Acquire a new searcher, adding it to the list - searchers.add(primary.acquireSearcher(searcherName)); - } - - if (randomBoolean() && searchers.size() > 1) { - // Close one of the readers at random - synchronized (searchers) { - // re-check because it could have decremented after the check - if (searchers.size() > 1) { - Engine.Searcher searcher = searchers.remove(0); - logger.debug("--> {} closing searcher {}", threadName, searcher.source()); - IOUtils.close(searcher); - } - } - } - } catch (Exception e) { - logger.warn("--> got exception: ", e); - fail("got an exception we didn't expect"); - } - } - - }; - threads.add(new Thread(r, threadName)); - } - threads.stream().forEach(t -> t.start()); - - for (Thread t : threads) { - t.join(); - } - - // We need to wait for all ongoing merges to complete. 
The reason is that during a merge the - IndexWriter holds the core cache key open and causes the memory to be registered in the breaker - primary.forceMerge(new ForceMergeRequest().maxNumSegments(1).flush(true)); - - // Close remaining searchers - IOUtils.close(searchers); - primary.refresh("test"); - - SegmentsStats ss = primary.segmentStats(randomBoolean(), randomBoolean()); - CircuitBreaker breaker = primary.circuitBreakerService.getBreaker(CircuitBreaker.ACCOUNTING); - long segmentMem = ss.getMemoryInBytes(); - long breakerMem = breaker.getUsed(); - logger.info("--> comparing segmentMem: {} - breaker: {} => {}", segmentMem, breakerMem, segmentMem == breakerMem); - assertThat(segmentMem, equalTo(breakerMem)); - - // Close shard - closeShards(primary); - - // Check that the breaker was successfully reset to 0, meaning that all the accounting was correctly applied - breaker = primary.circuitBreakerService.getBreaker(CircuitBreaker.ACCOUNTING); - assertThat(breaker.getUsed(), equalTo(0L)); - } - - public void testFlushOnInactive() throws Exception { - Settings settings = Settings.builder() - .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .build(); - IndexMetadata metadata = IndexMetadata.builder("test") - .putMapping("_doc", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}") - .settings(settings) - .primaryTerm(0, 1) - .build(); - ShardRouting shardRouting = TestShardRouting.newShardRouting( - new ShardId(metadata.getIndex(), 0), - "n1", - true, - ShardRoutingState.INITIALIZING, - RecoverySource.EmptyStoreRecoverySource.INSTANCE - ); - final ShardId shardId = shardRouting.shardId(); - final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(createTempDir()); - ShardPath shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId); - AtomicBoolean markedInactive = new AtomicBoolean(); - AtomicReference<IndexShard> primaryRef = new AtomicReference<>(); - IndexShard primary = newShard( - shardRouting, - shardPath, - metadata, - null, - null, - new InternalEngineFactory(), - new EngineConfigFactory(new IndexSettings(metadata, settings)), - () -> {}, - RetentionLeaseSyncer.EMPTY, - new IndexEventListener() { - @Override - public void onShardInactive(IndexShard indexShard) { - markedInactive.set(true); - primaryRef.get().flush(new FlushRequest()); - } - } - ); - primaryRef.set(primary); - recoverShardFromStore(primary); - for (int i = 0; i < 3; i++) { - indexDoc(primary, "_doc", "" + i, "{\"foo\" : \"" + randomAlphaOfLength(10) + "\"}"); - primary.refresh("test"); // produce segments - } - List<Segment> segments = primary.segments(false); - Set<String> names = new HashSet<>(); - for (Segment segment : segments) { - assertFalse(segment.committed); - assertTrue(segment.search); - names.add(segment.getName()); - } - assertEquals(3, segments.size()); - primary.flush(new FlushRequest()); - primary.forceMerge(new ForceMergeRequest().maxNumSegments(1).flush(false)); - primary.refresh("test"); - segments = primary.segments(false); - for (Segment segment : segments) { - if (names.contains(segment.getName())) { - assertTrue(segment.committed); - assertFalse(segment.search); - } else { - assertFalse(segment.committed); - assertTrue(segment.search); - } - } - assertEquals(4, segments.size()); - - assertFalse(markedInactive.get()); - assertBusy(() -> { - primary.checkIdle(0); - assertFalse(primary.isActive()); - }); - - assertTrue(markedInactive.get()); - segments 
= primary.segments(false); - assertEquals(1, segments.size()); - for (Segment segment : segments) { - assertTrue(segment.committed); - assertTrue(segment.search); - } - closeShards(primary); - } - public void testOnCloseStats() throws IOException { final IndexShard indexShard = newStartedShard(true); @@ -4624,19 +4341,11 @@ public void testTypelessGet() throws IOException { assertTrue(indexResult.isCreated()); org.opensearch.index.engine.Engine.GetResult getResult = shard.get( - new Engine.Get(true, true, "some_type", "0", new Term("_id", Uid.encodeId("0"))) + new Engine.Get(true, true, "0", new Term("_id", Uid.encodeId("0"))) ); assertTrue(getResult.exists()); getResult.close(); - getResult = shard.get(new Engine.Get(true, true, "some_other_type", "0", new Term("_id", Uid.encodeId("0")))); - assertFalse(getResult.exists()); - getResult.close(); - - getResult = shard.get(new Engine.Get(true, true, "_doc", "0", new Term("_id", Uid.encodeId("0")))); - assertTrue(getResult.exists()); - getResult.close(); - closeShards(shard); } diff --git a/server/src/test/java/org/opensearch/index/shard/PrimaryReplicaSyncerTests.java b/server/src/test/java/org/opensearch/index/shard/PrimaryReplicaSyncerTests.java index 4a9b445c12f80..1c3fa908f11da 100644 --- a/server/src/test/java/org/opensearch/index/shard/PrimaryReplicaSyncerTests.java +++ b/server/src/test/java/org/opensearch/index/shard/PrimaryReplicaSyncerTests.java @@ -53,34 +53,23 @@ import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.VersionType; -import org.opensearch.index.engine.Engine; import org.opensearch.index.mapper.SourceToParse; import org.opensearch.index.seqno.SequenceNumbers; -import org.opensearch.index.translog.TestTranslog; -import org.opensearch.index.translog.Translog; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskManager; import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.stream.Collectors; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.core.IsInstanceOf.instanceOf; -import static org.mockito.Mockito.anyLong; -import static org.mockito.Mockito.anyString; -import static org.mockito.Mockito.eq; -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.when; public class PrimaryReplicaSyncerTests extends IndexShardTestCase { @@ -238,41 +227,6 @@ public void onResponse(PrimaryReplicaSyncer.ResyncTask result) { } } - public void testDoNotSendOperationsWithoutSequenceNumber() throws Exception { - IndexShard shard = spy(newStartedShard(true)); - when(shard.getLastKnownGlobalCheckpoint()).thenReturn(SequenceNumbers.UNASSIGNED_SEQ_NO); - int numOps = between(0, 20); - List<Translog.Operation> operations = new ArrayList<>(); - for (int i = 0; i < numOps; i++) { - operations.add( - new Translog.Index( - "_doc", - Integer.toString(i), - randomBoolean() ? SequenceNumbers.UNASSIGNED_SEQ_NO : i, - primaryTerm, - new byte[] { 1 } - ) - ); - } - Engine.HistorySource source = shard.indexSettings.isSoftDeleteEnabled() - ? 
Engine.HistorySource.INDEX - : Engine.HistorySource.TRANSLOG; - doReturn(TestTranslog.newSnapshotFromOperations(operations)).when(shard).getHistoryOperations(anyString(), eq(source), anyLong()); - TaskManager taskManager = new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet()); - List<Translog.Operation> sentOperations = new ArrayList<>(); - PrimaryReplicaSyncer.SyncAction syncAction = (request, parentTask, allocationId, primaryTerm, listener) -> { - sentOperations.addAll(Arrays.asList(request.getOperations())); - listener.onResponse(new ResyncReplicationResponse()); - }; - PrimaryReplicaSyncer syncer = new PrimaryReplicaSyncer(taskManager, syncAction); - syncer.setChunkSize(new ByteSizeValue(randomIntBetween(1, 10))); - PlainActionFuture<PrimaryReplicaSyncer.ResyncTask> fut = new PlainActionFuture<>(); - syncer.resync(shard, fut); - fut.actionGet(); - assertThat(sentOperations, equalTo(operations.stream().filter(op -> op.seqNo() >= 0).collect(Collectors.toList()))); - closeShards(shard); - } - public void testStatusSerialization() throws IOException { PrimaryReplicaSyncer.ResyncTask.Status status = new PrimaryReplicaSyncer.ResyncTask.Status( randomAlphaOfLength(10), diff --git a/server/src/test/java/org/opensearch/index/shard/RefreshListenersTests.java b/server/src/test/java/org/opensearch/index/shard/RefreshListenersTests.java index e67905b3255ac..5795ab65a2e1a 100644 --- a/server/src/test/java/org/opensearch/index/shard/RefreshListenersTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RefreshListenersTests.java @@ -368,7 +368,7 @@ public void testLotsOfThreads() throws Exception { } listener.assertNoError(); - Engine.Get get = new Engine.Get(false, false, "test", threadId, new Term(IdFieldMapper.NAME, threadId)); + Engine.Get get = new Engine.Get(false, false, threadId, new Term(IdFieldMapper.NAME, threadId)); try (Engine.GetResult getResult = engine.get(get, engine::acquireSearcher)) { assertTrue("document not found", getResult.exists()); assertEquals(iteration, getResult.version()); diff --git a/server/src/test/java/org/opensearch/index/shard/ShardGetServiceTests.java b/server/src/test/java/org/opensearch/index/shard/ShardGetServiceTests.java index fc8fe408a0c6d..a04be37176389 100644 --- a/server/src/test/java/org/opensearch/index/shard/ShardGetServiceTests.java +++ b/server/src/test/java/org/opensearch/index/shard/ShardGetServiceTests.java @@ -39,6 +39,7 @@ import org.opensearch.index.engine.Engine; import org.opensearch.index.engine.VersionConflictEngineException; import org.opensearch.index.get.GetResult; +import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.RoutingFieldMapper; import org.opensearch.search.fetch.subphase.FetchSourceContext; @@ -66,7 +67,7 @@ public void testGetForUpdate() throws IOException { recoverShardFromStore(primary); Engine.IndexResult test = indexDoc(primary, "test", "0", "{\"foo\" : \"bar\"}"); assertTrue(primary.getEngine().refreshNeeded()); - GetResult testGet = primary.getService().getForUpdate("test", "0", UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM); + GetResult testGet = primary.getService().getForUpdate("0", UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM); assertFalse(testGet.getFields().containsKey(RoutingFieldMapper.NAME)); assertEquals(new String(testGet.source(), StandardCharsets.UTF_8), "{\"foo\" : \"bar\"}"); try (Engine.Searcher searcher = primary.getEngine().acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { @@ -75,7 +76,7 @@ public void testGetForUpdate() throws IOException { Engine.IndexResult test1 = indexDoc(primary, "test", 
"1", "{\"foo\" : \"baz\"}", XContentType.JSON, "foobar"); assertTrue(primary.getEngine().refreshNeeded()); - GetResult testGet1 = primary.getService().getForUpdate("test", "1", UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM); + GetResult testGet1 = primary.getService().getForUpdate("1", UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM); assertEquals(new String(testGet1.source(), StandardCharsets.UTF_8), "{\"foo\" : \"baz\"}"); assertTrue(testGet1.getFields().containsKey(RoutingFieldMapper.NAME)); assertEquals("foobar", testGet1.getFields().get(RoutingFieldMapper.NAME).getValue()); @@ -90,23 +91,17 @@ public void testGetForUpdate() throws IOException { // now again from the reader Engine.IndexResult test2 = indexDoc(primary, "test", "1", "{\"foo\" : \"baz\"}", XContentType.JSON, "foobar"); assertTrue(primary.getEngine().refreshNeeded()); - testGet1 = primary.getService().getForUpdate("test", "1", UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM); + testGet1 = primary.getService().getForUpdate("1", UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM); assertEquals(new String(testGet1.source(), StandardCharsets.UTF_8), "{\"foo\" : \"baz\"}"); assertTrue(testGet1.getFields().containsKey(RoutingFieldMapper.NAME)); assertEquals("foobar", testGet1.getFields().get(RoutingFieldMapper.NAME).getValue()); final long primaryTerm = primary.getOperationPrimaryTerm(); - testGet1 = primary.getService().getForUpdate("test", "1", test2.getSeqNo(), primaryTerm); + testGet1 = primary.getService().getForUpdate("1", test2.getSeqNo(), primaryTerm); assertEquals(new String(testGet1.source(), StandardCharsets.UTF_8), "{\"foo\" : \"baz\"}"); - expectThrows( - VersionConflictEngineException.class, - () -> primary.getService().getForUpdate("test", "1", test2.getSeqNo() + 1, primaryTerm) - ); - expectThrows( - VersionConflictEngineException.class, - () -> primary.getService().getForUpdate("test", "1", test2.getSeqNo(), primaryTerm + 1) - ); + expectThrows(VersionConflictEngineException.class, () -> primary.getService().getForUpdate("1", test2.getSeqNo() + 1, primaryTerm)); + expectThrows(VersionConflictEngineException.class, () -> primary.getService().getForUpdate("1", test2.getSeqNo(), primaryTerm + 1)); closeShards(primary); } @@ -139,7 +134,7 @@ private void runGetFromTranslogWithOptions( IndexMetadata metadata = IndexMetadata.builder("test") .putMapping( - "test", + MapperService.SINGLE_MAPPING_NAME, "{ \"properties\": { \"foo\": { \"type\": " + fieldType + ", \"store\": true }, " @@ -154,18 +149,18 @@ private void runGetFromTranslogWithOptions( .build(); IndexShard primary = newShard(new ShardId(metadata.getIndex(), 0), true, "n1", metadata, null); recoverShardFromStore(primary); - Engine.IndexResult test = indexDoc(primary, "test", "0", docToIndex); + Engine.IndexResult test = indexDoc(primary, MapperService.SINGLE_MAPPING_NAME, "0", docToIndex); assertTrue(primary.getEngine().refreshNeeded()); - GetResult testGet = primary.getService().getForUpdate("test", "0", UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM); + GetResult testGet = primary.getService().getForUpdate("0", UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM); assertFalse(testGet.getFields().containsKey(RoutingFieldMapper.NAME)); assertEquals(new String(testGet.source() == null ? 
new byte[0] : testGet.source(), StandardCharsets.UTF_8), expectedResult); try (Engine.Searcher searcher = primary.getEngine().acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { assertEquals(searcher.getIndexReader().maxDoc(), 1); // we refreshed } - Engine.IndexResult test1 = indexDoc(primary, "test", "1", docToIndex, XContentType.JSON, "foobar"); + Engine.IndexResult test1 = indexDoc(primary, MapperService.SINGLE_MAPPING_NAME, "1", docToIndex, XContentType.JSON, "foobar"); assertTrue(primary.getEngine().refreshNeeded()); - GetResult testGet1 = primary.getService().getForUpdate("test", "1", UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM); + GetResult testGet1 = primary.getService().getForUpdate("1", UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM); assertEquals(new String(testGet1.source() == null ? new byte[0] : testGet1.source(), StandardCharsets.UTF_8), expectedResult); assertTrue(testGet1.getFields().containsKey(RoutingFieldMapper.NAME)); assertEquals("foobar", testGet1.getFields().get(RoutingFieldMapper.NAME).getValue()); @@ -177,10 +172,10 @@ private void runGetFromTranslogWithOptions( assertEquals(searcher.getIndexReader().maxDoc(), 2); } - Engine.IndexResult test2 = indexDoc(primary, "test", "2", docToIndex, XContentType.JSON, "foobar"); + Engine.IndexResult test2 = indexDoc(primary, MapperService.SINGLE_MAPPING_NAME, "2", docToIndex, XContentType.JSON, "foobar"); assertTrue(primary.getEngine().refreshNeeded()); GetResult testGet2 = primary.getService() - .get("test", "2", new String[] { "foo" }, true, 1, VersionType.INTERNAL, FetchSourceContext.FETCH_SOURCE); + .get("2", new String[] { "foo" }, true, 1, VersionType.INTERNAL, FetchSourceContext.FETCH_SOURCE); assertEquals(new String(testGet2.source() == null ? new byte[0] : testGet2.source(), StandardCharsets.UTF_8), expectedResult); assertTrue(testGet2.getFields().containsKey(RoutingFieldMapper.NAME)); assertTrue(testGet2.getFields().containsKey("foo")); @@ -193,8 +188,7 @@ private void runGetFromTranslogWithOptions( assertEquals(searcher.getIndexReader().maxDoc(), 3); } - testGet2 = primary.getService() - .get("test", "2", new String[] { "foo" }, true, 1, VersionType.INTERNAL, FetchSourceContext.FETCH_SOURCE); + testGet2 = primary.getService().get("2", new String[] { "foo" }, true, 1, VersionType.INTERNAL, FetchSourceContext.FETCH_SOURCE); assertEquals(new String(testGet2.source() == null ? 
new byte[0] : testGet2.source(), StandardCharsets.UTF_8), expectedResult); assertTrue(testGet2.getFields().containsKey(RoutingFieldMapper.NAME)); assertTrue(testGet2.getFields().containsKey("foo")); @@ -219,13 +213,7 @@ public void testTypelessGetForUpdate() throws IOException { Engine.IndexResult indexResult = indexDoc(shard, "some_type", "0", "{\"foo\" : \"bar\"}"); assertTrue(indexResult.isCreated()); - GetResult getResult = shard.getService().getForUpdate("some_type", "0", UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM); - assertTrue(getResult.isExists()); - - getResult = shard.getService().getForUpdate("some_other_type", "0", UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM); - assertFalse(getResult.isExists()); - - getResult = shard.getService().getForUpdate("_doc", "0", UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM); + GetResult getResult = shard.getService().getForUpdate("0", UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM); assertTrue(getResult.isExists()); closeShards(shard); diff --git a/server/src/test/java/org/opensearch/index/termvectors/TermVectorsServiceTests.java b/server/src/test/java/org/opensearch/index/termvectors/TermVectorsServiceTests.java index a725590afecbb..c81a773ecdb10 100644 --- a/server/src/test/java/org/opensearch/index/termvectors/TermVectorsServiceTests.java +++ b/server/src/test/java/org/opensearch/index/termvectors/TermVectorsServiceTests.java @@ -81,7 +81,7 @@ public void testTook() throws Exception { List longs = Stream.of(abs(randomLong()), abs(randomLong())).sorted().collect(toList()); - TermVectorsRequest request = new TermVectorsRequest("test", "type1", "0"); + TermVectorsRequest request = new TermVectorsRequest("test", "0"); TermVectorsResponse response = TermVectorsService.getTermVectors(shard, request, longs.iterator()::next); assertThat(response, notNullValue()); @@ -112,7 +112,7 @@ public void testDocFreqs() throws IOException { } bulk.get(); - TermVectorsRequest request = new TermVectorsRequest("test", "_doc", "0").termStatistics(true); + TermVectorsRequest request = new TermVectorsRequest("test", "0").termStatistics(true); IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndexService test = indicesService.indexService(resolveIndex("test")); @@ -153,7 +153,7 @@ public void testWithIndexedPhrases() throws IOException { } bulk.get(); - TermVectorsRequest request = new TermVectorsRequest("test", "_doc", "0").termStatistics(true); + TermVectorsRequest request = new TermVectorsRequest("test", "0").termStatistics(true); IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndexService test = indicesService.indexService(resolveIndex("test")); diff --git a/server/src/test/java/org/opensearch/index/translog/TranslogDeletionPolicyTests.java b/server/src/test/java/org/opensearch/index/translog/TranslogDeletionPolicyTests.java index 18060f493ad92..e5a41fdc15da1 100644 --- a/server/src/test/java/org/opensearch/index/translog/TranslogDeletionPolicyTests.java +++ b/server/src/test/java/org/opensearch/index/translog/TranslogDeletionPolicyTests.java @@ -32,11 +32,10 @@ package org.opensearch.index.translog; -import org.apache.lucene.store.ByteArrayDataOutput; import org.opensearch.common.UUIDs; -import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.bytes.ReleasableBytesReference; import org.opensearch.common.collect.Tuple; +import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.lease.Releasable; import org.opensearch.common.util.BigArrays; import 
diff --git a/server/src/test/java/org/opensearch/index/translog/TranslogDeletionPolicyTests.java b/server/src/test/java/org/opensearch/index/translog/TranslogDeletionPolicyTests.java index 18060f493ad92..e5a41fdc15da1 100644 --- a/server/src/test/java/org/opensearch/index/translog/TranslogDeletionPolicyTests.java +++ b/server/src/test/java/org/opensearch/index/translog/TranslogDeletionPolicyTests.java @@ -32,11 +32,10 @@ package org.opensearch.index.translog; -import org.apache.lucene.store.ByteArrayDataOutput; import org.opensearch.common.UUIDs; -import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.bytes.ReleasableBytesReference; import org.opensearch.common.collect.Tuple; +import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.lease.Releasable; import org.opensearch.common.util.BigArrays; import org.opensearch.core.internal.io.IOUtils; @@ -261,15 +260,14 @@ private Tuple<List<TranslogReader>, TranslogWriter> createReadersAndWriter(final ); writer = Mockito.spy(writer); Mockito.doReturn(now - (numberOfReaders - gen + 1) * 1000).when(writer).getLastModifiedTime(); - byte[] bytes = new byte[4]; - ByteArrayDataOutput out = new ByteArrayDataOutput(bytes); + BytesStreamOutput out = new BytesStreamOutput(4); final long startSeqNo = (gen - 1) * TOTAL_OPS_IN_GEN; final long endSeqNo = startSeqNo + TOTAL_OPS_IN_GEN - 1; for (long ops = endSeqNo; ops >= startSeqNo; ops--) { - out.reset(bytes); + out.reset(); out.writeInt((int) ops); - writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), ops); + writer.add(ReleasableBytesReference.wrap(out.bytes()), ops); } } return new Tuple<>(readers, writer); diff --git a/server/src/test/java/org/opensearch/index/translog/TranslogTests.java b/server/src/test/java/org/opensearch/index/translog/TranslogTests.java index e1d348e75d865..5614e07d7104d 100644 --- a/server/src/test/java/org/opensearch/index/translog/TranslogTests.java +++ b/server/src/test/java/org/opensearch/index/translog/TranslogTests.java @@ -43,7 +43,6 @@ import org.apache.lucene.mockfile.FilterFileChannel; import org.apache.lucene.mockfile.FilterFileSystemProvider; import org.apache.lucene.store.AlreadyClosedException; -import org.apache.lucene.store.ByteArrayDataOutput; import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.util.LineFileDocs; import org.apache.lucene.util.LuceneTestCase; @@ -1424,8 +1423,7 @@ public void testTranslogWriter() throws IOException { final Set<Long> seenSeqNos = new HashSet<>(); boolean opsHaveValidSequenceNumbers = randomBoolean(); for (int i = 0; i < numOps; i++) { - byte[] bytes = new byte[4]; - ByteArrayDataOutput out = new ByteArrayDataOutput(bytes); + BytesStreamOutput out = new BytesStreamOutput(4); out.writeInt(i); long seqNo; do { @@ -1435,7 +1433,7 @@ public void testTranslogWriter() throws IOException { if (seqNo != SequenceNumbers.UNASSIGNED_SEQ_NO) { seenSeqNos.add(seqNo); } - writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), seqNo); + writer.add(ReleasableBytesReference.wrap(out.bytes()), seqNo); } assertThat(persistedSeqNos, empty()); writer.sync(); @@ -1457,10 +1455,9 @@ public void testTranslogWriter() throws IOException { assertThat(reader.getCheckpoint().minSeqNo, equalTo(minSeqNo)); assertThat(reader.getCheckpoint().maxSeqNo, equalTo(maxSeqNo)); - byte[] bytes = new byte[4]; - ByteArrayDataOutput out = new ByteArrayDataOutput(bytes); + BytesStreamOutput out = new BytesStreamOutput(4); out.writeInt(2048); - writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), randomNonNegativeLong()); + writer.add(ReleasableBytesReference.wrap(out.bytes()), randomNonNegativeLong()); if (reader instanceof TranslogReader) { ByteBuffer buffer = ByteBuffer.allocate(4); @@ -1666,10 +1663,9 @@ ChannelFactory getChannelFactory() { ) { TranslogWriter writer = translog.getCurrent(); - byte[] bytes = new byte[4]; - ByteArrayDataOutput out = new ByteArrayDataOutput(new byte[4]); + BytesStreamOutput out = new BytesStreamOutput(4); out.writeInt(1); - writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), 1); + writer.add(ReleasableBytesReference.wrap(out.bytes()), 1); assertThat(persistedSeqNos, empty()); startBlocking.set(true); Thread thread = new Thread(() -> { @@ -1683,7 +1679,7 @@ ChannelFactory getChannelFactory() { writeStarted.await(); // Add will not block even though we are currently writing/syncing -
writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), 2); + writer.add(ReleasableBytesReference.wrap(out.bytes()), 2); blocker.countDown(); // Sync against so that both operations are written @@ -1698,11 +1694,10 @@ public void testCloseIntoReader() throws IOException { try (TranslogWriter writer = translog.createWriter(translog.currentFileGeneration() + 1)) { final int numOps = randomIntBetween(8, 128); for (int i = 0; i < numOps; i++) { - final byte[] bytes = new byte[4]; - final ByteArrayDataOutput out = new ByteArrayDataOutput(bytes); - out.reset(bytes); + final BytesStreamOutput out = new BytesStreamOutput(4); + out.reset(); out.writeInt(i); - writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), randomNonNegativeLong()); + writer.add(ReleasableBytesReference.wrap(out.bytes()), randomNonNegativeLong()); } writer.sync(); final Checkpoint writerCheckpoint = writer.getCheckpoint(); diff --git a/server/src/test/java/org/opensearch/indices/analysis/AnalysisModuleTests.java b/server/src/test/java/org/opensearch/indices/analysis/AnalysisModuleTests.java index d132d7456282b..fa927a58a2de1 100644 --- a/server/src/test/java/org/opensearch/indices/analysis/AnalysisModuleTests.java +++ b/server/src/test/java/org/opensearch/indices/analysis/AnalysisModuleTests.java @@ -39,7 +39,6 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.hunspell.Dictionary; -import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.store.Directory; import org.apache.lucene.store.NIOFSDirectory; @@ -57,7 +56,6 @@ import org.opensearch.index.analysis.CharFilterFactory; import org.opensearch.index.analysis.CustomAnalyzer; import org.opensearch.index.analysis.IndexAnalyzers; -import org.opensearch.index.analysis.NamedAnalyzer; import org.opensearch.index.analysis.PreConfiguredCharFilter; import org.opensearch.index.analysis.PreConfiguredTokenFilter; import org.opensearch.index.analysis.PreConfiguredTokenizer; @@ -93,7 +91,6 @@ import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.is; public class AnalysisModuleTests extends OpenSearchTestCase { private final Settings emptyNodeSettings = Settings.builder() @@ -146,32 +143,6 @@ public void testSimpleConfigurationYaml() throws IOException { testSimpleConfiguration(settings); } - public void testVersionedAnalyzers() throws Exception { - String yaml = "/org/opensearch/index/analysis/test1.yml"; - Version version = VersionUtils.randomVersion(random()); - Settings settings2 = Settings.builder() - .loadFromStream(yaml, getClass().getResourceAsStream(yaml), false) - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .put(IndexMetadata.SETTING_VERSION_CREATED, version) - .build(); - AnalysisRegistry newRegistry = getNewRegistry(settings2); - IndexAnalyzers indexAnalyzers = getIndexAnalyzers(newRegistry, settings2); - - // registry always has the current version - assertThat(newRegistry.getAnalyzer("default"), is(instanceOf(NamedAnalyzer.class))); - NamedAnalyzer defaultNamedAnalyzer = (NamedAnalyzer) newRegistry.getAnalyzer("default"); - assertThat(defaultNamedAnalyzer.analyzer(), is(instanceOf(StandardAnalyzer.class))); - assertEquals(Version.CURRENT.luceneVersion, defaultNamedAnalyzer.analyzer().getVersion()); - - // analysis service has the 
expected version - assertThat(indexAnalyzers.get("standard").analyzer(), is(instanceOf(StandardAnalyzer.class))); - assertEquals(version.luceneVersion, indexAnalyzers.get("standard").analyzer().getVersion()); - assertEquals(version.luceneVersion, indexAnalyzers.get("stop").analyzer().getVersion()); - - assertThat(indexAnalyzers.get("custom7").analyzer(), is(instanceOf(StandardAnalyzer.class))); - assertEquals(org.apache.lucene.util.Version.fromBits(3, 6, 0), indexAnalyzers.get("custom7").analyzer().getVersion()); - } - private void testSimpleConfiguration(Settings settings) throws IOException { IndexAnalyzers indexAnalyzers = getIndexAnalyzers(settings); Analyzer analyzer = indexAnalyzers.get("custom1").analyzer(); diff --git a/server/src/test/java/org/opensearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java b/server/src/test/java/org/opensearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java index dbe05c568b35e..7bdff59e7c334 100644 --- a/server/src/test/java/org/opensearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java @@ -275,7 +275,7 @@ public void testBorrowingSiblingBreakerMemory() { assertThat(exception.getMessage(), containsString("which is larger than the limit of [209715200/200mb]")); assertThat( exception.getMessage(), - containsString("usages [request=157286400/150mb, fielddata=54001664/51.5mb, in_flight_requests=0/0b, accounting=0/0b]") + containsString("usages [request=157286400/150mb, fielddata=54001664/51.5mb, in_flight_requests=0/0b]") ); assertThat(exception.getDurability(), equalTo(CircuitBreaker.Durability.TRANSIENT)); } @@ -341,7 +341,7 @@ long currentMemoryUsage() { + requestCircuitBreakerUsed + "/" + new ByteSizeValue(requestCircuitBreakerUsed) - + ", fielddata=0/0b, in_flight_requests=0/0b, accounting=0/0b]" + + ", fielddata=0/0b, in_flight_requests=0/0b]" ) ); assertThat(exception.getDurability(), equalTo(CircuitBreaker.Durability.TRANSIENT)); diff --git a/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index 89b240d76673f..3cda78018edfc 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -585,7 +585,6 @@ private IndicesClusterStateService createIndicesClusterStateService( null, null, null, - null, primaryReplicaSyncer, s -> {}, RetentionLeaseSyncer.EMPTY diff --git a/server/src/test/java/org/opensearch/indices/flush/SyncedFlushSingleNodeTests.java b/server/src/test/java/org/opensearch/indices/flush/SyncedFlushSingleNodeTests.java deleted file mode 100644 index 98305f198e6df..0000000000000 --- a/server/src/test/java/org/opensearch/indices/flush/SyncedFlushSingleNodeTests.java +++ /dev/null @@ -1,267 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.indices.flush; - -import org.opensearch.action.support.PlainActionFuture; -import org.opensearch.cluster.ClusterState; -import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.cluster.routing.IndexShardRoutingTable; -import org.opensearch.cluster.routing.ShardRouting; -import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.UUIDs; -import org.opensearch.common.lease.Releasable; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.index.IndexService; -import org.opensearch.index.shard.IndexShard; -import org.opensearch.index.shard.ShardId; -import org.opensearch.index.shard.ShardNotFoundException; -import org.opensearch.indices.IndicesService; -import org.opensearch.test.OpenSearchSingleNodeTestCase; -import org.opensearch.threadpool.ThreadPool; - -import java.util.List; -import java.util.Map; - -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; - -public class SyncedFlushSingleNodeTests extends OpenSearchSingleNodeTestCase { - - public void testModificationPreventsFlushing() throws InterruptedException { - createIndex("test"); - client().prepareIndex("test", "test", "1").setSource("{}", XContentType.JSON).get(); - IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); - IndexShard shard = test.getShardOrNull(0); - - SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); - final ShardId shardId = shard.shardId(); - final ClusterState state = getInstanceFromNode(ClusterService.class).state(); - final IndexShardRoutingTable shardRoutingTable = flushService.getShardRoutingTable(shardId, state); - final List activeShards = shardRoutingTable.activeShards(); - assertEquals("exactly one active shard", 1, activeShards.size()); - Map preSyncedResponses = SyncedFlushUtil.sendPreSyncRequests( - flushService, - activeShards, - state, - shardId - ); - assertEquals("exactly one commit id", 1, preSyncedResponses.size()); - client().prepareIndex("test", "test", "2").setSource("{}", XContentType.JSON).get(); - String syncId = UUIDs.randomBase64UUID(); - SyncedFlushUtil.LatchedListener listener = new SyncedFlushUtil.LatchedListener<>(); - flushService.sendSyncRequests(syncId, activeShards, state, preSyncedResponses, shardId, shardRoutingTable.size(), listener); - listener.latch.await(); - assertNull(listener.error); - ShardsSyncedFlushResult syncedFlushResult = listener.result; - assertNotNull(syncedFlushResult); - assertEquals(0, syncedFlushResult.successfulShards()); - assertEquals(1, syncedFlushResult.totalShards()); - assertEquals(syncId, syncedFlushResult.syncId()); - assertNotNull(syncedFlushResult.shardResponses().get(activeShards.get(0))); - 
assertFalse(syncedFlushResult.shardResponses().get(activeShards.get(0)).success()); - assertEquals("pending operations", syncedFlushResult.shardResponses().get(activeShards.get(0)).failureReason()); - - // pull another commit and make sure we can't sync-flush with the old one - SyncedFlushUtil.sendPreSyncRequests(flushService, activeShards, state, shardId); - listener = new SyncedFlushUtil.LatchedListener<>(); - flushService.sendSyncRequests(syncId, activeShards, state, preSyncedResponses, shardId, shardRoutingTable.size(), listener); - listener.latch.await(); - assertNull(listener.error); - syncedFlushResult = listener.result; - assertNotNull(syncedFlushResult); - assertEquals(0, syncedFlushResult.successfulShards()); - assertEquals(1, syncedFlushResult.totalShards()); - assertEquals(syncId, syncedFlushResult.syncId()); - assertNotNull(syncedFlushResult.shardResponses().get(activeShards.get(0))); - assertFalse(syncedFlushResult.shardResponses().get(activeShards.get(0)).success()); - assertEquals("commit has changed", syncedFlushResult.shardResponses().get(activeShards.get(0)).failureReason()); - } - - public void testSingleShardSuccess() throws InterruptedException { - createIndex("test"); - client().prepareIndex("test", "test", "1").setSource("{}", XContentType.JSON).get(); - IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); - IndexShard shard = test.getShardOrNull(0); - - SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); - final ShardId shardId = shard.shardId(); - SyncedFlushUtil.LatchedListener listener = new SyncedFlushUtil.LatchedListener<>(); - flushService.attemptSyncedFlush(shardId, listener); - listener.latch.await(); - assertNull(listener.error); - ShardsSyncedFlushResult syncedFlushResult = listener.result; - assertNotNull(syncedFlushResult); - assertEquals(1, syncedFlushResult.successfulShards()); - assertEquals(1, syncedFlushResult.totalShards()); - SyncedFlushService.ShardSyncedFlushResponse response = syncedFlushResult.shardResponses().values().iterator().next(); - assertTrue(response.success()); - } - - public void testSyncFailsIfOperationIsInFlight() throws Exception { - createIndex("test"); - client().prepareIndex("test", "test", "1").setSource("{}", XContentType.JSON).get(); - IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); - IndexShard shard = test.getShardOrNull(0); - - // wait for the GCP sync spawned from the index request above to complete to avoid that request disturbing the check below - assertBusy(() -> { - assertEquals(0, shard.getLastSyncedGlobalCheckpoint()); - assertEquals(0, shard.getActiveOperationsCount()); - }); - - SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); - final ShardId shardId = shard.shardId(); - PlainActionFuture fut = new PlainActionFuture<>(); - shard.acquirePrimaryOperationPermit(fut, ThreadPool.Names.WRITE, ""); - try (Releasable operationLock = fut.get()) { - SyncedFlushUtil.LatchedListener listener = new SyncedFlushUtil.LatchedListener<>(); - flushService.attemptSyncedFlush(shardId, listener); - listener.latch.await(); - assertNull(listener.error); - ShardsSyncedFlushResult syncedFlushResult = listener.result; - assertNotNull(syncedFlushResult); - assertEquals(0, syncedFlushResult.successfulShards()); - assertNotEquals(0, syncedFlushResult.totalShards()); - assertEquals("[1] ongoing operations on primary", syncedFlushResult.failureReason()); - } - } - - public void 
testSyncFailsOnIndexClosedOrMissing() throws InterruptedException { - createIndex( - "test", - Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build() - ); - IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); - final IndexShard shard = test.getShardOrNull(0); - assertNotNull(shard); - final ShardId shardId = shard.shardId(); - - final SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); - - SyncedFlushUtil.LatchedListener listener = new SyncedFlushUtil.LatchedListener(); - flushService.attemptSyncedFlush(new ShardId(shard.shardId().getIndex(), 1), listener); - listener.latch.await(); - assertNotNull(listener.error); - assertNull(listener.result); - assertEquals(ShardNotFoundException.class, listener.error.getClass()); - assertEquals("no such shard", listener.error.getMessage()); - - assertAcked(client().admin().indices().prepareClose("test")); - listener = new SyncedFlushUtil.LatchedListener(); - flushService.attemptSyncedFlush(shardId, listener); - listener.latch.await(); - assertNotNull(listener.error); - assertNull(listener.result); - assertEquals("closed", listener.error.getMessage()); - - listener = new SyncedFlushUtil.LatchedListener(); - flushService.attemptSyncedFlush(new ShardId("index not found", "_na_", 0), listener); - listener.latch.await(); - assertNotNull(listener.error); - assertNull(listener.result); - assertEquals("no such index [index not found]", listener.error.getMessage()); - } - - public void testFailAfterIntermediateCommit() throws InterruptedException { - createIndex("test"); - client().prepareIndex("test", "test", "1").setSource("{}", XContentType.JSON).get(); - IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); - IndexShard shard = test.getShardOrNull(0); - - SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); - final ShardId shardId = shard.shardId(); - final ClusterState state = getInstanceFromNode(ClusterService.class).state(); - final IndexShardRoutingTable shardRoutingTable = flushService.getShardRoutingTable(shardId, state); - final List activeShards = shardRoutingTable.activeShards(); - assertEquals("exactly one active shard", 1, activeShards.size()); - Map preSyncedResponses = SyncedFlushUtil.sendPreSyncRequests( - flushService, - activeShards, - state, - shardId - ); - assertEquals("exactly one commit id", 1, preSyncedResponses.size()); - if (randomBoolean()) { - client().prepareIndex("test", "test", "2").setSource("{}", XContentType.JSON).get(); - } - client().admin().indices().prepareFlush("test").setForce(true).get(); - String syncId = UUIDs.randomBase64UUID(); - final SyncedFlushUtil.LatchedListener listener = new SyncedFlushUtil.LatchedListener<>(); - flushService.sendSyncRequests(syncId, activeShards, state, preSyncedResponses, shardId, shardRoutingTable.size(), listener); - listener.latch.await(); - assertNull(listener.error); - ShardsSyncedFlushResult syncedFlushResult = listener.result; - assertNotNull(syncedFlushResult); - assertEquals(0, syncedFlushResult.successfulShards()); - assertEquals(1, syncedFlushResult.totalShards()); - assertEquals(syncId, syncedFlushResult.syncId()); - assertNotNull(syncedFlushResult.shardResponses().get(activeShards.get(0))); - assertFalse(syncedFlushResult.shardResponses().get(activeShards.get(0)).success()); - assertEquals("commit has changed", 
syncedFlushResult.shardResponses().get(activeShards.get(0)).failureReason()); - } - - public void testFailWhenCommitIsMissing() throws InterruptedException { - createIndex("test"); - client().prepareIndex("test", "test", "1").setSource("{}", XContentType.JSON).get(); - IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); - IndexShard shard = test.getShardOrNull(0); - - SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); - final ShardId shardId = shard.shardId(); - final ClusterState state = getInstanceFromNode(ClusterService.class).state(); - final IndexShardRoutingTable shardRoutingTable = flushService.getShardRoutingTable(shardId, state); - final List activeShards = shardRoutingTable.activeShards(); - assertEquals("exactly one active shard", 1, activeShards.size()); - Map preSyncedResponses = SyncedFlushUtil.sendPreSyncRequests( - flushService, - activeShards, - state, - shardId - ); - assertEquals("exactly one commit id", 1, preSyncedResponses.size()); - preSyncedResponses.clear(); // wipe it... - String syncId = UUIDs.randomBase64UUID(); - SyncedFlushUtil.LatchedListener listener = new SyncedFlushUtil.LatchedListener<>(); - flushService.sendSyncRequests(syncId, activeShards, state, preSyncedResponses, shardId, shardRoutingTable.size(), listener); - listener.latch.await(); - assertNull(listener.error); - ShardsSyncedFlushResult syncedFlushResult = listener.result; - assertNotNull(syncedFlushResult); - assertEquals(0, syncedFlushResult.successfulShards()); - assertEquals(1, syncedFlushResult.totalShards()); - assertEquals(syncId, syncedFlushResult.syncId()); - assertNotNull(syncedFlushResult.shardResponses().get(activeShards.get(0))); - assertFalse(syncedFlushResult.shardResponses().get(activeShards.get(0)).success()); - assertEquals("no commit id from pre-sync flush", syncedFlushResult.shardResponses().get(activeShards.get(0)).failureReason()); - } - -} diff --git a/server/src/test/java/org/opensearch/indices/flush/SyncedFlushUtil.java b/server/src/test/java/org/opensearch/indices/flush/SyncedFlushUtil.java deleted file mode 100644 index def9c0c15ced2..0000000000000 --- a/server/src/test/java/org/opensearch/indices/flush/SyncedFlushUtil.java +++ /dev/null @@ -1,126 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.indices.flush; - -import org.apache.logging.log4j.Logger; -import org.opensearch.ExceptionsHelper; -import org.opensearch.action.ActionListener; -import org.opensearch.cluster.ClusterState; -import org.opensearch.cluster.routing.ShardRouting; -import org.opensearch.index.shard.ShardId; -import org.opensearch.test.InternalTestCluster; - -import java.util.List; -import java.util.Map; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.atomic.AtomicReference; - -import static org.opensearch.test.OpenSearchTestCase.assertBusy; - -/** Utils for SyncedFlush */ -public class SyncedFlushUtil { - - private SyncedFlushUtil() { - - } - - /** - * Blocking version of {@link SyncedFlushService#attemptSyncedFlush(ShardId, ActionListener)} - */ - public static ShardsSyncedFlushResult attemptSyncedFlush(Logger logger, InternalTestCluster cluster, ShardId shardId) throws Exception { - /* - * When the last indexing operation is completed, we will fire a global checkpoint sync. - * Since a global checkpoint sync request is a replication request, it will acquire an index - * shard permit on the primary when executing. If this happens at the same time while we are - * issuing the synced-flush, the synced-flush request will fail as it thinks there are - * in-flight operations. We can avoid such situation by continuing issuing another synced-flush - * if the synced-flush failed due to the ongoing operations on the primary. - */ - SyncedFlushService service = cluster.getInstance(SyncedFlushService.class); - AtomicReference<LatchedListener<ShardsSyncedFlushResult>> listenerHolder = new AtomicReference<>(); - assertBusy(() -> { - LatchedListener<ShardsSyncedFlushResult> listener = new LatchedListener<>(); - listenerHolder.set(listener); - service.attemptSyncedFlush(shardId, listener); - listener.latch.await(); - if (listener.result != null - && listener.result.failureReason() != null - && listener.result.failureReason().contains("ongoing operations on primary")) { - throw new AssertionError(listener.result.failureReason()); // cause the assert busy to retry - } - }); - if (listenerHolder.get().error != null) { - throw ExceptionsHelper.convertToOpenSearchException(listenerHolder.get().error); - } - return listenerHolder.get().result; - } - - public static final class LatchedListener<T> implements ActionListener<T> { - public volatile T result; - public volatile Exception error; - public final CountDownLatch latch = new CountDownLatch(1); - - @Override - public void onResponse(T result) { - this.result = result; - latch.countDown(); - } - - @Override - public void onFailure(Exception e) { - error = e; - latch.countDown(); - } - } - - /** - * Blocking version of {@link SyncedFlushService#sendPreSyncRequests(List, ClusterState, ShardId, ActionListener)} - */ - public static Map<String, SyncedFlushService.PreSyncedFlushResponse> sendPreSyncRequests( - SyncedFlushService service, - List<ShardRouting> activeShards, - ClusterState state, - ShardId shardId - ) { - LatchedListener<Map<String, SyncedFlushService.PreSyncedFlushResponse>> listener = new LatchedListener<>(); - service.sendPreSyncRequests(activeShards, state, shardId, listener); - try { - listener.latch.await(); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - if (listener.error != null) { - throw ExceptionsHelper.convertToOpenSearchException(listener.error); - } - return listener.result; - } -}
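
Both synced-flush test helpers above are deleted outright rather than migrated: the synced-flush feature and its sync-id handshake are removed along with these tests, since soft-deletes-based peer recovery no longer needs a pre-negotiated commit marker. Where the removed tests exercised sync behavior, a plain (optionally forced) flush is the surviving API; the call below is taken verbatim from the deleted tests:

    // The remaining flush entry point once synced flush is gone.
    client().admin().indices().prepareFlush("test").setForce(true).get();
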
diff --git a/server/src/test/java/org/opensearch/indices/recovery/RecoverySourceHandlerTests.java b/server/src/test/java/org/opensearch/indices/recovery/RecoverySourceHandlerTests.java index df6bc6ee27990..78a0f9721e5ad 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/RecoverySourceHandlerTests.java @@ -665,7 +665,6 @@ public void testThrowExceptionOnPrimaryRelocatedBeforePhase1Started() throws IOE Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, between(0, 5)) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, between(1, 5)) - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()) .put(IndexMetadata.SETTING_VERSION_CREATED, VersionUtils.randomVersion(random())) .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID(random())) ); diff --git a/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java index 6286921688e67..54f4a22f3a577 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java @@ -144,105 +144,6 @@ public void testRetentionPolicyChangeDuringRecovery() throws Exception { } } - public void testRecoveryWithOutOfOrderDeleteWithTranslog() throws Exception { - /* - * The flow of this test: - * - delete #1 - * - roll generation (to create gen 2) - * - index #0 - * - index #3 - * - flush (commit point has max_seqno 3, and local checkpoint 1 -> points at gen 2, previous commit point is maintained) - * - index #2 - * - index #5 - * - If flush and the translog retention disabled, delete #1 will be removed while index #0 is still retained and replayed. - */ - Settings settings = Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false).build(); - try (ReplicationGroup shards = createGroup(1, settings)) { - shards.startAll(); - // create out of order delete and index op on replica - final IndexShard orgReplica = shards.getReplicas().get(0); - final String indexName = orgReplica.shardId().getIndexName(); - final long primaryTerm = orgReplica.getOperationPrimaryTerm(); - - // delete #1 - orgReplica.advanceMaxSeqNoOfUpdatesOrDeletes(1); // manually advance msu for this delete - orgReplica.applyDeleteOperationOnReplica(1, primaryTerm, 2, "type", "id"); - getTranslog(orgReplica).rollGeneration(); // isolate the delete in it's own generation - // index #0 - orgReplica.applyIndexOperationOnReplica( - 0, - primaryTerm, - 1, - IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, - false, - new SourceToParse(indexName, "type", "id", new BytesArray("{}"), XContentType.JSON) - ); - // index #3 - orgReplica.applyIndexOperationOnReplica( - 3, - primaryTerm, - 1, - IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, - false, - new SourceToParse(indexName, "type", "id-3", new BytesArray("{}"), XContentType.JSON) - ); - // Flushing a new commit with local checkpoint=1 allows to delete the translog gen #1. - orgReplica.flush(new FlushRequest().force(true).waitIfOngoing(true)); - // index #2 - orgReplica.applyIndexOperationOnReplica( - 2, - primaryTerm, - 1, - IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, - false, - new SourceToParse(indexName, "type", "id-2", new BytesArray("{}"), XContentType.JSON) - ); - orgReplica.sync(); // advance local checkpoint - orgReplica.updateGlobalCheckpointOnReplica(3L, "test"); - // index #5 -> force NoOp #4.
- orgReplica.applyIndexOperationOnReplica( - 5, - primaryTerm, - 1, - IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, - false, - new SourceToParse(indexName, "type", "id-5", new BytesArray("{}"), XContentType.JSON) - ); - - final int translogOps; - if (randomBoolean()) { - if (randomBoolean()) { - logger.info("--> flushing shard (translog will be trimmed)"); - IndexMetadata.Builder builder = IndexMetadata.builder(orgReplica.indexSettings().getIndexMetadata()); - builder.settings( - Settings.builder() - .put(orgReplica.indexSettings().getSettings()) - .put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), "-1") - .put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), "-1") - ); - orgReplica.indexSettings().updateIndexMetadata(builder.build()); - orgReplica.onSettingsChanged(); - translogOps = 5; // 4 ops + seqno gaps (delete #1 is removed but index #0 will be replayed). - } else { - logger.info("--> flushing shard (translog will be retained)"); - translogOps = 6; // 5 ops + seqno gaps - } - flushShard(orgReplica); - } else { - translogOps = 6; // 5 ops + seqno gaps - } - - final IndexShard orgPrimary = shards.getPrimary(); - shards.promoteReplicaToPrimary(orgReplica).get(); // wait for primary/replica sync to make sure seq# gap is closed. - - IndexShard newReplica = shards.addReplicaWithExistingPath(orgPrimary.shardPath(), orgPrimary.routingEntry().currentNodeId()); - shards.recoverReplica(newReplica); - shards.assertAllEqual(3); - - assertThat(getTranslog(newReplica).totalOperations(), equalTo(translogOps)); - } - } - public void testRecoveryWithOutOfOrderDeleteWithSoftDeletes() throws Exception { Settings settings = Settings.builder() .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) @@ -324,7 +225,7 @@ public void testRecoveryWithOutOfOrderDeleteWithSoftDeletes() throws Exception { IndexShard newReplica = shards.addReplicaWithExistingPath(orgPrimary.shardPath(), orgPrimary.routingEntry().currentNodeId()); shards.recoverReplica(newReplica); shards.assertAllEqual(3); - try (Translog.Snapshot snapshot = newReplica.getHistoryOperations("test", Engine.HistorySource.INDEX, 0)) { + try (Translog.Snapshot snapshot = newReplica.newChangesSnapshot("test", 0, Long.MAX_VALUE, false)) { assertThat(snapshot, SnapshotMatchers.size(6)); } } @@ -585,7 +486,7 @@ public void testRecoveryTrimsLocalTranslog() throws Exception { } int inflightDocs = scaledRandomIntBetween(1, 100); for (int i = 0; i < inflightDocs; i++) { - final IndexRequest indexRequest = new IndexRequest(index.getName(), "type", "extra_" + i).source("{}", XContentType.JSON); + final IndexRequest indexRequest = new IndexRequest(index.getName()).id("extra_" + i).source("{}", XContentType.JSON); final BulkShardRequest bulkShardRequest = indexOnPrimary(indexRequest, oldPrimary); for (IndexShard replica : randomSubsetOf(shards.getReplicas())) { indexOnReplica(bulkShardRequest, shards, replica); diff --git a/server/src/test/java/org/opensearch/ingest/IngestDocumentTests.java b/server/src/test/java/org/opensearch/ingest/IngestDocumentTests.java index 2c4bc5061d822..a6ea02a5423c4 100644 --- a/server/src/test/java/org/opensearch/ingest/IngestDocumentTests.java +++ b/server/src/test/java/org/opensearch/ingest/IngestDocumentTests.java @@ -92,7 +92,7 @@ public void setTestIngestDocument() { list2.add("bar"); list2.add("baz"); document.put("list2", list2); - ingestDocument = new IngestDocument("index", "type", "id", null, null, null, document); + ingestDocument = new IngestDocument("index", "id", null, null, null, 
document); } public void testSimpleGetFieldValue() { @@ -101,7 +101,6 @@ public void setTestIngestDocument() { assertThat(ingestDocument.getFieldValue("_source.foo", String.class), equalTo("bar")); assertThat(ingestDocument.getFieldValue("_source.int", Integer.class), equalTo(123)); assertThat(ingestDocument.getFieldValue("_index", String.class), equalTo("index")); - assertThat(ingestDocument.getFieldValue("_type", String.class), equalTo("type")); assertThat(ingestDocument.getFieldValue("_id", String.class), equalTo("id")); assertThat( ingestDocument.getFieldValue("_ingest.timestamp", ZonedDateTime.class), @@ -238,7 +237,6 @@ public void testGetFieldValueEmpty() { public void testHasField() { assertTrue(ingestDocument.hasField("fizz")); assertTrue(ingestDocument.hasField("_index")); - assertTrue(ingestDocument.hasField("_type")); assertTrue(ingestDocument.hasField("_id")); assertTrue(ingestDocument.hasField("_source.fizz")); assertTrue(ingestDocument.hasField("_ingest.timestamp")); @@ -808,23 +806,23 @@ public void testSetFieldValueEmptyName() { public void testRemoveField() { ingestDocument.removeField("foo"); - assertThat(ingestDocument.getSourceAndMetadata().size(), equalTo(8)); + assertThat(ingestDocument.getSourceAndMetadata().size(), equalTo(7)); assertThat(ingestDocument.getSourceAndMetadata().containsKey("foo"), equalTo(false)); ingestDocument.removeField("_index"); - assertThat(ingestDocument.getSourceAndMetadata().size(), equalTo(7)); + assertThat(ingestDocument.getSourceAndMetadata().size(), equalTo(6)); assertThat(ingestDocument.getSourceAndMetadata().containsKey("_index"), equalTo(false)); ingestDocument.removeField("_source.fizz"); - assertThat(ingestDocument.getSourceAndMetadata().size(), equalTo(6)); + assertThat(ingestDocument.getSourceAndMetadata().size(), equalTo(5)); assertThat(ingestDocument.getSourceAndMetadata().containsKey("fizz"), equalTo(false)); assertThat(ingestDocument.getIngestMetadata().size(), equalTo(1)); ingestDocument.removeField("_ingest.timestamp"); - assertThat(ingestDocument.getSourceAndMetadata().size(), equalTo(6)); + assertThat(ingestDocument.getSourceAndMetadata().size(), equalTo(5)); assertThat(ingestDocument.getIngestMetadata().size(), equalTo(0)); } public void testRemoveInnerField() { ingestDocument.removeField("fizz.buzz"); - assertThat(ingestDocument.getSourceAndMetadata().size(), equalTo(9)); + assertThat(ingestDocument.getSourceAndMetadata().size(), equalTo(8)); assertThat(ingestDocument.getSourceAndMetadata().get("fizz"), instanceOf(Map.class)); @SuppressWarnings("unchecked") Map<String, Object> map = (Map<String, Object>) ingestDocument.getSourceAndMetadata().get("fizz"); @@ -833,17 +831,17 @@ public void testRemoveInnerField() { ingestDocument.removeField("fizz.foo_null"); assertThat(map.size(), equalTo(2)); - assertThat(ingestDocument.getSourceAndMetadata().size(), equalTo(9)); + assertThat(ingestDocument.getSourceAndMetadata().size(), equalTo(8)); assertThat(ingestDocument.getSourceAndMetadata().containsKey("fizz"), equalTo(true)); ingestDocument.removeField("fizz.1"); assertThat(map.size(), equalTo(1)); - assertThat(ingestDocument.getSourceAndMetadata().size(), equalTo(9)); + assertThat(ingestDocument.getSourceAndMetadata().size(), equalTo(8)); assertThat(ingestDocument.getSourceAndMetadata().containsKey("fizz"), equalTo(true)); ingestDocument.removeField("fizz.list"); assertThat(map.size(), equalTo(0)); - assertThat(ingestDocument.getSourceAndMetadata().size(), equalTo(9)); + assertThat(ingestDocument.getSourceAndMetadata().size(), equalTo(8)); assertThat(ingestDocument.getSourceAndMetadata().containsKey("fizz"), equalTo(true)); } @@ -879,7 +877,7 @@ public void testRemoveSourceObject() { public void testRemoveIngestObject() { ingestDocument.removeField("_ingest"); - assertThat(ingestDocument.getSourceAndMetadata().size(), equalTo(8)); + assertThat(ingestDocument.getSourceAndMetadata().size(), equalTo(7)); assertThat(ingestDocument.getSourceAndMetadata().containsKey("_ingest"), equalTo(false)); } @@ -901,7 +899,7 @@ public void testRemoveEmptyPathAfterStrippingOutPrefix() { public void testListRemoveField() { ingestDocument.removeField("list.0.field"); - assertThat(ingestDocument.getSourceAndMetadata().size(), equalTo(9)); + assertThat(ingestDocument.getSourceAndMetadata().size(), equalTo(8)); assertThat(ingestDocument.getSourceAndMetadata().containsKey("list"), equalTo(true)); Object object = ingestDocument.getSourceAndMetadata().get("list"); assertThat(object, instanceOf(List.class));
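
A note on the arithmetic above: IngestDocument's metadata no longer carries a _type entry, so its constructor loses one argument and every getSourceAndMetadata() size assertion shrinks by exactly one (8 -> 7, 9 -> 8, and so on). A sketch against the fixture from setTestIngestDocument, where document is the source map built there:

    // Old shape: new IngestDocument(index, type, id, routing, version, versionType, source)
    IngestDocument doc = new IngestDocument("index", "id", null, null, null, document);
    assertTrue(doc.hasField("_index"));
    assertTrue(doc.hasField("_id"));
    assertFalse(doc.hasField("_type"));   // the metadata key simply no longer exists
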
diff --git a/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java b/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java index 544fa7bc09d8f..fcd15e85979f7 100644 --- a/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java +++ b/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java @@ -181,7 +181,8 @@ public void testExecuteIndexPipelineDoesNotExist() { Collections.singletonList(DUMMY_PLUGIN), client ); - final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(emptyMap()) + final IndexRequest indexRequest = new IndexRequest("_index").id("_id") + .source(emptyMap()) .setPipeline("_id") .setFinalPipeline("_none"); @@ -729,13 +730,12 @@ public String getType() { ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); final SetOnce<Boolean> failure = new SetOnce<>(); BulkRequest bulkRequest = new BulkRequest(); - final IndexRequest indexRequest1 = new IndexRequest("_index", "_type", "_id1").source(emptyMap()) + final IndexRequest indexRequest1 = new IndexRequest("_index").id("_id1") + .source(emptyMap()) .setPipeline("_none") .setFinalPipeline("_none"); bulkRequest.add(indexRequest1); - IndexRequest indexRequest2 = new IndexRequest("_index", "_type", "_id2").source(emptyMap()) - .setPipeline(id) - .setFinalPipeline("_none"); + IndexRequest indexRequest2 = new IndexRequest("_index").id("_id2").source(emptyMap()).setPipeline(id).setFinalPipeline("_none"); bulkRequest.add(indexRequest2); final BiConsumer<Integer, Exception> failureHandler = (slot, e) -> { @@ -778,15 +778,15 @@ public void testExecuteBulkPipelineDoesNotExist() { BulkRequest bulkRequest = new BulkRequest(); - IndexRequest indexRequest1 = new IndexRequest("_index", "_type", "_id1").source(emptyMap()) + IndexRequest indexRequest1 = new IndexRequest("_index").id("_id1") + .source(emptyMap()) .setPipeline("_none") .setFinalPipeline("_none"); bulkRequest.add(indexRequest1); - IndexRequest indexRequest2 = new IndexRequest("_index", "_type", "_id2").source(emptyMap()) - .setPipeline("_id") - .setFinalPipeline("_none"); + IndexRequest indexRequest2 = new IndexRequest("_index").id("_id2").source(emptyMap()).setPipeline("_id").setFinalPipeline("_none"); bulkRequest.add(indexRequest2); - IndexRequest indexRequest3 = new IndexRequest("_index", "_type", "_id3").source(emptyMap()) + IndexRequest indexRequest3 = new IndexRequest("_index").id("_id3") + .source(emptyMap()) .setPipeline("does_not_exist") .setFinalPipeline("_none"); bulkRequest.add(indexRequest3); @@ -822,7 +822,8 @@ public void
testExecuteSuccess() { ClusterState previousClusterState = clusterState; clusterState = IngestService.innerPut(putRequest, clusterState); ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); - final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(emptyMap()) + final IndexRequest indexRequest = new IndexRequest("_index").id("_id") + .source(emptyMap()) .setPipeline("_id") .setFinalPipeline("_none"); @SuppressWarnings("unchecked") @@ -852,7 +853,8 @@ public void testExecuteEmptyPipeline() throws Exception { ClusterState previousClusterState = clusterState; clusterState = IngestService.innerPut(putRequest, clusterState); ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); - final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(emptyMap()) + final IndexRequest indexRequest = new IndexRequest("_index").id("_id") + .source(emptyMap()) .setPipeline("_id") .setFinalPipeline("_none"); @SuppressWarnings("unchecked") @@ -910,7 +912,8 @@ public void testExecutePropagateAllMetadataUpdates() throws Exception { handler.accept(ingestDocument, null); return null; }).when(processor).execute(any(), any()); - final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(emptyMap()) + final IndexRequest indexRequest = new IndexRequest("_index").id("_id") + .source(emptyMap()) .setPipeline("_id") .setFinalPipeline("_none"); @SuppressWarnings("unchecked") @@ -929,7 +932,6 @@ public void testExecutePropagateAllMetadataUpdates() throws Exception { verify(failureHandler, never()).accept(any(), any()); verify(completionHandler, times(1)).accept(Thread.currentThread(), null); assertThat(indexRequest.index(), equalTo("update_index")); - assertThat(indexRequest.type(), equalTo("update_type")); assertThat(indexRequest.id(), equalTo("update_id")); assertThat(indexRequest.routing(), equalTo("update_routing")); assertThat(indexRequest.version(), equalTo(newVersion)); @@ -952,7 +954,8 @@ public void testExecuteFailure() throws Exception { ClusterState previousClusterState = clusterState; clusterState = IngestService.innerPut(putRequest, clusterState); ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); - final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(emptyMap()) + final IndexRequest indexRequest = new IndexRequest("_index").id("_id") + .source(emptyMap()) .setPipeline("_id") .setFinalPipeline("_none"); doThrow(new RuntimeException()).when(processor) @@ -1011,7 +1014,8 @@ public void testExecuteSuccessWithOnFailure() throws Exception { ClusterState previousClusterState = clusterState; clusterState = IngestService.innerPut(putRequest, clusterState); ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); - final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(emptyMap()) + final IndexRequest indexRequest = new IndexRequest("_index").id("_id") + .source(emptyMap()) .setPipeline("_id") .setFinalPipeline("_none"); @SuppressWarnings("unchecked") @@ -1053,7 +1057,8 @@ public void testExecuteFailureWithNestedOnFailure() throws Exception { ClusterState previousClusterState = clusterState; clusterState = IngestService.innerPut(putRequest, clusterState); ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); - final IndexRequest indexRequest = new IndexRequest("_index", 
"_type", "_id").source(emptyMap()) + final IndexRequest indexRequest = new IndexRequest("_index").id("_id") + .source(emptyMap()) .setPipeline("_id") .setFinalPipeline("_none"); doThrow(new RuntimeException()).when(onFailureOnFailureProcessor) @@ -1089,12 +1094,12 @@ public void testBulkRequestExecutionWithFailures() throws Exception { DocWriteRequest request; if (randomBoolean()) { if (randomBoolean()) { - request = new DeleteRequest("_index", "_type", "_id"); + request = new DeleteRequest("_index", "_id"); } else { - request = new UpdateRequest("_index", "_type", "_id"); + request = new UpdateRequest("_index", "_id"); } } else { - IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").setPipeline(pipelineId).setFinalPipeline("_none"); + IndexRequest indexRequest = new IndexRequest("_index").id("_id").setPipeline(pipelineId).setFinalPipeline("_none"); indexRequest.source(Requests.INDEX_CONTENT_TYPE, "field1", "value1"); request = indexRequest; numIndexRequests++; @@ -1154,7 +1159,7 @@ public void testBulkRequestExecution() throws Exception { logger.info("Using [{}], not randomly determined default [{}]", xContentType, Requests.INDEX_CONTENT_TYPE); int numRequest = scaledRandomIntBetween(8, 64); for (int i = 0; i < numRequest; i++) { - IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").setPipeline(pipelineId).setFinalPipeline("_none"); + IndexRequest indexRequest = new IndexRequest("_index").id("_id").setPipeline(pipelineId).setFinalPipeline("_none"); indexRequest.source(xContentType, "field1", "value1"); bulkRequest.add(indexRequest); } @@ -1420,12 +1425,14 @@ public String getDescription() { ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); BulkRequest bulkRequest = new BulkRequest(); - final IndexRequest indexRequest1 = new IndexRequest("_index", "_type", "_id1").source(Collections.emptyMap()) + final IndexRequest indexRequest1 = new IndexRequest("_index").id("_id1") + .source(Collections.emptyMap()) .setPipeline("_none") .setFinalPipeline("_none"); bulkRequest.add(indexRequest1); - IndexRequest indexRequest2 = new IndexRequest("_index", "_type", "_id2").source(Collections.emptyMap()) + IndexRequest indexRequest2 = new IndexRequest("_index").id("_id2") + .source(Collections.emptyMap()) .setPipeline("_id") .setFinalPipeline("_none"); bulkRequest.add(indexRequest2); @@ -1711,11 +1718,11 @@ private class IngestDocumentMatcher implements ArgumentMatcher { private final IngestDocument ingestDocument; IngestDocumentMatcher(String index, String type, String id, Map source) { - this.ingestDocument = new IngestDocument(index, type, id, null, null, null, source); + this.ingestDocument = new IngestDocument(index, id, null, null, null, source); } IngestDocumentMatcher(String index, String type, String id, Long version, VersionType versionType, Map source) { - this.ingestDocument = new IngestDocument(index, type, id, null, version, versionType, source); + this.ingestDocument = new IngestDocument(index, id, null, version, versionType, source); } @Override diff --git a/server/src/test/java/org/opensearch/monitor/os/OsProbeTests.java b/server/src/test/java/org/opensearch/monitor/os/OsProbeTests.java index 140143ad4e00f..505dce8879bdd 100644 --- a/server/src/test/java/org/opensearch/monitor/os/OsProbeTests.java +++ b/server/src/test/java/org/opensearch/monitor/os/OsProbeTests.java @@ -41,6 +41,7 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static 
diff --git a/server/src/test/java/org/opensearch/monitor/os/OsProbeTests.java b/server/src/test/java/org/opensearch/monitor/os/OsProbeTests.java index 140143ad4e00f..505dce8879bdd 100644 --- a/server/src/test/java/org/opensearch/monitor/os/OsProbeTests.java +++ b/server/src/test/java/org/opensearch/monitor/os/OsProbeTests.java @@ -41,6 +41,7 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; +import static org.junit.Assume.assumeThat; import java.io.IOException; import java.math.BigInteger; @@ -50,9 +51,17 @@ import java.util.Locale; import java.util.stream.Collectors; +import org.apache.logging.log4j.Logger; import org.apache.lucene.util.Constants; import org.opensearch.test.OpenSearchTestCase; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + public class OsProbeTests extends OpenSearchTestCase { public void testOsInfo() throws IOException { @@ -277,6 +286,25 @@ public void testCgroupProbeWithMissingMemory() { assertNull(cgroup); } + public void testLogWarnCpuMessageOnlyOnce() { + final Logger logger = mock(Logger.class); + + final OsProbe noCpuStatsOsProbe = new OsProbe(logger) { + @Override + List<String> readSysFsCgroupCpuAcctCpuStat(String controlGroup) throws IOException { + return Collections.singletonList("nr_periods 1"); + } + }; + + assumeThat("CGroups are not available", noCpuStatsOsProbe.areCgroupStatsAvailable(), is(true)); + noCpuStatsOsProbe.osStats(); + // nr_throttled and throttled_time are missing, so the first read logs exactly two warnings + verify(logger, times(2)).warn(anyString()); + reset(logger); + noCpuStatsOsProbe.osStats(); + verify(logger, never()).warn(anyString()); + } + private static List<String> getProcSelfGroupLines(String hierarchy) { return Arrays.asList( "10:freezer:/", @@ -361,4 +389,5 @@ boolean areCgroupStatsAvailable() { } }; } + } diff --git a/server/src/test/java/org/opensearch/rest/action/admin/indices/RestGetFieldMappingActionTests.java b/server/src/test/java/org/opensearch/rest/action/admin/indices/RestGetFieldMappingActionTests.java deleted file mode 100644 index 1ab35e420a77e..0000000000000 --- a/server/src/test/java/org/opensearch/rest/action/admin/indices/RestGetFieldMappingActionTests.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details.
- */ - -package org.opensearch.rest.action.admin.indices; - -import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.rest.RestRequest; -import org.opensearch.rest.RestStatus; -import org.opensearch.test.rest.FakeRestChannel; -import org.opensearch.test.rest.FakeRestRequest; -import org.opensearch.test.rest.RestActionTestCase; -import org.junit.Before; - -import java.util.HashMap; -import java.util.Map; - -import static org.opensearch.rest.BaseRestHandler.INCLUDE_TYPE_NAME_PARAMETER; - -public class RestGetFieldMappingActionTests extends RestActionTestCase { - - @Before - public void setUpAction() { - controller().registerHandler(new RestGetFieldMappingAction()); - } - - public void testIncludeTypeName() { - Map params = new HashMap<>(); - String path; - if (randomBoolean()) { - params.put(INCLUDE_TYPE_NAME_PARAMETER, "true"); - path = "some_index/some_type/_mapping/field/some_field"; - } else { - params.put(INCLUDE_TYPE_NAME_PARAMETER, "false"); - path = "some_index/_mapping/field/some_field"; - } - - // We're not actually testing anything to do with the client, but need to set this so it doesn't fail the test for being unset. - verifyingClient.setExecuteVerifier((arg1, arg2) -> null); - - RestRequest deprecatedRequest = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.GET) - .withPath(path) - .withParams(params) - .build(); - dispatchRequest(deprecatedRequest); - assertWarnings(RestGetFieldMappingAction.TYPES_DEPRECATION_MESSAGE); - - RestRequest validRequest = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.GET) - .withPath("some_index/_mapping/field/some_field") - .build(); - dispatchRequest(validRequest); - } - - public void testTypeInPath() { - // Test that specifying a type while setting include_type_name to false - // results in an illegal argument exception. - Map params = new HashMap<>(); - params.put(INCLUDE_TYPE_NAME_PARAMETER, "false"); - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.GET) - .withPath("some_index/some_type/_mapping/field/some_field") - .withParams(params) - .build(); - - // We're not actually testing anything to do with the client, but need to set this so it doesn't fail the test for being unset. - verifyingClient.setExecuteVerifier((arg1, arg2) -> null); - - FakeRestChannel channel = new FakeRestChannel(request, false, 1); - ThreadContext threadContext = new ThreadContext(Settings.EMPTY); - controller().dispatchRequest(request, channel, threadContext); - - assertEquals(1, channel.errors().get()); - assertEquals(RestStatus.BAD_REQUEST, channel.capturedResponse().status()); - } -} diff --git a/server/src/test/java/org/opensearch/rest/action/admin/indices/RestGetMappingActionTests.java b/server/src/test/java/org/opensearch/rest/action/admin/indices/RestGetMappingActionTests.java deleted file mode 100644 index 6a7ea5018170c..0000000000000 --- a/server/src/test/java/org/opensearch/rest/action/admin/indices/RestGetMappingActionTests.java +++ /dev/null @@ -1,124 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.rest.action.admin.indices; - -import org.opensearch.client.node.NodeClient; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.rest.RestRequest; -import org.opensearch.rest.RestStatus; -import org.opensearch.test.rest.FakeRestChannel; -import org.opensearch.test.rest.FakeRestRequest; -import org.opensearch.test.rest.RestActionTestCase; -import org.opensearch.threadpool.TestThreadPool; -import org.opensearch.threadpool.ThreadPool; -import org.junit.After; -import org.junit.Before; - -import java.util.HashMap; -import java.util.Map; - -import static org.opensearch.rest.BaseRestHandler.INCLUDE_TYPE_NAME_PARAMETER; -import static org.mockito.Mockito.mock; - -public class RestGetMappingActionTests extends RestActionTestCase { - - private ThreadPool threadPool; - - @Before - public void setUpAction() { - threadPool = new TestThreadPool(RestValidateQueryActionTests.class.getName()); - controller().registerHandler(new RestGetMappingAction(threadPool)); - } - - @After - public void tearDownAction() { - assertTrue(terminate(threadPool)); - } - - public void testTypeExistsDeprecation() throws Exception { - Map params = new HashMap<>(); - params.put("type", "_doc"); - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.HEAD) - .withParams(params) - .build(); - - RestGetMappingAction handler = new RestGetMappingAction(threadPool); - handler.prepareRequest(request, mock(NodeClient.class)); - - assertWarnings("Type exists requests are deprecated, as types have been deprecated."); - } - - public void testTypeInPath() { - // Test that specifying a type while setting include_type_name to false - // results in an illegal argument exception. - Map params = new HashMap<>(); - params.put(INCLUDE_TYPE_NAME_PARAMETER, "false"); - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.GET) - .withPath("some_index/some_type/_mapping/some_field") - .withParams(params) - .build(); - - // We're not actually testing anything to do with the client, but need to set this so it doesn't fail the test for being unset. 
- verifyingClient.setExecuteVerifier((arg1, arg2) -> null); - - FakeRestChannel channel = new FakeRestChannel(request, false, 1); - ThreadContext threadContext = new ThreadContext(Settings.EMPTY); - controller().dispatchRequest(request, channel, threadContext); - - assertEquals(1, channel.errors().get()); - assertEquals(RestStatus.BAD_REQUEST, channel.capturedResponse().status()); - } - - /** - * Setting "include_type_name" to true or false should cause a deprecation warning starting in 7.0 - */ - public void testTypeUrlParameterDeprecation() throws Exception { - Map params = new HashMap<>(); - params.put(INCLUDE_TYPE_NAME_PARAMETER, Boolean.toString(randomBoolean())); - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.GET) - .withParams(params) - .withPath("/some_index/_mappings") - .build(); - - // We're not actually testing anything to do with the client, but need to set this so it doesn't fail the test for being unset. - verifyingClient.setExecuteVerifier((arg1, arg2) -> null); - - FakeRestChannel channel = new FakeRestChannel(request, false, 1); - ThreadContext threadContext = new ThreadContext(Settings.EMPTY); - controller().dispatchRequest(request, channel, threadContext); - - assertWarnings(RestGetMappingAction.TYPES_DEPRECATION_MESSAGE); - } - -} diff --git a/server/src/test/java/org/opensearch/rest/action/admin/indices/RestPutIndexTemplateActionTests.java b/server/src/test/java/org/opensearch/rest/action/admin/indices/RestPutIndexTemplateActionTests.java deleted file mode 100644 index 864d2f244b8c1..0000000000000 --- a/server/src/test/java/org/opensearch/rest/action/admin/indices/RestPutIndexTemplateActionTests.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.rest.action.admin.indices; - -import org.opensearch.client.node.NodeClient; -import org.opensearch.common.bytes.BytesReference; -import org.opensearch.common.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.rest.RestRequest; -import org.opensearch.test.rest.FakeRestRequest; -import org.opensearch.test.rest.RestActionTestCase; -import org.junit.Before; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; - -import static org.opensearch.rest.BaseRestHandler.INCLUDE_TYPE_NAME_PARAMETER; -import static org.mockito.Mockito.mock; - -public class RestPutIndexTemplateActionTests extends RestActionTestCase { - private RestPutIndexTemplateAction action; - - @Before - public void setUpAction() { - action = new RestPutIndexTemplateAction(); - controller().registerHandler(action); - } - - public void testIncludeTypeName() throws IOException { - XContentBuilder typedContent = XContentFactory.jsonBuilder() - .startObject() - .startObject("mappings") - .startObject("my_doc") - .startObject("properties") - .startObject("field1") - .field("type", "keyword") - .endObject() - .startObject("field2") - .field("type", "text") - .endObject() - .endObject() - .endObject() - .endObject() - .startObject("aliases") - .startObject("read_alias") - .endObject() - .endObject() - .endObject(); - - Map params = new HashMap<>(); - params.put(INCLUDE_TYPE_NAME_PARAMETER, "true"); - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.PUT) - .withParams(params) - .withPath("/_template/_some_template") - .withContent(BytesReference.bytes(typedContent), XContentType.JSON) - .build(); - action.prepareRequest(request, mock(NodeClient.class)); - assertWarnings(RestPutIndexTemplateAction.TYPES_DEPRECATION_MESSAGE); - } -} diff --git a/server/src/test/java/org/opensearch/rest/action/admin/indices/RestPutMappingActionTests.java b/server/src/test/java/org/opensearch/rest/action/admin/indices/RestPutMappingActionTests.java deleted file mode 100644 index c3255558d2569..0000000000000 --- a/server/src/test/java/org/opensearch/rest/action/admin/indices/RestPutMappingActionTests.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.rest.action.admin.indices; - -import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.rest.RestRequest; -import org.opensearch.rest.RestStatus; -import org.opensearch.test.rest.FakeRestChannel; -import org.opensearch.test.rest.FakeRestRequest; -import org.opensearch.test.rest.RestActionTestCase; -import org.junit.Before; - -import java.util.HashMap; -import java.util.Map; - -import static org.opensearch.rest.BaseRestHandler.INCLUDE_TYPE_NAME_PARAMETER; - -public class RestPutMappingActionTests extends RestActionTestCase { - - @Before - public void setUpAction() { - controller().registerHandler(new RestPutMappingAction()); - } - - public void testIncludeTypeName() { - Map params = new HashMap<>(); - params.put(INCLUDE_TYPE_NAME_PARAMETER, randomFrom("true", "false")); - RestRequest deprecatedRequest = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.PUT) - .withPath("/some_index/_mapping/") - .withParams(params) - .build(); - - dispatchRequest(deprecatedRequest); - assertWarnings(RestPutMappingAction.TYPES_DEPRECATION_MESSAGE); - - RestRequest validRequest = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.PUT) - .withPath("/some_index/_mapping") - .build(); - dispatchRequest(validRequest); - } - - public void testTypeInPath() { - // Test that specifying a type while include_type_name is false - // results in an illegal argument exception. - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.PUT) - .withPath("/some_index/_mapping/some_type") - .build(); - - FakeRestChannel channel = new FakeRestChannel(request, false, 1); - ThreadContext threadContext = new ThreadContext(Settings.EMPTY); - controller().dispatchRequest(request, channel, threadContext); - - assertEquals(1, channel.errors().get()); - assertEquals(RestStatus.BAD_REQUEST, channel.capturedResponse().status()); - } -} diff --git a/server/src/test/java/org/opensearch/rest/action/admin/indices/RestValidateQueryActionTests.java b/server/src/test/java/org/opensearch/rest/action/admin/indices/RestValidateQueryActionTests.java index 9ed3cc41e4fb0..cc1a9d4fd2e40 100644 --- a/server/src/test/java/org/opensearch/rest/action/admin/indices/RestValidateQueryActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/admin/indices/RestValidateQueryActionTests.java @@ -41,10 +41,8 @@ import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.xcontent.XContentType; import org.opensearch.indices.breaker.NoneCircuitBreakerService; -import org.opensearch.rest.RestChannel; import org.opensearch.rest.RestController; import org.opensearch.rest.RestRequest; import org.opensearch.search.AbstractSearchTestCase; @@ -174,31 +172,4 @@ private RestRequest createRestRequest(String content) { .withContent(new BytesArray(content), XContentType.JSON) .build(); } - - public void testTypeInPath() { - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.GET) - .withPath("/some_index/some_type/_validate/query") - .build(); - - performRequest(request); - assertWarnings(RestValidateQueryAction.TYPES_DEPRECATION_MESSAGE); - } - - public void testTypeParameter() { - Map params = new HashMap<>(); - params.put("type", 
"some_type"); - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.GET) - .withPath("_validate/query") - .withParams(params) - .build(); - - performRequest(request); - assertWarnings(RestValidateQueryAction.TYPES_DEPRECATION_MESSAGE); - } - - private void performRequest(RestRequest request) { - RestChannel channel = new FakeRestChannel(request, false, 1); - ThreadContext threadContext = new ThreadContext(Settings.EMPTY); - controller.dispatchRequest(request, channel, threadContext); - } } diff --git a/server/src/test/java/org/opensearch/rest/action/document/RestDeleteActionTests.java b/server/src/test/java/org/opensearch/rest/action/document/RestDeleteActionTests.java deleted file mode 100644 index ae7f5a3a92cdf..0000000000000 --- a/server/src/test/java/org/opensearch/rest/action/document/RestDeleteActionTests.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.rest.action.document; - -import org.opensearch.rest.RestRequest; -import org.opensearch.rest.RestRequest.Method; -import org.opensearch.test.rest.FakeRestRequest; -import org.opensearch.test.rest.RestActionTestCase; -import org.junit.Before; - -public class RestDeleteActionTests extends RestActionTestCase { - - @Before - public void setUpAction() { - controller().registerHandler(new RestDeleteAction()); - } - - public void testTypeInPath() { - // We're not actually testing anything to do with the client, but need to set this so it doesn't fail the test for being unset. 
- verifyingClient.setExecuteVerifier((arg1, arg2) -> null); - - RestRequest deprecatedRequest = new FakeRestRequest.Builder(xContentRegistry()).withMethod(Method.DELETE) - .withPath("/some_index/some_type/some_id") - .build(); - dispatchRequest(deprecatedRequest); - assertWarnings(RestDeleteAction.TYPES_DEPRECATION_MESSAGE); - - RestRequest validRequest = new FakeRestRequest.Builder(xContentRegistry()).withMethod(Method.DELETE) - .withPath("/some_index/_doc/some_id") - .build(); - dispatchRequest(validRequest); - } -} diff --git a/server/src/test/java/org/opensearch/rest/action/document/RestGetActionTests.java b/server/src/test/java/org/opensearch/rest/action/document/RestGetActionTests.java deleted file mode 100644 index d4d0a81ccecb7..0000000000000 --- a/server/src/test/java/org/opensearch/rest/action/document/RestGetActionTests.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.rest.action.document; - -import org.opensearch.rest.RestRequest.Method; -import org.opensearch.test.rest.FakeRestRequest; -import org.opensearch.test.rest.RestActionTestCase; -import org.junit.Before; - -public class RestGetActionTests extends RestActionTestCase { - - @Before - public void setUpAction() { - controller().registerHandler(new RestGetAction()); - } - - public void testTypeInPathWithGet() { - // We're not actually testing anything to do with the client, but need to set this so it doesn't fail the test for being unset. - verifyingClient.setExecuteVerifier((arg1, arg2) -> null); - - FakeRestRequest.Builder deprecatedRequest = new FakeRestRequest.Builder(xContentRegistry()).withPath( - "/some_index/some_type/some_id" - ); - dispatchRequest(deprecatedRequest.withMethod(Method.GET).build()); - assertWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE); - - FakeRestRequest.Builder validRequest = new FakeRestRequest.Builder(xContentRegistry()).withPath("/some_index/_doc/some_id"); - dispatchRequest(validRequest.withMethod(Method.GET).build()); - } - - public void testTypeInPathWithHead() { - // We're not actually testing anything to do with the client, but need to set this so it doesn't fail the test for being unset. 
- verifyingClient.setExecuteVerifier((arg1, arg2) -> null); - - FakeRestRequest.Builder deprecatedRequest = new FakeRestRequest.Builder(xContentRegistry()).withPath( - "/some_index/some_type/some_id" - ); - dispatchRequest(deprecatedRequest.withMethod(Method.HEAD).build()); - assertWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE); - - FakeRestRequest.Builder validRequest = new FakeRestRequest.Builder(xContentRegistry()).withPath("/some_index/_doc/some_id"); - dispatchRequest(validRequest.withMethod(Method.HEAD).build()); - } -} diff --git a/server/src/test/java/org/opensearch/rest/action/document/RestGetSourceActionTests.java b/server/src/test/java/org/opensearch/rest/action/document/RestGetSourceActionTests.java index 5563003fd923d..ca6ecd052fe6a 100644 --- a/server/src/test/java/org/opensearch/rest/action/document/RestGetSourceActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/document/RestGetSourceActionTests.java @@ -36,10 +36,8 @@ import org.opensearch.action.get.GetResponse; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.bytes.BytesReference; -import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.index.get.GetResult; import org.opensearch.rest.RestRequest; -import org.opensearch.rest.RestRequest.Method; import org.opensearch.rest.RestResponse; import org.opensearch.rest.action.document.RestGetSourceAction.RestGetSourceResponseListener; import org.opensearch.test.rest.FakeRestChannel; @@ -48,10 +46,6 @@ import org.junit.AfterClass; import org.junit.Before; -import java.util.Arrays; -import java.util.HashMap; -import java.util.Map; - import static java.util.Collections.emptyMap; import static org.opensearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; import static org.opensearch.rest.RestStatus.OK; @@ -75,58 +69,10 @@ public static void cleanupReferences() { listener = null; } - /** - * test deprecation is logged if type is used in path - */ - public void testTypeInPath() { - boolean assertWarnings = true; - // We're not actually testing anything to do with the client, but need to set this so it doesn't fail the test for being unset. - verifyingClient.setExecuteVerifier((arg1, arg2) -> null); - for (Method method : Arrays.asList(Method.GET, Method.HEAD)) { - // Ensure we have a fresh context for each request so we don't get duplicate headers - try (ThreadContext.StoredContext ignore = verifyingClient.threadPool().getThreadContext().stashContext()) { - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(method) - .withPath("/some_index/some_type/id/_source") - .build(); - - dispatchRequest(request); - if (assertWarnings) { - assertWarnings(RestGetSourceAction.TYPES_DEPRECATION_MESSAGE); - assertWarnings = false; - } - } - } - } - - /** - * test deprecation is logged if type is used as parameter - */ - public void testTypeParameter() { - boolean assertWarnings = true; - // We're not actually testing anything to do with the client, but need to set this so it doesn't fail the test for being unset. 
- verifyingClient.setExecuteVerifier((arg1, arg2) -> null); - Map params = new HashMap<>(); - params.put("type", "some_type"); - for (Method method : Arrays.asList(Method.GET, Method.HEAD)) { - // Ensure we have a fresh context for each request so we don't get duplicate headers - try (ThreadContext.StoredContext ignore = verifyingClient.threadPool().getThreadContext().stashContext()) { - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(method) - .withPath("/some_index/_source/id") - .withParams(params) - .build(); - dispatchRequest(request); - if (assertWarnings) { - assertWarnings(RestGetSourceAction.TYPES_DEPRECATION_MESSAGE); - assertWarnings = false; - } - } - } - } - public void testRestGetSourceAction() throws Exception { final BytesReference source = new BytesArray("{\"foo\": \"bar\"}"); final GetResponse response = new GetResponse( - new GetResult("index1", "_doc", "1", UNASSIGNED_SEQ_NO, 0, -1, true, source, emptyMap(), null) + new GetResult("index1", "1", UNASSIGNED_SEQ_NO, 0, -1, true, source, emptyMap(), null) ); final RestResponse restResponse = listener.buildResponse(response); @@ -137,22 +83,18 @@ public void testRestGetSourceAction() throws Exception { } public void testRestGetSourceActionWithMissingDocument() { - final GetResponse response = new GetResponse( - new GetResult("index1", "_doc", "1", UNASSIGNED_SEQ_NO, 0, -1, false, null, emptyMap(), null) - ); + final GetResponse response = new GetResponse(new GetResult("index1", "1", UNASSIGNED_SEQ_NO, 0, -1, false, null, emptyMap(), null)); final ResourceNotFoundException exception = expectThrows(ResourceNotFoundException.class, () -> listener.buildResponse(response)); - assertThat(exception.getMessage(), equalTo("Document not found [index1]/[_doc]/[1]")); + assertThat(exception.getMessage(), equalTo("Document not found [index1]/[1]")); } public void testRestGetSourceActionWithMissingDocumentSource() { - final GetResponse response = new GetResponse( - new GetResult("index1", "_doc", "1", UNASSIGNED_SEQ_NO, 0, -1, true, null, emptyMap(), null) - ); + final GetResponse response = new GetResponse(new GetResult("index1", "1", UNASSIGNED_SEQ_NO, 0, -1, true, null, emptyMap(), null)); final ResourceNotFoundException exception = expectThrows(ResourceNotFoundException.class, () -> listener.buildResponse(response)); - assertThat(exception.getMessage(), equalTo("Source not found [index1]/[_doc]/[1]")); + assertThat(exception.getMessage(), equalTo("Source not found [index1]/[1]")); } } diff --git a/server/src/test/java/org/opensearch/rest/action/document/RestIndexActionTests.java b/server/src/test/java/org/opensearch/rest/action/document/RestIndexActionTests.java index 021563d232965..5a1d43ff5dd04 100644 --- a/server/src/test/java/org/opensearch/rest/action/document/RestIndexActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/document/RestIndexActionTests.java @@ -67,26 +67,14 @@ public void setUpAction() { controller().registerHandler(new AutoIdHandler(() -> clusterStateSupplier.get().nodes())); } - public void testTypeInPath() { - RestRequest deprecatedRequest = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.PUT) - .withPath("/some_index/some_type/some_id") - .build(); - dispatchRequest(deprecatedRequest); - assertWarnings(RestIndexAction.TYPES_DEPRECATION_MESSAGE); - + public void testPath() { RestRequest validRequest = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.PUT) .withPath("/some_index/_doc/some_id") .build(); 
dispatchRequest(validRequest); } - public void testCreateWithTypeInPath() { - RestRequest deprecatedRequest = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.PUT) - .withPath("/some_index/some_type/some_id/_create") - .build(); - dispatchRequest(deprecatedRequest); - assertWarnings(RestIndexAction.TYPES_DEPRECATION_MESSAGE); - + public void testCreatePath() { RestRequest validRequest = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.PUT) .withPath("/some_index/_create/some_id") .build(); diff --git a/server/src/test/java/org/opensearch/rest/action/document/RestMultiGetActionTests.java b/server/src/test/java/org/opensearch/rest/action/document/RestMultiGetActionTests.java deleted file mode 100644 index b157c3b070fad..0000000000000 --- a/server/src/test/java/org/opensearch/rest/action/document/RestMultiGetActionTests.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.rest.action.document; - -import org.opensearch.common.bytes.BytesReference; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.rest.RestRequest; -import org.opensearch.rest.RestRequest.Method; -import org.opensearch.test.rest.FakeRestRequest; -import org.opensearch.test.rest.RestActionTestCase; -import org.junit.Before; - -public class RestMultiGetActionTests extends RestActionTestCase { - - @Before - public void setUpAction() { - controller().registerHandler(new RestMultiGetAction(Settings.EMPTY)); - } - - public void testTypeInPath() { - // We're not actually testing anything to do with the client, but need to set this so it doesn't fail the test for being unset. 
- verifyingClient.setExecuteVerifier((arg1, arg2) -> null); - - RestRequest deprecatedRequest = new FakeRestRequest.Builder(xContentRegistry()).withMethod(Method.GET) - .withPath("some_index/some_type/_mget") - .build(); - dispatchRequest(deprecatedRequest); - assertWarnings(RestMultiGetAction.TYPES_DEPRECATION_MESSAGE); - - RestRequest validRequest = new FakeRestRequest.Builder(xContentRegistry()).withMethod(Method.GET) - .withPath("some_index/_mget") - .build(); - dispatchRequest(validRequest); - } - - public void testTypeInBody() throws Exception { - XContentBuilder content = XContentFactory.jsonBuilder() - .startObject() - .startArray("docs") - .startObject() - .field("_index", "some_index") - .field("_type", "_doc") - .field("_id", "2") - .endObject() - .startObject() - .field("_index", "test") - .field("_id", "2") - .endObject() - .endArray() - .endObject(); - - // We're not actually testing anything to do with the client, but need to set this so it doesn't fail the test for being unset. - verifyingClient.setExecuteVerifier((arg1, arg2) -> null); - - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withPath("_mget") - .withContent(BytesReference.bytes(content), XContentType.JSON) - .build(); - dispatchRequest(request); - assertWarnings(RestMultiGetAction.TYPES_DEPRECATION_MESSAGE); - } -} diff --git a/server/src/test/java/org/opensearch/rest/action/document/RestMultiTermVectorsActionTests.java b/server/src/test/java/org/opensearch/rest/action/document/RestMultiTermVectorsActionTests.java deleted file mode 100644 index fa2d580b24e89..0000000000000 --- a/server/src/test/java/org/opensearch/rest/action/document/RestMultiTermVectorsActionTests.java +++ /dev/null @@ -1,106 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.rest.action.document; - -import org.opensearch.common.bytes.BytesReference; -import org.opensearch.common.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.rest.RestRequest; -import org.opensearch.rest.RestRequest.Method; -import org.opensearch.test.rest.FakeRestRequest; -import org.opensearch.test.rest.RestActionTestCase; -import org.junit.Before; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; - -public class RestMultiTermVectorsActionTests extends RestActionTestCase { - - @Before - public void setUpAction() { - controller().registerHandler(new RestMultiTermVectorsAction()); - } - - public void testTypeInPath() { - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(Method.POST) - .withPath("/some_index/some_type/_mtermvectors") - .build(); - - // We're not actually testing anything to do with the client, but need to set this so it doesn't fail the test for being unset. - verifyingClient.setExecuteVerifier((arg1, arg2) -> null); - - dispatchRequest(request); - assertWarnings(RestMultiTermVectorsAction.TYPES_DEPRECATION_MESSAGE); - } - - public void testTypeParameter() { - Map params = new HashMap<>(); - params.put("type", "some_type"); - - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(Method.GET) - .withPath("/some_index/_mtermvectors") - .withParams(params) - .build(); - - // We're not actually testing anything to do with the client, but need to set this so it doesn't fail the test for being unset. - verifyingClient.setExecuteVerifier((arg1, arg2) -> null); - - dispatchRequest(request); - assertWarnings(RestMultiTermVectorsAction.TYPES_DEPRECATION_MESSAGE); - } - - public void testTypeInBody() throws IOException { - XContentBuilder content = XContentFactory.jsonBuilder() - .startObject() - .startArray("docs") - .startObject() - .field("_type", "some_type") - .field("_id", 1) - .endObject() - .endArray() - .endObject(); - - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(Method.GET) - .withPath("/some_index/_mtermvectors") - .withContent(BytesReference.bytes(content), XContentType.JSON) - .build(); - - // We're not actually testing anything to do with the client, but need to set this so it doesn't fail the test for being unset. - verifyingClient.setExecuteVerifier((arg1, arg2) -> null); - - dispatchRequest(request); - assertWarnings(RestTermVectorsAction.TYPES_DEPRECATION_MESSAGE); - } -} diff --git a/server/src/test/java/org/opensearch/rest/action/document/RestTermVectorsActionTests.java b/server/src/test/java/org/opensearch/rest/action/document/RestTermVectorsActionTests.java deleted file mode 100644 index 811c81e391acb..0000000000000 --- a/server/src/test/java/org/opensearch/rest/action/document/RestTermVectorsActionTests.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.rest.action.document; - -import org.opensearch.common.bytes.BytesReference; -import org.opensearch.common.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.rest.RestRequest; -import org.opensearch.rest.RestRequest.Method; -import org.opensearch.test.rest.FakeRestRequest; -import org.opensearch.test.rest.RestActionTestCase; -import org.junit.Before; - -import java.io.IOException; - -public class RestTermVectorsActionTests extends RestActionTestCase { - - @Before - public void setUpAction() { - controller().registerHandler(new RestTermVectorsAction()); - } - - public void testTypeInPath() { - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(Method.POST) - .withPath("/some_index/some_type/some_id/_termvectors") - .build(); - - // We're not actually testing anything to do with the client, but need to set this so it doesn't fail the test for being unset. - verifyingClient.setExecuteVerifier((arg1, arg2) -> null); - - dispatchRequest(request); - assertWarnings(RestTermVectorsAction.TYPES_DEPRECATION_MESSAGE); - } - - public void testTypeInBody() throws IOException { - XContentBuilder content = XContentFactory.jsonBuilder().startObject().field("_type", "some_type").field("_id", 1).endObject(); - - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(Method.GET) - .withPath("/some_index/_termvectors/some_id") - .withContent(BytesReference.bytes(content), XContentType.JSON) - .build(); - - // We're not actually testing anything to do with the client, but need to set this so it doesn't fail the test for being unset. 
- verifyingClient.setExecuteVerifier((arg1, arg2) -> null); - - dispatchRequest(request); - assertWarnings(RestTermVectorsAction.TYPES_DEPRECATION_MESSAGE); - } -} diff --git a/server/src/test/java/org/opensearch/rest/action/document/RestUpdateActionTests.java b/server/src/test/java/org/opensearch/rest/action/document/RestUpdateActionTests.java index 5706311425e7c..bf544acf13508 100644 --- a/server/src/test/java/org/opensearch/rest/action/document/RestUpdateActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/document/RestUpdateActionTests.java @@ -38,7 +38,6 @@ import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.VersionType; import org.opensearch.rest.RestRequest; -import org.opensearch.rest.RestRequest.Method; import org.opensearch.test.rest.FakeRestRequest; import org.opensearch.test.rest.RestActionTestCase; import org.junit.Before; @@ -59,22 +58,6 @@ public void setUpAction() { controller().registerHandler(action); } - public void testTypeInPath() { - // We're not actually testing anything to do with the client, but need to set this so it doesn't fail the test for being unset. - verifyingClient.setExecuteVerifier((arg1, arg2) -> null); - - RestRequest deprecatedRequest = new FakeRestRequest.Builder(xContentRegistry()).withMethod(Method.POST) - .withPath("/some_index/some_type/some_id/_update") - .build(); - dispatchRequest(deprecatedRequest); - assertWarnings(RestUpdateAction.TYPES_DEPRECATION_MESSAGE); - - RestRequest validRequest = new FakeRestRequest.Builder(xContentRegistry()).withMethod(Method.POST) - .withPath("/some_index/_update/some_id") - .build(); - dispatchRequest(validRequest); - } - public void testUpdateDocVersion() { Map params = new HashMap<>(); if (randomBoolean()) { diff --git a/server/src/test/java/org/opensearch/rest/action/search/RestCountActionTests.java b/server/src/test/java/org/opensearch/rest/action/search/RestCountActionTests.java deleted file mode 100644 index bc1af7f41ceed..0000000000000 --- a/server/src/test/java/org/opensearch/rest/action/search/RestCountActionTests.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.rest.action.search; - -import org.opensearch.rest.RestRequest; -import org.opensearch.rest.RestRequest.Method; -import org.opensearch.test.rest.FakeRestRequest; -import org.opensearch.test.rest.RestActionTestCase; -import org.junit.Before; - -import java.util.HashMap; -import java.util.Map; - -public class RestCountActionTests extends RestActionTestCase { - - @Before - public void setUpAction() { - controller().registerHandler(new RestCountAction()); - } - - public void testTypeInPath() { - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(Method.POST) - .withPath("/some_index/some_type/_count") - .build(); - - // We're not actually testing anything to do with the client, but need to set this so it doesn't fail the test for being unset. - verifyingClient.setExecuteVerifier((arg1, arg2) -> null); - - dispatchRequest(request); - assertWarnings(RestCountAction.TYPES_DEPRECATION_MESSAGE); - } - - public void testTypeParameter() { - Map params = new HashMap<>(); - params.put("type", "some_type"); - - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(Method.GET) - .withPath("/some_index/_count") - .withParams(params) - .build(); - - // We're not actually testing anything to do with the client, but need to set this so it doesn't fail the test for being unset. - verifyingClient.setExecuteVerifier((arg1, arg2) -> null); - - dispatchRequest(request); - assertWarnings(RestCountAction.TYPES_DEPRECATION_MESSAGE); - } -} diff --git a/server/src/test/java/org/opensearch/rest/action/search/RestExplainActionTests.java b/server/src/test/java/org/opensearch/rest/action/search/RestExplainActionTests.java deleted file mode 100644 index 1e0668ad15262..0000000000000 --- a/server/src/test/java/org/opensearch/rest/action/search/RestExplainActionTests.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.rest.action.search; - -import org.opensearch.rest.RestRequest; -import org.opensearch.test.rest.FakeRestRequest; -import org.opensearch.test.rest.RestActionTestCase; -import org.junit.Before; - -public class RestExplainActionTests extends RestActionTestCase { - - @Before - public void setUpAction() { - controller().registerHandler(new RestExplainAction()); - } - - public void testTypeInPath() { - // We're not actually testing anything to do with the client, but need to set this so it doesn't fail the test for being unset. 
- verifyingClient.setExecuteVerifier((arg1, arg2) -> null); - - RestRequest deprecatedRequest = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.GET) - .withPath("/some_index/some_type/some_id/_explain") - .build(); - dispatchRequest(deprecatedRequest); - assertWarnings(RestExplainAction.TYPES_DEPRECATION_MESSAGE); - - RestRequest validRequest = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.GET) - .withPath("/some_index/_explain/some_id") - .build(); - dispatchRequest(validRequest); - } -} diff --git a/server/src/test/java/org/opensearch/rest/action/search/RestMultiSearchActionTests.java b/server/src/test/java/org/opensearch/rest/action/search/RestMultiSearchActionTests.java deleted file mode 100644 index 7bec390cd7901..0000000000000 --- a/server/src/test/java/org/opensearch/rest/action/search/RestMultiSearchActionTests.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.rest.action.search; - -import org.opensearch.common.bytes.BytesArray; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.rest.RestRequest; -import org.opensearch.test.rest.FakeRestRequest; -import org.opensearch.test.rest.RestActionTestCase; -import org.junit.Before; - -import java.nio.charset.StandardCharsets; - -public class RestMultiSearchActionTests extends RestActionTestCase { - - @Before - public void setUpAction() { - controller().registerHandler(new RestMultiSearchAction(Settings.EMPTY)); - } - - public void testTypeInPath() { - String content = "{ \"index\": \"some_index\" } \n {} \n"; - BytesArray bytesContent = new BytesArray(content.getBytes(StandardCharsets.UTF_8)); - - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.GET) - .withPath("/some_index/some_type/_msearch") - .withContent(bytesContent, XContentType.JSON) - .build(); - - // We're not actually testing anything to do with the client, but need to set this so it doesn't fail the test for being unset. 
- verifyingClient.setExecuteLocallyVerifier((arg1, arg2) -> null); - - dispatchRequest(request); - assertWarnings(RestMultiSearchAction.TYPES_DEPRECATION_MESSAGE); - } - - public void testTypeInBody() { - String content = "{ \"index\": \"some_index\", \"type\": \"some_type\" } \n {} \n"; - BytesArray bytesContent = new BytesArray(content.getBytes(StandardCharsets.UTF_8)); - - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.POST) - .withPath("/some_index/_msearch") - .withContent(bytesContent, XContentType.JSON) - .build(); - - // We're not actually testing anything to do with the client, but need to set this so it doesn't fail the test for being unset. - verifyingClient.setExecuteLocallyVerifier((arg1, arg2) -> null); - - dispatchRequest(request); - assertWarnings(RestMultiSearchAction.TYPES_DEPRECATION_MESSAGE); - } -} diff --git a/server/src/test/java/org/opensearch/rest/action/search/RestSearchActionTests.java b/server/src/test/java/org/opensearch/rest/action/search/RestSearchActionTests.java deleted file mode 100644 index a343c93d717b9..0000000000000 --- a/server/src/test/java/org/opensearch/rest/action/search/RestSearchActionTests.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.rest.action.search; - -import org.opensearch.rest.RestRequest; -import org.opensearch.test.rest.FakeRestRequest; -import org.opensearch.test.rest.RestActionTestCase; -import org.junit.Before; - -import java.util.HashMap; -import java.util.Map; - -public class RestSearchActionTests extends RestActionTestCase { - - @Before - public void setUpAction() { - controller().registerHandler(new RestSearchAction()); - } - - public void testTypeInPath() { - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.GET) - .withPath("/some_index/some_type/_search") - .build(); - - // We're not actually testing anything to do with the client, but need to set this so it doesn't fail the test for being unset. 
- verifyingClient.setExecuteLocallyVerifier((arg1, arg2) -> null); - - dispatchRequest(request); - assertWarnings(RestSearchAction.TYPES_DEPRECATION_MESSAGE); - } - - public void testTypeParameter() { - Map params = new HashMap<>(); - params.put("type", "some_type"); - - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.GET) - .withPath("/some_index/_search") - .withParams(params) - .build(); - - // We're not actually testing anything to do with the client, but need to set this so it doesn't fail the test for being unset. - verifyingClient.setExecuteLocallyVerifier((arg1, arg2) -> null); - - dispatchRequest(request); - assertWarnings(RestSearchAction.TYPES_DEPRECATION_MESSAGE); - } -} diff --git a/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java b/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java index c80c01cdc1673..2bd8c697adb3c 100644 --- a/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java +++ b/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java @@ -97,7 +97,6 @@ public void testPreProcess() throws Exception { when(shardSearchRequest.searchType()).thenReturn(SearchType.DEFAULT); ShardId shardId = new ShardId("index", UUID.randomUUID().toString(), 1); when(shardSearchRequest.shardId()).thenReturn(shardId); - when(shardSearchRequest.types()).thenReturn(new String[] {}); ThreadPool threadPool = new TestThreadPool(this.getClass().getName()); IndexShard indexShard = mock(IndexShard.class); diff --git a/server/src/test/java/org/opensearch/search/SearchHitTests.java b/server/src/test/java/org/opensearch/search/SearchHitTests.java index cee9692747d54..8307b7faa71f1 100644 --- a/server/src/test/java/org/opensearch/search/SearchHitTests.java +++ b/server/src/test/java/org/opensearch/search/SearchHitTests.java @@ -41,7 +41,6 @@ import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.document.DocumentField; import org.opensearch.common.io.stream.Writeable; -import org.opensearch.common.text.Text; import org.opensearch.common.xcontent.ToXContent; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentParser; @@ -83,7 +82,6 @@ public static SearchHit createTestItem(boolean withOptionalInnerHits, boolean wi public static SearchHit createTestItem(XContentType xContentType, boolean withOptionalInnerHits, boolean transportSerialization) { int internalId = randomInt(); String uid = randomAlphaOfLength(10); - Text type = new Text(randomAlphaOfLengthBetween(5, 10)); NestedIdentity nestedIdentity = null; if (randomBoolean()) { nestedIdentity = NestedIdentityTests.createTestItem(randomIntBetween(0, 2)); @@ -97,7 +95,7 @@ public static SearchHit createTestItem(XContentType xContentType, boolean withOp } } - SearchHit hit = new SearchHit(internalId, uid, type, nestedIdentity, documentFields, metaFields); + SearchHit hit = new SearchHit(internalId, uid, nestedIdentity, documentFields, metaFields); if (frequently()) { if (rarely()) { hit.score(Float.NaN); @@ -234,16 +232,15 @@ public void testFromXContentWithoutTypeAndId() throws IOException { } assertEquals("my_index", parsed.getIndex()); assertEquals(1, parsed.getScore(), Float.MIN_VALUE); - assertNull(parsed.getType()); assertNull(parsed.getId()); } public void testToXContent() throws IOException { - SearchHit searchHit = new SearchHit(1, "id1", new Text("type"), Collections.emptyMap(), Collections.emptyMap()); + SearchHit searchHit = new 
SearchHit(1, "id1", Collections.emptyMap(), Collections.emptyMap()); searchHit.score(1.5f); XContentBuilder builder = JsonXContent.contentBuilder(); searchHit.toXContent(builder, ToXContent.EMPTY_PARAMS); - assertEquals("{\"_type\":\"type\",\"_id\":\"id1\",\"_score\":1.5}", Strings.toString(builder)); + assertEquals("{\"_id\":\"id1\",\"_score\":1.5}", Strings.toString(builder)); } public void testSerializeShardTarget() throws Exception { @@ -256,25 +253,25 @@ public void testSerializeShardTarget() throws Exception { ); Map innerHits = new HashMap<>(); - SearchHit innerHit1 = new SearchHit(0, "_id", new Text("_type"), null, null); + SearchHit innerHit1 = new SearchHit(0, "_id", null, null); innerHit1.shard(target); - SearchHit innerInnerHit2 = new SearchHit(0, "_id", new Text("_type"), null, null); + SearchHit innerInnerHit2 = new SearchHit(0, "_id", null, null); innerInnerHit2.shard(target); innerHits.put("1", new SearchHits(new SearchHit[] { innerInnerHit2 }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1f)); innerHit1.setInnerHits(innerHits); - SearchHit innerHit2 = new SearchHit(0, "_id", new Text("_type"), null, null); + SearchHit innerHit2 = new SearchHit(0, "_id", null, null); innerHit2.shard(target); - SearchHit innerHit3 = new SearchHit(0, "_id", new Text("_type"), null, null); + SearchHit innerHit3 = new SearchHit(0, "_id", null, null); innerHit3.shard(target); innerHits = new HashMap<>(); - SearchHit hit1 = new SearchHit(0, "_id", new Text("_type"), null, null); + SearchHit hit1 = new SearchHit(0, "_id", null, null); innerHits.put("1", new SearchHits(new SearchHit[] { innerHit1, innerHit2 }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1f)); innerHits.put("2", new SearchHits(new SearchHit[] { innerHit3 }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1f)); hit1.shard(target); hit1.setInnerHits(innerHits); - SearchHit hit2 = new SearchHit(0, "_id", new Text("_type"), null, null); + SearchHit hit2 = new SearchHit(0, "_id", null, null); hit2.shard(target); SearchHits hits = new SearchHits(new SearchHit[] { hit1, hit2 }, new TotalHits(2, TotalHits.Relation.EQUAL_TO), 1f); @@ -301,7 +298,7 @@ public void testSerializeShardTarget() throws Exception { } public void testNullSource() { - SearchHit searchHit = new SearchHit(0, "_id", new Text("_type"), null, null); + SearchHit searchHit = new SearchHit(0, "_id", null, null); assertThat(searchHit.getSourceAsMap(), nullValue()); assertThat(searchHit.getSourceRef(), nullValue()); @@ -325,7 +322,6 @@ public void testWeirdScriptFields() throws Exception { XContentType.JSON.xContent(), "{\n" + " \"_index\": \"twitter\",\n" - + " \"_type\": \"tweet\",\n" + " \"_id\": \"1\",\n" + " \"_score\": 1.0,\n" + " \"fields\": {\n" @@ -346,7 +342,6 @@ public void testWeirdScriptFields() throws Exception { XContentType.JSON.xContent(), "{\n" + " \"_index\": \"twitter\",\n" - + " \"_type\": \"tweet\",\n" + " \"_id\": \"1\",\n" + " \"_score\": 1.0,\n" + " \"fields\": {\n" @@ -371,7 +366,6 @@ public void testWeirdScriptFields() throws Exception { JsonXContent.jsonXContent, "{\n" + " \"_index\": \"twitter\",\n" - + " \"_type\": \"tweet\",\n" + " \"_id\": \"1\",\n" + " \"_score\": 1.0,\n" + " \"fields\": {\n" diff --git a/server/src/test/java/org/opensearch/search/SearchHitsTests.java b/server/src/test/java/org/opensearch/search/SearchHitsTests.java index c239e87c30e38..fc5bfc90a1e34 100644 --- a/server/src/test/java/org/opensearch/search/SearchHitsTests.java +++ b/server/src/test/java/org/opensearch/search/SearchHitsTests.java @@ -40,7 +40,6 @@ import 
org.opensearch.common.bytes.BytesReference; import org.opensearch.common.io.stream.Writeable; import org.opensearch.common.lucene.LuceneTests; -import org.opensearch.common.text.Text; import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.common.xcontent.ToXContent; import org.opensearch.common.xcontent.XContentBuilder; @@ -248,8 +247,8 @@ protected SearchHits doParseInstance(XContentParser parser) throws IOException { public void testToXContent() throws IOException { SearchHit[] hits = new SearchHit[] { - new SearchHit(1, "id1", new Text("type"), Collections.emptyMap(), Collections.emptyMap()), - new SearchHit(2, "id2", new Text("type"), Collections.emptyMap(), Collections.emptyMap()) }; + new SearchHit(1, "id1", Collections.emptyMap(), Collections.emptyMap()), + new SearchHit(2, "id2", Collections.emptyMap(), Collections.emptyMap()) }; long totalHits = 1000; float maxScore = 1.5f; @@ -260,8 +259,8 @@ public void testToXContent() throws IOException { builder.endObject(); assertEquals( "{\"hits\":{\"total\":{\"value\":1000,\"relation\":\"eq\"},\"max_score\":1.5," - + "\"hits\":[{\"_type\":\"type\",\"_id\":\"id1\",\"_score\":null}," - + "{\"_type\":\"type\",\"_id\":\"id2\",\"_score\":null}]}}", + + "\"hits\":[{\"_id\":\"id1\",\"_score\":null}," + + "{\"_id\":\"id2\",\"_score\":null}]}}", Strings.toString(builder) ); } @@ -269,9 +268,9 @@ public void testToXContent() throws IOException { public void testFromXContentWithShards() throws IOException { for (boolean withExplanation : new boolean[] { true, false }) { final SearchHit[] hits = new SearchHit[] { - new SearchHit(1, "id1", new Text("type"), Collections.emptyMap(), Collections.emptyMap()), - new SearchHit(2, "id2", new Text("type"), Collections.emptyMap(), Collections.emptyMap()), - new SearchHit(10, "id10", new Text("type"), Collections.emptyMap(), Collections.emptyMap()) }; + new SearchHit(1, "id1", Collections.emptyMap(), Collections.emptyMap()), + new SearchHit(2, "id2", Collections.emptyMap(), Collections.emptyMap()), + new SearchHit(10, "id10", Collections.emptyMap(), Collections.emptyMap()) }; for (SearchHit hit : hits) { String index = randomAlphaOfLengthBetween(5, 10); diff --git a/server/src/test/java/org/opensearch/search/SearchServiceTests.java b/server/src/test/java/org/opensearch/search/SearchServiceTests.java index 5575e27619fa9..7ba31d158f3c5 100644 --- a/server/src/test/java/org/opensearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/opensearch/search/SearchServiceTests.java @@ -1180,7 +1180,7 @@ public void testCreateSearchContextFailure() throws Exception { final IndexService indexService = createIndex(index); final SearchService service = getInstanceFromNode(SearchService.class); final ShardId shardId = new ShardId(indexService.index(), 0); - final ShardSearchRequest request = new ShardSearchRequest(shardId, new String[0], 0, null) { + final ShardSearchRequest request = new ShardSearchRequest(shardId, 0, null) { @Override public SearchType searchType() { // induce an artificial NPE diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/ShardSizeTestCase.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/ShardSizeTestCase.java index d48a1139d8678..24022a6f41a09 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/ShardSizeTestCase.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/ShardSizeTestCase.java @@ -102,11 +102,11 @@ protected void indexData() throws Exception { 
indexRandom(true, docs); - SearchResponse resp = client().prepareSearch("idx").setTypes("type").setRouting(routing1).setQuery(matchAllQuery()).get(); + SearchResponse resp = client().prepareSearch("idx").setRouting(routing1).setQuery(matchAllQuery()).get(); assertSearchResponse(resp); long totalOnOne = resp.getHits().getTotalHits().value; assertThat(totalOnOne, is(15L)); - resp = client().prepareSearch("idx").setTypes("type").setRouting(routing2).setQuery(matchAllQuery()).get(); + resp = client().prepareSearch("idx").setRouting(routing2).setQuery(matchAllQuery()).get(); assertSearchResponse(resp); long totalOnTwo = resp.getHits().getTotalHits().value; assertThat(totalOnTwo, is(12L)); diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationBuilderTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationBuilderTests.java index 4316acc8c2b85..c4a87f3993bb4 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationBuilderTests.java @@ -37,6 +37,7 @@ import org.opensearch.search.aggregations.BaseAggregationTestCase; import org.opensearch.search.aggregations.bucket.geogrid.GeoTileUtils; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.opensearch.search.aggregations.bucket.missing.MissingOrder; import org.opensearch.search.sort.SortOrder; import java.util.ArrayList; @@ -69,6 +70,7 @@ private DateHistogramValuesSourceBuilder randomDateHistogramSourceBuilder() { if (randomBoolean()) { histo.missingBucket(true); } + histo.missingOrder(randomFrom(MissingOrder.values())); return histo; } @@ -94,6 +96,7 @@ private TermsValuesSourceBuilder randomTermsSourceBuilder() { if (randomBoolean()) { terms.missingBucket(true); } + terms.missingOrder(randomFrom(MissingOrder.values())); return terms; } @@ -108,6 +111,7 @@ private HistogramValuesSourceBuilder randomHistogramSourceBuilder() { histo.missingBucket(true); } histo.interval(randomDoubleBetween(Math.nextUp(0), Double.MAX_VALUE, false)); + histo.missingOrder(randomFrom(MissingOrder.values())); return histo; } diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java index e8df1753a79b8..f81bd012bfa63 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java @@ -82,6 +82,7 @@ import org.opensearch.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; import org.opensearch.search.aggregations.bucket.geogrid.GeoTileUtils; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.opensearch.search.aggregations.bucket.missing.MissingOrder; import org.opensearch.search.aggregations.bucket.terms.StringTerms; import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.opensearch.search.aggregations.metrics.InternalMax; @@ -586,6 +587,84 @@ public void testWithKeywordAndMissingBucket() throws Exception { assertEquals(0, result.getBuckets().size()); assertNull(result.afterKey()); }); + + // sort ascending, null bucket is first, same as default. 
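+ // missingOrder controls where the null bucket created by missingBucket(true) sorts relative to real values: FIRST, LAST, or DEFAULT (first when ascending, last when descending).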
+ testSearchCase(Arrays.asList(new MatchAllDocsQuery()), dataset, () -> { + TermsValuesSourceBuilder terms = new TermsValuesSourceBuilder("keyword").field("keyword") + .missingBucket(true) + .missingOrder(MissingOrder.FIRST); + return new CompositeAggregationBuilder("name", Collections.singletonList(terms)); + }, (result) -> { + assertEquals(4, result.getBuckets().size()); + assertEquals("{keyword=d}", result.afterKey().toString()); + assertEquals("{keyword=null}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(0).getDocCount()); + assertEquals("{keyword=a}", result.getBuckets().get(1).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(1).getDocCount()); + assertEquals("{keyword=c}", result.getBuckets().get(2).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(2).getDocCount()); + assertEquals("{keyword=d}", result.getBuckets().get(3).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(3).getDocCount()); + }); + + // sort ascending, null bucket is last. + testSearchCase(Arrays.asList(new MatchAllDocsQuery()), dataset, () -> { + TermsValuesSourceBuilder terms = new TermsValuesSourceBuilder("keyword").field("keyword") + .missingBucket(true) + .missingOrder(MissingOrder.LAST); + return new CompositeAggregationBuilder("name", Collections.singletonList(terms)); + }, (result) -> { + assertEquals(4, result.getBuckets().size()); + assertEquals("{keyword=null}", result.afterKey().toString()); + assertEquals("{keyword=a}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(0).getDocCount()); + assertEquals("{keyword=c}", result.getBuckets().get(1).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(1).getDocCount()); + assertEquals("{keyword=d}", result.getBuckets().get(2).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(2).getDocCount()); + assertEquals("{keyword=null}", result.getBuckets().get(3).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(3).getDocCount()); + }); + + // sort descending, null bucket is last, same as default + testSearchCase(Arrays.asList(new MatchAllDocsQuery()), dataset, () -> { + TermsValuesSourceBuilder terms = new TermsValuesSourceBuilder("keyword").field("keyword") + .missingBucket(true) + .missingOrder(MissingOrder.LAST) + .order(SortOrder.DESC); + return new CompositeAggregationBuilder("name", Collections.singletonList(terms)); + }, (result) -> { + assertEquals(4, result.getBuckets().size()); + assertEquals("{keyword=null}", result.afterKey().toString()); + assertEquals("{keyword=null}", result.getBuckets().get(3).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(3).getDocCount()); + assertEquals("{keyword=a}", result.getBuckets().get(2).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(2).getDocCount()); + assertEquals("{keyword=c}", result.getBuckets().get(1).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(1).getDocCount()); + assertEquals("{keyword=d}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(0).getDocCount()); + }); + + // sort descending, null bucket is first + testSearchCase(Arrays.asList(new MatchAllDocsQuery()), dataset, () -> { + TermsValuesSourceBuilder terms = new TermsValuesSourceBuilder("keyword").field("keyword") + .missingBucket(true) + .missingOrder(MissingOrder.FIRST) + .order(SortOrder.DESC); + return new CompositeAggregationBuilder("name", Collections.singletonList(terms)); + }, (result) -> { + assertEquals(4, 
result.getBuckets().size()); + assertEquals("{keyword=a}", result.afterKey().toString()); + assertEquals("{keyword=null}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(0).getDocCount()); + assertEquals("{keyword=d}", result.getBuckets().get(1).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(1).getDocCount()); + assertEquals("{keyword=c}", result.getBuckets().get(2).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(2).getDocCount()); + assertEquals("{keyword=a}", result.getBuckets().get(3).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(3).getDocCount()); + }); } public void testWithKeywordMissingAfter() throws Exception { @@ -901,14 +980,14 @@ public void testWithKeywordLongAndMissingBucket() throws Exception { final List<Map<String, List<Object>>> dataset = new ArrayList<>(); dataset.addAll( Arrays.asList( - createDocument("keyword", "a", "long", 100L), + createDocument("double", 0d, "keyword", "a", "long", 100L), createDocument("double", 0d), - createDocument("keyword", "c", "long", 100L), - createDocument("keyword", "a", "long", 0L), - createDocument("keyword", "d", "long", 10L), - createDocument("keyword", "c"), - createDocument("keyword", "c", "long", 100L), - createDocument("long", 100L), + createDocument("double", 0d, "keyword", "c", "long", 100L), + createDocument("double", 0d, "keyword", "a", "long", 0L), + createDocument("double", 0d, "keyword", "d", "long", 10L), + createDocument("double", 0d, "keyword", "c"), + createDocument("double", 0d, "keyword", "c", "long", 100L), + createDocument("double", 0d, "long", 100L), createDocument("double", 0d) ) ); @@ -961,6 +1040,112 @@ assertEquals(1L, result.getBuckets().get(1).getDocCount()); } ); + + // keyword null bucket is last, long null bucket is last + testSearchCase( + Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("double")), + dataset, + () -> new CompositeAggregationBuilder( + "name", + Arrays.asList( + new TermsValuesSourceBuilder("keyword").field("keyword").missingBucket(true).missingOrder(MissingOrder.LAST), + new TermsValuesSourceBuilder("long").field("long").missingBucket(true).missingOrder(MissingOrder.LAST) + ) + ), + (result) -> { + assertEquals(7, result.getBuckets().size()); + assertEquals("{keyword=null, long=null}", result.afterKey().toString()); + assertEquals("{keyword=null, long=null}", result.getBuckets().get(6).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(6).getDocCount()); + assertEquals("{keyword=null, long=100}", result.getBuckets().get(5).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(5).getDocCount()); + } + ); + + // keyword null bucket is last, long null bucket is first + testSearchCase( + Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("double")), + dataset, + () -> new CompositeAggregationBuilder( + "name", + Arrays.asList( + new TermsValuesSourceBuilder("keyword").field("keyword").missingBucket(true).missingOrder(MissingOrder.LAST), + new TermsValuesSourceBuilder("long").field("long").missingBucket(true).missingOrder(MissingOrder.FIRST) + ) + ), + (result) -> { + assertEquals(7, result.getBuckets().size()); + assertEquals("{keyword=null, long=100}", result.afterKey().toString()); + assertEquals("{keyword=null, long=100}", result.getBuckets().get(6).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(6).getDocCount()); + assertEquals("{keyword=null, long=null}",
result.getBuckets().get(5).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(5).getDocCount()); + } + ); + + // asc, null bucket is last, search after non null value + testSearchCase( + Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("double")), + dataset, + () -> new CompositeAggregationBuilder( + "name", + Arrays.asList(new TermsValuesSourceBuilder("keyword").field("keyword").missingBucket(true).missingOrder(MissingOrder.LAST)) + ).aggregateAfter(createAfterKey("keyword", "c")), + (result) -> { + assertEquals(2, result.getBuckets().size()); + assertEquals("{keyword=null}", result.afterKey().toString()); + assertEquals("{keyword=d}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(0).getDocCount()); + assertEquals("{keyword=null}", result.getBuckets().get(1).getKeyAsString()); + assertEquals(3L, result.getBuckets().get(1).getDocCount()); + } + ); + + // desc, null bucket is last, search after non null value + testSearchCase( + Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("double")), + dataset, + () -> new CompositeAggregationBuilder( + "name", + Arrays.asList( + new TermsValuesSourceBuilder("keyword").field("keyword") + .missingBucket(true) + .missingOrder(MissingOrder.LAST) + .order(SortOrder.DESC) + ) + ).aggregateAfter(createAfterKey("keyword", "c")), + (result) -> { + assertEquals(2, result.getBuckets().size()); + assertEquals("{keyword=null}", result.afterKey().toString()); + assertEquals("{keyword=a}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(0).getDocCount()); + assertEquals("{keyword=null}", result.getBuckets().get(1).getKeyAsString()); + assertEquals(3L, result.getBuckets().get(1).getDocCount()); + } + ); + + // keyword null bucket is last, long null bucket is last, search after keyword=c, long=null + testSearchCase( + Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("double")), + dataset, + () -> new CompositeAggregationBuilder( + "name", + Arrays.asList( + new TermsValuesSourceBuilder("keyword").field("keyword").missingBucket(true).missingOrder(MissingOrder.LAST), + new TermsValuesSourceBuilder("long").field("long").missingBucket(true).missingOrder(MissingOrder.LAST) + ) + ).aggregateAfter(createAfterKey("keyword", "c", "long", null)), + (result) -> { + assertEquals(3, result.getBuckets().size()); + assertEquals("{keyword=null, long=null}", result.afterKey().toString()); + assertEquals("{keyword=d, long=10}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(0).getDocCount()); + assertEquals("{keyword=null, long=100}", result.getBuckets().get(1).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(1).getDocCount()); + assertEquals("{keyword=null, long=null}", result.getBuckets().get(2).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(2).getDocCount()); + } + ); } public void testMultiValuedWithKeywordAndLong() throws Exception { @@ -1719,6 +1904,240 @@ public void testWithHistogramAndKeyword() throws IOException { ); } + public void testWithHistogramBucketMissing() throws IOException { + final List<Map<String, List<Object>>> dataset = new ArrayList<>(); + dataset.addAll( + Arrays.asList( + createDocument("price", 50L, "long", 1L), + createDocument("price", 60L, "long", 2L), + createDocument("price", 70L, "long", 3L), + createDocument("price", 62L, "long", 4L), + createDocument("long", 5L) + ) + ); + + // asc, null bucket is first + testSearchCase( + Arrays.asList(new MatchAllDocsQuery()), + dataset, + () -> new
CompositeAggregationBuilder( + "name", + Arrays.asList( + new HistogramValuesSourceBuilder("price").field("price") + .missingBucket(true) + .missingOrder(MissingOrder.FIRST) + .interval(10) + ) + ), + (result) -> { + assertEquals(4, result.getBuckets().size()); + assertEquals("{price=70.0}", result.afterKey().toString()); + assertEquals("{price=null}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(0).getDocCount()); + assertEquals("{price=50.0}", result.getBuckets().get(1).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(1).getDocCount()); + assertEquals("{price=60.0}", result.getBuckets().get(2).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(2).getDocCount()); + assertEquals("{price=70.0}", result.getBuckets().get(3).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(3).getDocCount()); + } + ); + + // asc, null bucket is first, after 60.0 + testSearchCase( + Arrays.asList(new MatchAllDocsQuery()), + dataset, + () -> new CompositeAggregationBuilder( + "name", + Arrays.asList( + new HistogramValuesSourceBuilder("price").field("price") + .missingBucket(true) + .missingOrder(MissingOrder.FIRST) + .interval(10) + ) + ).aggregateAfter(createAfterKey("price", 60.0d)), + (result) -> { + assertEquals(1, result.getBuckets().size()); + assertEquals("{price=70.0}", result.afterKey().toString()); + assertEquals("{price=70.0}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(0).getDocCount()); + } + ); + + // asc, null bucket is first, after null + testSearchCase( + Arrays.asList(new MatchAllDocsQuery()), + dataset, + () -> new CompositeAggregationBuilder( + "name", + Arrays.asList( + new HistogramValuesSourceBuilder("price").field("price") + .missingBucket(true) + .missingOrder(MissingOrder.FIRST) + .interval(10) + ) + ).aggregateAfter(createAfterKey("price", null)), + (result) -> { + assertEquals(3, result.getBuckets().size()); + assertEquals("{price=70.0}", result.afterKey().toString()); + assertEquals("{price=50.0}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(0).getDocCount()); + assertEquals("{price=60.0}", result.getBuckets().get(1).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(1).getDocCount()); + assertEquals("{price=70.0}", result.getBuckets().get(2).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(2).getDocCount()); + } + ); + + // asc, null bucket is last + testSearchCase( + Arrays.asList(new MatchAllDocsQuery()), + dataset, + () -> new CompositeAggregationBuilder( + "name", + Arrays.asList( + new HistogramValuesSourceBuilder("price").field("price") + .missingBucket(true) + .missingOrder(MissingOrder.LAST) + .interval(10) + ) + ), + (result) -> { + assertEquals(4, result.getBuckets().size()); + assertEquals("{price=null}", result.afterKey().toString()); + assertEquals("{price=50.0}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(0).getDocCount()); + assertEquals("{price=60.0}", result.getBuckets().get(1).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(1).getDocCount()); + assertEquals("{price=70.0}", result.getBuckets().get(2).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(2).getDocCount()); + assertEquals("{price=null}", result.getBuckets().get(3).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(3).getDocCount()); + } + ); + + // asc, null bucket is last, after 70.0 + testSearchCase( + Arrays.asList(new
MatchAllDocsQuery()), + dataset, + () -> new CompositeAggregationBuilder( + "name", + Arrays.asList( + new HistogramValuesSourceBuilder("price").field("price") + .missingBucket(true) + .missingOrder(MissingOrder.LAST) + .interval(10) + ) + ).aggregateAfter(createAfterKey("price", 70.0)), + (result) -> { + assertEquals(1, result.getBuckets().size()); + assertEquals("{price=null}", result.afterKey().toString()); + assertEquals("{price=null}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(0).getDocCount()); + } + ); + + // desc, null bucket is first + testSearchCase( + Arrays.asList(new MatchAllDocsQuery()), + dataset, + () -> new CompositeAggregationBuilder( + "name", + Arrays.asList( + new HistogramValuesSourceBuilder("price").field("price") + .missingBucket(true) + .missingOrder(MissingOrder.FIRST) + .order(SortOrder.DESC) + .interval(10) + ) + ), + (result) -> { + assertEquals(4, result.getBuckets().size()); + assertEquals("{price=50.0}", result.afterKey().toString()); + assertEquals("{price=null}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(0).getDocCount()); + assertEquals("{price=70.0}", result.getBuckets().get(1).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(1).getDocCount()); + assertEquals("{price=60.0}", result.getBuckets().get(2).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(2).getDocCount()); + assertEquals("{price=50.0}", result.getBuckets().get(3).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(3).getDocCount()); + } + ); + + // desc, null bucket is first, after 60.0 + testSearchCase( + Arrays.asList(new MatchAllDocsQuery()), + dataset, + () -> new CompositeAggregationBuilder( + "name", + Arrays.asList( + new HistogramValuesSourceBuilder("price").field("price") + .missingBucket(true) + .missingOrder(MissingOrder.FIRST) + .order(SortOrder.DESC) + .interval(10) + ) + ).aggregateAfter(createAfterKey("price", 60.0)), + (result) -> { + assertEquals(1, result.getBuckets().size()); + assertEquals("{price=50.0}", result.afterKey().toString()); + assertEquals("{price=50.0}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(0).getDocCount()); + } + ); + + // desc, null bucket is last + testSearchCase( + Arrays.asList(new MatchAllDocsQuery()), + dataset, + () -> new CompositeAggregationBuilder( + "name", + Arrays.asList( + new HistogramValuesSourceBuilder("price").field("price") + .missingBucket(true) + .missingOrder(MissingOrder.LAST) + .order(SortOrder.DESC) + .interval(10) + ) + ), + (result) -> { + assertEquals(4, result.getBuckets().size()); + assertEquals("{price=null}", result.afterKey().toString()); + assertEquals("{price=70.0}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(0).getDocCount()); + assertEquals("{price=60.0}", result.getBuckets().get(1).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(1).getDocCount()); + assertEquals("{price=50.0}", result.getBuckets().get(2).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(2).getDocCount()); + assertEquals("{price=null}", result.getBuckets().get(3).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(3).getDocCount()); + } + ); + + // desc, null bucket is last, after 50.0 + testSearchCase( + Arrays.asList(new MatchAllDocsQuery()), + dataset, + () -> new CompositeAggregationBuilder( + "name", + Arrays.asList( + new HistogramValuesSourceBuilder("price").field("price") + 
.missingBucket(true) + .missingOrder(MissingOrder.LAST) + .order(SortOrder.DESC) + .interval(10) + ) + ).aggregateAfter(createAfterKey("price", 50.0)), + (result) -> { + assertEquals(1, result.getBuckets().size()); + assertEquals("{price=null}", result.afterKey().toString()); + assertEquals("{price=null}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(0).getDocCount()); + } + ); + } + public void testWithKeywordAndDateHistogram() throws IOException { final List<Map<String, List<Object>>> dataset = new ArrayList<>(); dataset.addAll( diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueueTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueueTests.java index a256a37814d62..0ad6d30df337f 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueueTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueueTests.java @@ -63,6 +63,7 @@ import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.AggregatorTestCase; import org.opensearch.search.aggregations.LeafBucketCollector; +import org.opensearch.search.aggregations.bucket.missing.MissingOrder; import java.io.IOException; import java.util.ArrayList; @@ -277,6 +278,7 @@ private void testRandomCase(boolean forceMerge, boolean missingBucket, int index value -> value, DocValueFormat.RAW, missingBucket, + MissingOrder.DEFAULT, size, 1 ); @@ -287,6 +289,7 @@ private void testRandomCase(boolean forceMerge, boolean missingBucket, int index context -> FieldData.sortableLongBitsToDoubles(DocValues.getSortedNumeric(context.reader(), fieldType.name())), DocValueFormat.RAW, missingBucket, + MissingOrder.DEFAULT, size, 1 ); @@ -300,6 +303,7 @@ private void testRandomCase(boolean forceMerge, boolean missingBucket, int index context -> DocValues.getSortedSet(context.reader(), fieldType.name()), DocValueFormat.RAW, missingBucket, + MissingOrder.DEFAULT, size, 1 ); @@ -311,6 +315,7 @@ private void testRandomCase(boolean forceMerge, boolean missingBucket, int index context -> FieldData.toString(DocValues.getSortedSet(context.reader(), fieldType.name())), DocValueFormat.RAW, missingBucket, + MissingOrder.DEFAULT, size, 1 ); diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/InternalCompositeTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/InternalCompositeTests.java index 311c688f23ff6..4121954c1ede2 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/InternalCompositeTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/InternalCompositeTests.java @@ -39,6 +39,7 @@ import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.InternalAggregations; import org.opensearch.search.aggregations.ParsedAggregation; +import org.opensearch.search.aggregations.bucket.missing.MissingOrder; import org.opensearch.test.InternalMultiBucketAggregationTestCase; import org.junit.After; @@ -71,6 +72,7 @@ public class InternalCompositeTests extends InternalMultiBucketAggregationTestCa private List<String> sourceNames; private List<DocValueFormat> formats; private int[] reverseMuls; + private MissingOrder[] missingOrders; private int[] types; private int size; @@ -100,10 +102,12 @@ public void setUp() throws Exception { sourceNames = new
ArrayList<>(); formats = new ArrayList<>(); reverseMuls = new int[numFields]; + missingOrders = new MissingOrder[numFields]; types = new int[numFields]; for (int i = 0; i < numFields; i++) { sourceNames.add("field_" + i); reverseMuls[i] = randomBoolean() ? 1 : -1; + missingOrders[i] = randomFrom(MissingOrder.values()); int type = randomIntBetween(0, 2); types[i] = type; formats.add(randomDocValueFormat(type == 0)); @@ -182,6 +186,7 @@ protected InternalComposite createTestInstance(String name, Map formats, key, reverseMuls, + missingOrders, 1L, aggregations ); @@ -189,7 +194,18 @@ protected InternalComposite createTestInstance(String name, Map } Collections.sort(buckets, (o1, o2) -> o1.compareKey(o2)); CompositeKey lastBucket = buckets.size() > 0 ? buckets.get(buckets.size() - 1).getRawKey() : null; - return new InternalComposite(name, size, sourceNames, formats, buckets, lastBucket, reverseMuls, randomBoolean(), metadata); + return new InternalComposite( + name, + size, + sourceNames, + formats, + buckets, + lastBucket, + reverseMuls, + missingOrders, + randomBoolean(), + metadata + ); } @Override @@ -214,6 +230,7 @@ protected InternalComposite mutateInstance(InternalComposite instance) throws IO formats, createCompositeKey(), reverseMuls, + missingOrders, randomLongBetween(1, 100), InternalAggregations.EMPTY ) @@ -239,6 +256,7 @@ protected InternalComposite mutateInstance(InternalComposite instance) throws IO buckets, lastBucket, reverseMuls, + missingOrders, randomBoolean(), metadata ); @@ -295,6 +313,7 @@ public void testReduceUnmapped() throws IOException { emptyList(), null, reverseMuls, + missingOrders, true, emptyMap() ); diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/SingleDimensionValuesSourceTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/SingleDimensionValuesSourceTests.java index 19cf1ef7044c9..6569a269169eb 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/SingleDimensionValuesSourceTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/SingleDimensionValuesSourceTests.java @@ -47,6 +47,7 @@ import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.search.DocValueFormat; +import org.opensearch.search.aggregations.bucket.missing.MissingOrder; import org.opensearch.test.OpenSearchTestCase; import static org.mockito.Mockito.mock; @@ -62,6 +63,7 @@ public void testBinarySorted() { context -> null, DocValueFormat.RAW, false, + MissingOrder.DEFAULT, 1, 1 ); @@ -79,6 +81,7 @@ public void testBinarySorted() { context -> null, DocValueFormat.RAW, true, + MissingOrder.DEFAULT, 1, 1 ); @@ -92,13 +95,24 @@ public void testBinarySorted() { context -> null, DocValueFormat.RAW, false, + MissingOrder.DEFAULT, 0, -1 ); assertNull(source.createSortedDocsProducerOrNull(reader, null)); MappedFieldType ip = new IpFieldMapper.IpFieldType("ip"); - source = new BinaryValuesSource(BigArrays.NON_RECYCLING_INSTANCE, (b) -> {}, ip, context -> null, DocValueFormat.RAW, false, 1, 1); + source = new BinaryValuesSource( + BigArrays.NON_RECYCLING_INSTANCE, + (b) -> {}, + ip, + context -> null, + DocValueFormat.RAW, + false, + MissingOrder.DEFAULT, + 1, + 1 + ); assertNull(source.createSortedDocsProducerOrNull(reader, null)); } @@ -110,6 +124,7 @@ public void testGlobalOrdinalsSorted() { context -> null, DocValueFormat.RAW, false, + MissingOrder.DEFAULT, 1, 1 ); @@ -120,7 +135,16 @@ public void 
testGlobalOrdinalsSorted() { assertNull(source.createSortedDocsProducerOrNull(reader, new TermQuery(new Term("foo", "bar")))); assertNull(source.createSortedDocsProducerOrNull(reader, new TermQuery(new Term("keyword", "toto)")))); - source = new GlobalOrdinalValuesSource(BigArrays.NON_RECYCLING_INSTANCE, keyword, context -> null, DocValueFormat.RAW, true, 1, 1); + source = new GlobalOrdinalValuesSource( + BigArrays.NON_RECYCLING_INSTANCE, + keyword, + context -> null, + DocValueFormat.RAW, + true, + MissingOrder.DEFAULT, + 1, + 1 + ); assertNull(source.createSortedDocsProducerOrNull(reader, new MatchAllDocsQuery())); assertNull(source.createSortedDocsProducerOrNull(reader, null)); assertNull(source.createSortedDocsProducerOrNull(reader, new TermQuery(new Term("foo", "bar")))); @@ -131,6 +155,7 @@ public void testGlobalOrdinalsSorted() { context -> null, DocValueFormat.RAW, false, + MissingOrder.DEFAULT, 1, -1 ); @@ -138,7 +163,16 @@ public void testGlobalOrdinalsSorted() { assertNull(source.createSortedDocsProducerOrNull(reader, new TermQuery(new Term("foo", "bar")))); final MappedFieldType ip = new IpFieldMapper.IpFieldType("ip"); - source = new GlobalOrdinalValuesSource(BigArrays.NON_RECYCLING_INSTANCE, ip, context -> null, DocValueFormat.RAW, false, 1, 1); + source = new GlobalOrdinalValuesSource( + BigArrays.NON_RECYCLING_INSTANCE, + ip, + context -> null, + DocValueFormat.RAW, + false, + MissingOrder.DEFAULT, + 1, + 1 + ); assertNull(source.createSortedDocsProducerOrNull(reader, null)); assertNull(source.createSortedDocsProducerOrNull(reader, new TermQuery(new Term("foo", "bar")))); } @@ -159,6 +193,7 @@ public void testNumericSorted() { value -> value, DocValueFormat.RAW, false, + MissingOrder.DEFAULT, 1, 1 ); @@ -192,6 +227,7 @@ public void testNumericSorted() { value -> value, DocValueFormat.RAW, true, + MissingOrder.DEFAULT, 1, 1 ); @@ -213,6 +249,7 @@ public void testNumericSorted() { value -> value, DocValueFormat.RAW, false, + MissingOrder.DEFAULT, 1, -1 ); @@ -231,6 +268,7 @@ public void testNumericSorted() { context -> null, DocValueFormat.RAW, false, + MissingOrder.DEFAULT, 1, 1 ); diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/AbstractGeoTestCase.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/AbstractGeoTestCase.java index c1e5c69fbb974..1446feefb2c7a 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/AbstractGeoTestCase.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/AbstractGeoTestCase.java @@ -269,7 +269,6 @@ public void setupSuiteScopeCluster() throws Exception { for (int i = 0; i < totalHits; i++) { SearchHit searchHit = response.getHits().getAt(i); assertThat("Hit " + i + " with id: " + searchHit.getId(), searchHit.getIndex(), equalTo("high_card_idx")); - assertThat("Hit " + i + " with id: " + searchHit.getId(), searchHit.getType(), equalTo("type")); DocumentField hitField = searchHit.field(NUMBER_FIELD_NAME); assertThat("Hit " + i + " has wrong number of values", hitField.getValues().size(), equalTo(1)); diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalTopHitsTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalTopHitsTests.java index 20807f4330bbd..07037be28a19f 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalTopHitsTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalTopHitsTests.java @@ -45,7 +45,6 @@ import 
org.opensearch.common.collect.Tuple; import org.opensearch.common.document.DocumentField; import org.opensearch.common.lucene.search.TopDocsAndMaxScore; -import org.opensearch.common.text.Text; import org.opensearch.common.xcontent.ToXContent; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentHelper; @@ -174,7 +173,7 @@ private InternalTopHits createTestInstance( Map<String, DocumentField> searchHitFields = new HashMap<>(); scoreDocs[i] = docBuilder.apply(docId, score); - hits[i] = new SearchHit(docId, Integer.toString(i), new Text("_docs"), searchHitFields, Collections.emptyMap()); + hits[i] = new SearchHit(docId, Integer.toString(i), searchHitFields, Collections.emptyMap()); hits[i].score(score); } int totalHits = between(actualSize, 500000); @@ -224,7 +223,6 @@ protected void assertFromXContent(InternalTopHits aggregation, ParsedAggregation SearchHit actual = actualHits.get(i); assertEquals(expected.getIndex(), actual.getIndex()); - assertEquals(expected.getType(), actual.getType()); assertEquals(expected.getId(), actual.getId()); assertEquals(expected.getVersion(), actual.getVersion()); assertEquals(expected.getScore(), actual.getScore(), 0.0f); diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/SumAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/SumAggregatorTests.java index 2f48447c931b5..08a257935eb9e 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/SumAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/SumAggregatorTests.java @@ -208,31 +208,25 @@ public void testSummationAccuracy() throws IOException { } private void verifySummationOfDoubles(double[] values, double expected, double delta) throws IOException { - testAggregation( - sum("_name").field(FIELD_NAME), - new MatchAllDocsQuery(), - iw -> { - /* - * The sum agg uses a Kahan sumation on the shard to limit - * floating point errors. But it doesn't ship the sums to the - * coordinating node, so floaing point error can creep in when - * reducing many sums. The test framework aggregates each - * segment as though it were a separate shard, then reduces - * those togther. Fun. But it means we don't get the full - * accuracy of the Kahan sumation. And *that* accuracy is - * what this method is trying to test. So we have to stick - * all the documents on the same leaf. `addDocuments` does - * that. - */ - iw.addDocuments( - Arrays.stream(values) - .mapToObj(value -> singleton(new NumericDocValuesField(FIELD_NAME, NumericUtils.doubleToSortableLong(value)))) - .collect(toList()) - ); - }, - result -> assertEquals(expected, result.getValue(), delta), - defaultFieldType(NumberType.DOUBLE) - ); + testAggregation(sum("_name").field(FIELD_NAME), new MatchAllDocsQuery(), iw -> { + /* + * The sum agg uses a Kahan summation on the shard to limit + * floating point errors. But it doesn't ship the sums to the + * coordinating node, so floating point error can creep in when + * reducing many sums. The test framework aggregates each + * segment as though it were a separate shard, then reduces + * those together. Fun. But it means we don't get the full + * accuracy of the Kahan summation. And *that* accuracy is + * what this method is trying to test. So we have to stick + * all the documents on the same leaf. `addDocuments` does + * that.
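+ * (Kahan summation keeps a separate compensation value that captures the low-order bits lost in each floating point add.)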
+ */ + iw.addDocuments( + Arrays.stream(values) + .mapToObj(value -> singleton(new NumericDocValuesField(FIELD_NAME, NumericUtils.doubleToSortableLong(value)))) + .collect(toList()) + ); + }, result -> assertEquals(expected, result.getValue(), delta), defaultFieldType(NumberType.DOUBLE)); } public void testUnmapped() throws IOException { diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/TopHitsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/TopHitsAggregatorTests.java index 89911f610ec4b..a8f9383c07125 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/TopHitsAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/TopHitsAggregatorTests.java @@ -98,11 +98,8 @@ public void testTopLevel() throws Exception { SearchHits searchHits = ((TopHits) result).getHits(); assertEquals(3L, searchHits.getTotalHits().value); assertEquals("3", searchHits.getAt(0).getId()); - assertEquals("type", searchHits.getAt(0).getType()); assertEquals("2", searchHits.getAt(1).getId()); - assertEquals("type", searchHits.getAt(1).getType()); assertEquals("1", searchHits.getAt(2).getId()); - assertEquals("type", searchHits.getAt(2).getType()); assertTrue(AggregationInspectionHelper.hasValue(((InternalTopHits) result))); } diff --git a/server/src/test/java/org/opensearch/search/aggregations/support/ScriptValuesTests.java b/server/src/test/java/org/opensearch/search/aggregations/support/ScriptValuesTests.java index 3709f4daefaca..98e4e8f881b1b 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/support/ScriptValuesTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/support/ScriptValuesTests.java @@ -37,7 +37,6 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Scorable; import org.apache.lucene.util.BytesRef; -import org.opensearch.common.Strings; import org.opensearch.script.AggregationScript; import org.opensearch.search.aggregations.support.values.ScriptBytesValues; import org.opensearch.search.aggregations.support.values.ScriptDoubleValues; @@ -60,7 +59,7 @@ private static class FakeAggregationScript extends AggregationScript { int index; FakeAggregationScript(Object[][] values) { - super(Collections.emptyMap(), new SearchLookup(null, null, Strings.EMPTY_ARRAY) { + super(Collections.emptyMap(), new SearchLookup(null, null) { @Override public LeafSearchLookup getLeafSearchLookup(LeafReaderContext context) { diff --git a/server/src/test/java/org/opensearch/search/collapse/CollapseBuilderTests.java b/server/src/test/java/org/opensearch/search/collapse/CollapseBuilderTests.java index 8913854df7e36..8a05f4041d0e1 100644 --- a/server/src/test/java/org/opensearch/search/collapse/CollapseBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/collapse/CollapseBuilderTests.java @@ -45,7 +45,6 @@ import org.opensearch.common.xcontent.XContentParser; import org.opensearch.index.mapper.KeywordFieldMapper; import org.opensearch.index.mapper.MappedFieldType; -import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.index.mapper.TextSearchInfo; import org.opensearch.index.mapper.ValueFetcher; @@ -239,7 +238,7 @@ public String typeName() { } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup searchLookup, String format) { + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { 
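+ // value fetching is deliberately unsupported by this test-only field type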
throw new UnsupportedOperationException(); } diff --git a/server/src/test/java/org/opensearch/search/fetch/subphase/FetchSourcePhaseTests.java b/server/src/test/java/org/opensearch/search/fetch/subphase/FetchSourcePhaseTests.java index b74aa0e683eaf..92b398f3030e1 100644 --- a/server/src/test/java/org/opensearch/search/fetch/subphase/FetchSourcePhaseTests.java +++ b/server/src/test/java/org/opensearch/search/fetch/subphase/FetchSourcePhaseTests.java @@ -190,7 +190,7 @@ private HitContext hitExecuteMultiple( when(fetchContext.fetchSourceContext()).thenReturn(fetchSourceContext); when(fetchContext.getIndexName()).thenReturn("index"); - final SearchHit searchHit = new SearchHit(1, null, null, nestedIdentity, null, null); + final SearchHit searchHit = new SearchHit(1, null, nestedIdentity, null, null); // We don't need a real index, just a LeafReaderContext which cannot be mocked. MemoryIndex index = new MemoryIndex(); diff --git a/server/src/test/java/org/opensearch/search/fetch/subphase/FieldFetcherTests.java b/server/src/test/java/org/opensearch/search/fetch/subphase/FieldFetcherTests.java index 8b070024b233b..8147d1afb8c15 100644 --- a/server/src/test/java/org/opensearch/search/fetch/subphase/FieldFetcherTests.java +++ b/server/src/test/java/org/opensearch/search/fetch/subphase/FieldFetcherTests.java @@ -32,13 +32,17 @@ package org.opensearch.search.fetch.subphase; +import org.opensearch.Version; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.document.DocumentField; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.index.IndexService; +import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.lookup.SourceLookup; import org.opensearch.test.OpenSearchSingleNodeTestCase; @@ -428,20 +432,20 @@ public void testTextSubFields() throws IOException { } } - private Map<String, DocumentField> fetchFields(MapperService mapperService, XContentBuilder source, String fieldPattern) + private static Map<String, DocumentField> fetchFields(MapperService mapperService, XContentBuilder source, String fieldPattern) throws IOException { List<FieldAndFormat> fields = org.opensearch.common.collect.List.of(new FieldAndFormat(fieldPattern, null)); return fetchFields(mapperService, source, fields); } - private Map<String, DocumentField> fetchFields(MapperService mapperService, XContentBuilder source, List<FieldAndFormat> fields) + private static Map<String, DocumentField> fetchFields(MapperService mapperService, XContentBuilder source, List<FieldAndFormat> fields) throws IOException { SourceLookup sourceLookup = new SourceLookup(); sourceLookup.setSource(BytesReference.bytes(source)); - FieldFetcher fieldFetcher = FieldFetcher.create(mapperService, null, fields); + FieldFetcher fieldFetcher = FieldFetcher.create(createQueryShardContext(mapperService), null, fields); return fieldFetcher.fetch(sourceLookup, org.opensearch.common.collect.Set.of()); } @@ -480,4 +484,34 @@ public MapperService createMapperService() throws IOException { IndexService indexService = createIndex("index", Settings.EMPTY, MapperService.SINGLE_MAPPING_NAME, mapping); return indexService.mapperService(); } + + private static QueryShardContext createQueryShardContext(MapperService mapperService) { + Settings settings = Settings.builder() + .put("index.version.created", Version.CURRENT) + .put("index.number_of_shards", 1) +
.put("index.number_of_replicas", 0) + .put(IndexMetadata.SETTING_INDEX_UUID, "uuid") + .build(); + IndexMetadata indexMetadata = new IndexMetadata.Builder("index").settings(settings).build(); + IndexSettings indexSettings = new IndexSettings(indexMetadata, settings); + return new QueryShardContext( + 0, + indexSettings, + null, + null, + null, + mapperService, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ); + } } diff --git a/server/src/test/java/org/opensearch/search/geo/GeoQueryTests.java b/server/src/test/java/org/opensearch/search/geo/GeoQueryTests.java index 190cf677c10c5..e6280e5c6924a 100644 --- a/server/src/test/java/org/opensearch/search/geo/GeoQueryTests.java +++ b/server/src/test/java/org/opensearch/search/geo/GeoQueryTests.java @@ -90,7 +90,7 @@ public void testNullShape() throws Exception { .setSource("{\"geo\": null}", XContentType.JSON) .setRefreshPolicy(IMMEDIATE) .get(); - GetResponse result = client().prepareGet(defaultIndexName, "_doc", "aNullshape").get(); + GetResponse result = client().prepareGet(defaultIndexName, "aNullshape").get(); assertThat(result.getField("location"), nullValue()); } diff --git a/server/src/test/java/org/opensearch/search/geo/GeoShapeQueryTests.java b/server/src/test/java/org/opensearch/search/geo/GeoShapeQueryTests.java index 0b925811161c2..cc3827168b82a 100644 --- a/server/src/test/java/org/opensearch/search/geo/GeoShapeQueryTests.java +++ b/server/src/test/java/org/opensearch/search/geo/GeoShapeQueryTests.java @@ -478,7 +478,7 @@ public void testEdgeCases() throws Exception { // This search would fail if both geoshape indexing and geoshape filtering // used the bottom-level optimization in SpatialPrefixTree#recursiveGetNodes. - SearchResponse searchResponse = client().prepareSearch("test").setTypes("type1").setQuery(geoIntersectionQuery("geo", query)).get(); + SearchResponse searchResponse = client().prepareSearch("test").setQuery(geoIntersectionQuery("geo", query)).get(); assertSearchResponse(searchResponse); assertHitCount(searchResponse, 1); @@ -655,10 +655,7 @@ public void testPointsOnly() throws Exception { } // test that point was inserted - SearchResponse response = client().prepareSearch("geo_points_only") - .setTypes("type1") - .setQuery(geoIntersectionQuery("location", shape)) - .get(); + SearchResponse response = client().prepareSearch("geo_points_only").setQuery(geoIntersectionQuery("location", shape)).get(); assertHitCount(response, 1); } @@ -699,7 +696,7 @@ public void testPointsOnlyExplicit() throws Exception { .get(); // test that point was inserted - SearchResponse response = client().prepareSearch("geo_points_only").setTypes("type1").setQuery(matchAllQuery()).get(); + SearchResponse response = client().prepareSearch("geo_points_only").setQuery(matchAllQuery()).get(); assertHitCount(response, 2); } diff --git a/server/src/test/java/org/opensearch/search/lookup/LeafDocLookupTests.java b/server/src/test/java/org/opensearch/search/lookup/LeafDocLookupTests.java index 96fdf6a6da241..b047f16583ee5 100644 --- a/server/src/test/java/org/opensearch/search/lookup/LeafDocLookupTests.java +++ b/server/src/test/java/org/opensearch/search/lookup/LeafDocLookupTests.java @@ -39,7 +39,6 @@ import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; -import static org.opensearch.search.lookup.LeafDocLookup.TYPES_DEPRECATION_MESSAGE; import static org.mockito.AdditionalAnswers.returnsFirstArg; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doReturn; @@ -59,14 
+58,13 @@ public void setUp() throws Exception { when(fieldType.valueForDisplay(any())).then(returnsFirstArg()); MapperService mapperService = mock(MapperService.class); - when(mapperService.fieldType("_type")).thenReturn(fieldType); when(mapperService.fieldType("field")).thenReturn(fieldType); when(mapperService.fieldType("alias")).thenReturn(fieldType); docValues = mock(ScriptDocValues.class); IndexFieldData fieldData = createFieldData(docValues); - docLookup = new LeafDocLookup(mapperService, ignored -> fieldData, new String[] { "type" }, null); + docLookup = new LeafDocLookup(mapperService, ignored -> fieldData, null); } public void testBasicLookup() { @@ -79,12 +77,6 @@ public void testFieldAliases() { assertEquals(docValues, fetchedDocValues); } - public void testTypesDeprecation() { - ScriptDocValues fetchedDocValues = docLookup.get("_type"); - assertEquals(docValues, fetchedDocValues); - assertWarnings(TYPES_DEPRECATION_MESSAGE); - } - private IndexFieldData createFieldData(ScriptDocValues scriptDocValues) { LeafFieldData leafFieldData = mock(LeafFieldData.class); doReturn(scriptDocValues).when(leafFieldData).getScriptValues(); diff --git a/server/src/test/java/org/opensearch/search/lookup/LeafFieldsLookupTests.java b/server/src/test/java/org/opensearch/search/lookup/LeafFieldsLookupTests.java index 08c1d027385ea..cc7200bbf78c4 100644 --- a/server/src/test/java/org/opensearch/search/lookup/LeafFieldsLookupTests.java +++ b/server/src/test/java/org/opensearch/search/lookup/LeafFieldsLookupTests.java @@ -90,7 +90,7 @@ public void setUp() throws Exception { return null; }).when(leafReader).document(anyInt(), any(StoredFieldVisitor.class)); - fieldsLookup = new LeafFieldsLookup(mapperService, new String[] { "type" }, leafReader); + fieldsLookup = new LeafFieldsLookup(mapperService, leafReader); } public void testBasicLookup() { diff --git a/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java b/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java index ca2f04b70f3f8..07650d3c2a3e2 100644 --- a/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java +++ b/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java @@ -51,11 +51,9 @@ import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.queries.MinDocQuery; -import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Collector; -import org.apache.lucene.search.CollectorManager; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.FieldComparator; @@ -82,14 +80,8 @@ import org.apache.lucene.search.spans.SpanNearQuery; import org.apache.lucene.search.spans.SpanTermQuery; import org.apache.lucene.store.Directory; -import org.apache.lucene.store.IOContext; -import org.apache.lucene.store.IndexInput; -import org.apache.lucene.store.IndexOutput; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.FixedBitSet; -import org.apache.lucene.util.bkd.BKDConfig; -import org.apache.lucene.util.bkd.BKDReader; -import org.apache.lucene.util.bkd.BKDWriter; import org.opensearch.action.search.SearchShardTask; import org.opensearch.common.settings.Settings; import org.opensearch.index.mapper.DateFieldMapper; @@ -114,13 +106,13 @@ import java.util.Collections; import java.util.List; -import static 
org.opensearch.search.query.QueryPhase.pointsHaveDuplicateData; -import static org.opensearch.search.query.TopDocsCollectorContext.hasInfMaxScore; -import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.opensearch.search.query.TopDocsCollectorContext.hasInfMaxScore; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import static org.mockito.Mockito.spy; @@ -675,7 +667,7 @@ public void testDisableTopScoreCollection() throws Exception { dir.close(); } - public void testNumericLongOrDateSortOptimization() throws Exception { + public void testEnhanceSortOnNumeric() throws Exception { final String fieldNameLong = "long-field"; final String fieldNameDate = "date-field"; MappedFieldType fieldTypeLong = new NumberFieldMapper.NumberFieldType(fieldNameLong, NumberFieldMapper.NumberType.LONG); @@ -684,153 +676,144 @@ public void testNumericLongOrDateSortOptimization() throws Exception { when(mapperService.fieldType(fieldNameLong)).thenReturn(fieldTypeLong); when(mapperService.fieldType(fieldNameDate)).thenReturn(fieldTypeDate); // enough docs to have a tree with several leaf nodes - final int numDocs = 3500 * 20; + final int numDocs = 3500 * 5; Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(null)); + long firstValue = randomLongBetween(-10000000L, 10000000L); + long longValue = firstValue; + long dateValue = randomLongBetween(0, 3000000000000L); for (int i = 1; i <= numDocs; ++i) { Document doc = new Document(); - long longValue = randomLongBetween(-10000000L, 10000000L); + doc.add(new LongPoint(fieldNameLong, longValue)); doc.add(new NumericDocValuesField(fieldNameLong, longValue)); - longValue = randomLongBetween(0, 3000000000000L); - doc.add(new LongPoint(fieldNameDate, longValue)); - doc.add(new NumericDocValuesField(fieldNameDate, longValue)); + + doc.add(new LongPoint(fieldNameDate, dateValue)); + doc.add(new NumericDocValuesField(fieldNameDate, dateValue)); writer.addDocument(doc); + longValue++; + dateValue++; if (i % 3500 == 0) writer.commit(); } writer.close(); final IndexReader reader = DirectoryReader.open(dir); - - TestSearchContext searchContext = spy(new TestSearchContext(null, indexShard, newOptimizedContextSearcher(reader, 0))); - when(searchContext.mapperService()).thenReturn(mapperService); - - // 1. Test a sort on long field final SortField sortFieldLong = new SortField(fieldNameLong, SortField.Type.LONG); sortFieldLong.setMissingValue(Long.MAX_VALUE); - final Sort longSort = new Sort(sortFieldLong); - SortAndFormats sortAndFormats = new SortAndFormats(longSort, new DocValueFormat[] { DocValueFormat.RAW }); - searchContext.sort(sortAndFormats); - searchContext.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); - searchContext.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); - searchContext.setSize(10); - QueryPhase.executeInternal(searchContext); - assertSortResults(searchContext.queryResult().topDocs().topDocs, (long) numDocs, false); - - // 2. 
Test a sort on long field + date field final SortField sortFieldDate = new SortField(fieldNameDate, SortField.Type.LONG); + sortFieldDate.setMissingValue(Long.MAX_VALUE); DocValueFormat dateFormat = fieldTypeDate.docValueFormat(null, null); + final Sort longSort = new Sort(sortFieldLong); final Sort longDateSort = new Sort(sortFieldLong, sortFieldDate); - sortAndFormats = new SortAndFormats(longDateSort, new DocValueFormat[] { DocValueFormat.RAW, dateFormat }); - searchContext.sort(sortAndFormats); - QueryPhase.executeInternal(searchContext); - assertSortResults(searchContext.queryResult().topDocs().topDocs, (long) numDocs, true); + final Sort dateSort = new Sort(sortFieldDate); + final Sort dateLongSort = new Sort(sortFieldDate, sortFieldLong); + SortAndFormats longSortAndFormats = new SortAndFormats(longSort, new DocValueFormat[] { DocValueFormat.RAW }); + SortAndFormats longDateSortAndFormats = new SortAndFormats(longDateSort, new DocValueFormat[] { DocValueFormat.RAW, dateFormat }); + SortAndFormats dateSortAndFormats = new SortAndFormats(dateSort, new DocValueFormat[] { dateFormat }); + SortAndFormats dateLongSortAndFormats = new SortAndFormats(dateLongSort, new DocValueFormat[] { dateFormat, DocValueFormat.RAW }); + ParsedQuery query = new ParsedQuery(new MatchAllDocsQuery()); + SearchShardTask task = new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()); + + // 1. Test a sort on long field + { + TestSearchContext searchContext = spy(new TestSearchContext(null, indexShard, newContextSearcher(reader))); + when(searchContext.mapperService()).thenReturn(mapperService); + searchContext.sort(longSortAndFormats); + searchContext.parsedQuery(query); + searchContext.setTask(task); + searchContext.setSize(10); + QueryPhase.executeInternal(searchContext); + assertTrue(searchContext.sort().sort.getSort()[0].getCanUsePoints()); + assertSortResults(searchContext.queryResult().topDocs().topDocs, (long) numDocs, false); + } + + // 2. Test a sort on long field + date field + { + TestSearchContext searchContext = spy(new TestSearchContext(null, indexShard, newContextSearcher(reader))); + when(searchContext.mapperService()).thenReturn(mapperService); + searchContext.sort(longDateSortAndFormats); + searchContext.parsedQuery(query); + searchContext.setTask(task); + searchContext.setSize(10); + QueryPhase.executeInternal(searchContext); + assertTrue(searchContext.sort().sort.getSort()[0].getCanUsePoints()); + assertSortResults(searchContext.queryResult().topDocs().topDocs, (long) numDocs, true); + } // 3. Test a sort on date field - sortFieldDate.setMissingValue(Long.MAX_VALUE); - final Sort dateSort = new Sort(sortFieldDate); - sortAndFormats = new SortAndFormats(dateSort, new DocValueFormat[] { dateFormat }); - searchContext.sort(sortAndFormats); - QueryPhase.executeInternal(searchContext); - assertSortResults(searchContext.queryResult().topDocs().topDocs, (long) numDocs, false); + { + TestSearchContext searchContext = spy(new TestSearchContext(null, indexShard, newContextSearcher(reader))); + when(searchContext.mapperService()).thenReturn(mapperService); + searchContext.sort(dateSortAndFormats); + searchContext.parsedQuery(query); + searchContext.setTask(task); + searchContext.setSize(10); + QueryPhase.executeInternal(searchContext); + assertTrue(searchContext.sort().sort.getSort()[0].getCanUsePoints()); + assertSortResults(searchContext.queryResult().topDocs().topDocs, (long) numDocs, false); + } // 4. 
Test a sort on date field + long field - final Sort dateLongSort = new Sort(sortFieldDate, sortFieldLong); - sortAndFormats = new SortAndFormats(dateLongSort, new DocValueFormat[] { dateFormat, DocValueFormat.RAW }); - searchContext.sort(sortAndFormats); - QueryPhase.executeInternal(searchContext); - assertSortResults(searchContext.queryResult().topDocs().topDocs, (long) numDocs, true); + { + TestSearchContext searchContext = spy(new TestSearchContext(null, indexShard, newContextSearcher(reader))); + when(searchContext.mapperService()).thenReturn(mapperService); + searchContext.sort(dateLongSortAndFormats); + searchContext.parsedQuery(query); + searchContext.setTask(task); + searchContext.setSize(10); + QueryPhase.executeInternal(searchContext); + assertTrue(searchContext.sort().sort.getSort()[0].getCanUsePoints()); + assertSortResults(searchContext.queryResult().topDocs().topDocs, (long) numDocs, true); + } // 5. Test that sort optimization is run when from > 0 and size = 0 { - sortAndFormats = new SortAndFormats(longSort, new DocValueFormat[] { DocValueFormat.RAW }); - searchContext.sort(sortAndFormats); + TestSearchContext searchContext = spy(new TestSearchContext(null, indexShard, newContextSearcher(reader))); + when(searchContext.mapperService()).thenReturn(mapperService); + searchContext.sort(longSortAndFormats); + searchContext.parsedQuery(query); + searchContext.setTask(task); searchContext.from(5); searchContext.setSize(0); QueryPhase.executeInternal(searchContext); + assertTrue(searchContext.sort().sort.getSort()[0].getCanUsePoints()); assertSortResults(searchContext.queryResult().topDocs().topDocs, (long) numDocs, false); } - // 6. Test that sort optimization is NOT run with from = 0 and size= 0 + // 6. Test that sort optimization works with from = 0 and size= 0 { - sortAndFormats = new SortAndFormats(longSort, new DocValueFormat[] { DocValueFormat.RAW }); - searchContext = spy(new TestSearchContext(null, indexShard, newContextSearcher(reader))); + TestSearchContext searchContext = spy(new TestSearchContext(null, indexShard, newContextSearcher(reader))); when(searchContext.mapperService()).thenReturn(mapperService); - searchContext.sort(sortAndFormats); - searchContext.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); - searchContext.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + searchContext.sort(longSortAndFormats); + searchContext.parsedQuery(query); + searchContext.setTask(task); searchContext.setSize(0); + QueryPhase.executeInternal(searchContext); + } + // 7. 
Test that sort optimization works with search after + { + TestSearchContext searchContext = spy(new TestSearchContext(null, indexShard, newContextSearcher(reader))); + when(searchContext.mapperService()).thenReturn(mapperService); + int afterDocument = (int) randomLongBetween(0, 50); + long afterValue = firstValue + afterDocument; + FieldDoc after = new FieldDoc(afterDocument, Float.NaN, new Long[] { afterValue }); + searchContext.searchAfter(after); + searchContext.sort(longSortAndFormats); + searchContext.parsedQuery(query); + searchContext.setTask(task); + searchContext.setSize(10); QueryPhase.executeInternal(searchContext); - TotalHits totalHits = searchContext.queryResult().topDocs().topDocs.totalHits; - assertEquals(TotalHits.Relation.EQUAL_TO, totalHits.relation); - assertEquals(numDocs, totalHits.value); + assertTrue(searchContext.sort().sort.getSort()[0].getCanUsePoints()); + final TopDocs topDocs = searchContext.queryResult().topDocs().topDocs; + long topValue = (long) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]; + assertThat(topValue, greaterThan(afterValue)); + assertSortResults(topDocs, (long) numDocs, false); } reader.close(); dir.close(); } - public void testIndexHasDuplicateData() throws IOException { - int docsCount = 5000; - int maxPointsInLeafNode = 40; - float duplicateRatio = 0.7f; - long duplicateValue = randomLongBetween(-10000000L, 10000000L); - BKDConfig config = new BKDConfig(1, 1, 8, maxPointsInLeafNode); - try (Directory dir = newDirectory()) { - BKDWriter w = new BKDWriter(docsCount, dir, "tmp", config, 1, docsCount); - byte[] longBytes = new byte[8]; - for (int docId = 0; docId < docsCount; docId++) { - long value = randomFloat() < duplicateRatio ? duplicateValue : randomLongBetween(-10000000L, 10000000L); - LongPoint.encodeDimension(value, longBytes, 0); - w.add(longBytes, docId); - } - try ( - IndexOutput metaout = dir.createOutput("bkdmeta", IOContext.DEFAULT); - IndexOutput indexout = dir.createOutput("bkdindex", IOContext.DEFAULT); - IndexOutput dataout = dir.createOutput("bkddata", IOContext.DEFAULT) - ) { - w.finish(metaout, indexout, dataout).run(); - } - try ( - IndexInput metain = dir.openInput("bkdmeta", IOContext.DEFAULT); - IndexInput indexin = dir.openInput("bkdindex", IOContext.DEFAULT); - IndexInput datain = dir.openInput("bkddata", IOContext.DEFAULT) - ) { - BKDReader r = new BKDReader(metain, indexin, datain); - assertTrue(pointsHaveDuplicateData(r, r.getDocCount() / 2)); - } - } - } - - public void testIndexHasNoDuplicateData() throws IOException { - int docsCount = 5000; - int maxPointsInLeafNode = 40; - float duplicateRatio = 0.3f; - long duplicateValue = randomLongBetween(-10000000L, 10000000L); - BKDConfig config = new BKDConfig(1, 1, 8, maxPointsInLeafNode); - try (Directory dir = newDirectory()) { - BKDWriter w = new BKDWriter(docsCount, dir, "tmp", config, 1, docsCount); - byte[] longBytes = new byte[8]; - for (int docId = 0; docId < docsCount; docId++) { - long value = randomFloat() < duplicateRatio ? 
duplicateValue : randomLongBetween(-10000000L, 10000000L); - LongPoint.encodeDimension(value, longBytes, 0); - w.add(longBytes, docId); - } - long indexFP; - try (IndexOutput out = dir.createOutput("bkd", IOContext.DEFAULT)) { - Runnable finalizer = w.finish(out, out, out); - indexFP = out.getFilePointer(); - finalizer.run(); - ; - } - try (IndexInput in = dir.openInput("bkd", IOContext.DEFAULT)) { - in.seek(indexFP); - BKDReader r = new BKDReader(in, in, in); - assertFalse(pointsHaveDuplicateData(r, r.getDocCount() / 2)); - } - } - } - public void testMaxScoreQueryVisitor() { BitSetProducer producer = context -> new FixedBitSet(1); Query query = new OpenSearchToParentBlockJoinQuery(new MatchAllDocsQuery(), producer, ScoreMode.Avg, "nested"); @@ -1023,47 +1006,6 @@ public void search(List leaves, Weight weight, Collector coll }; } - // used to check that numeric long or date sort optimization was run - private static ContextIndexSearcher newOptimizedContextSearcher(IndexReader reader, int queryType) throws IOException { - return new ContextIndexSearcher( - reader, - IndexSearcher.getDefaultSimilarity(), - IndexSearcher.getDefaultQueryCache(), - IndexSearcher.getDefaultQueryCachingPolicy(), - true - ) { - - @Override - public void search( - List leaves, - Weight weight, - CollectorManager manager, - QuerySearchResult result, - DocValueFormat[] formats, - TotalHits totalHits - ) throws IOException { - final Query query = weight.getQuery(); - assertTrue(query instanceof BooleanQuery); - List clauses = ((BooleanQuery) query).clauses(); - assertTrue(clauses.size() == 2); - assertTrue(clauses.get(0).getOccur() == Occur.FILTER); - assertTrue(clauses.get(1).getOccur() == Occur.SHOULD); - if (queryType == 0) { - assertTrue( - clauses.get(1).getQuery().getClass() == LongPoint.newDistanceFeatureQuery("random_field", 1, 1, 1).getClass() - ); - } - if (queryType == 1) assertTrue(clauses.get(1).getQuery() instanceof DocValuesFieldExistsQuery); - super.search(leaves, weight, manager, result, formats, totalHits); - } - - @Override - public void search(List leaves, Weight weight, Collector collector) { - assert (false); // should not be there, expected to search with CollectorManager - } - }; - } - private static class AssertingEarlyTerminationFilterCollector extends FilterCollector { private final int size; diff --git a/server/src/test/java/org/opensearch/search/slice/SliceBuilderTests.java b/server/src/test/java/org/opensearch/search/slice/SliceBuilderTests.java index 897dfe54cd153..6deae9d8dae33 100644 --- a/server/src/test/java/org/opensearch/search/slice/SliceBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/slice/SliceBuilderTests.java @@ -64,7 +64,6 @@ import org.opensearch.index.IndexSettings; import org.opensearch.index.fielddata.IndexNumericFieldData; import org.opensearch.index.mapper.MappedFieldType; -import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.TextSearchInfo; import org.opensearch.index.mapper.ValueFetcher; import org.opensearch.index.query.QueryShardContext; @@ -163,7 +162,7 @@ private QueryShardContext createShardContext( ) { @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup searchLookup, String format) { + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { throw new UnsupportedOperationException(); } diff --git a/server/src/test/java/org/opensearch/search/sort/GeoDistanceSortBuilderTests.java 
b/server/src/test/java/org/opensearch/search/sort/GeoDistanceSortBuilderTests.java index e8d300e1eb9e3..c14deb6add083 100644 --- a/server/src/test/java/org/opensearch/search/sort/GeoDistanceSortBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/sort/GeoDistanceSortBuilderTests.java @@ -268,7 +268,6 @@ public void testGeoDistanceSortCanBeParsedFromGeoHash() throws IOException { + " \"nested\" : {\n" + " \"filter\" : {\n" + " \"ids\" : {\n" - + " \"type\" : [ ],\n" + " \"values\" : [ ],\n" + " \"boost\" : 5.711116\n" + " }\n" diff --git a/server/src/test/java/org/opensearch/search/sort/SortBuilderTests.java b/server/src/test/java/org/opensearch/search/sort/SortBuilderTests.java index 4465d78cb61b6..36acb1ba2f3e0 100644 --- a/server/src/test/java/org/opensearch/search/sort/SortBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/sort/SortBuilderTests.java @@ -127,6 +127,7 @@ public void testSingleFieldSort() throws IOException { result = parseSort(json); assertEquals(1, result.size()); sortBuilder = result.get(0); + assertWarnings("Deprecated field [_geoDistance] used, expected [_geo_distance] instead"); assertEquals(new GeoDistanceSortBuilder("pin.location", 40, -70), sortBuilder); json = "{ \"sort\" : [" + "{\"_geo_distance\" : {" + "\"pin.location\" : \"40,-70\" } }" + "] }"; diff --git a/server/src/test/java/org/opensearch/search/sort/plugin/CustomSortBuilder.java b/server/src/test/java/org/opensearch/search/sort/plugin/CustomSortBuilder.java new file mode 100644 index 0000000000000..6ee53a7936320 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/sort/plugin/CustomSortBuilder.java @@ -0,0 +1,119 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
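The CustomSortBuilder introduced below never builds a SortField itself; it rewrites to an ordinary field sort before execution. A hypothetical caller-side sketch of what the plugin enables ("price" is an invented field name):

```java
import org.opensearch.index.query.QueryBuilders;
import org.opensearch.search.builder.SearchSourceBuilder;
import org.opensearch.search.sort.SortOrder;
import org.opensearch.search.sort.plugin.CustomSortBuilder;

public class CustomSortUsageSketch {
    static SearchSourceBuilder sortedByCustom() {
        // With CustomSortPlugin installed, "_custom" rewrites into a plain field sort.
        return new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()).sort(new CustomSortBuilder("price", SortOrder.ASC));
    }
}
```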
+ */ + +package org.opensearch.search.sort.plugin; + +import static org.opensearch.common.xcontent.ConstructingObjectParser.constructorArg; + +import org.opensearch.common.ParseField; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.xcontent.ConstructingObjectParser; +import org.opensearch.common.xcontent.ObjectParser; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.common.xcontent.XContentParser; +import org.opensearch.index.query.QueryRewriteContext; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.search.sort.BucketedSort; +import org.opensearch.search.sort.SortBuilder; +import org.opensearch.search.sort.SortBuilders; +import org.opensearch.search.sort.SortFieldAndFormat; +import org.opensearch.search.sort.SortOrder; + +import java.io.IOException; +import java.util.Objects; + +/** + * Custom sort builder that just rewrites to a basic field sort + */ +public class CustomSortBuilder extends SortBuilder<CustomSortBuilder> { + public static String NAME = "_custom"; + public static ParseField SORT_FIELD = new ParseField("sort_field"); + + public final String field; + public final SortOrder order; + + public CustomSortBuilder(String field, SortOrder order) { + this.field = field; + this.order = order; + } + + public CustomSortBuilder(StreamInput in) throws IOException { + this.field = in.readString(); + this.order = in.readOptionalWriteable(SortOrder::readFromStream); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + out.writeString(field); + out.writeOptionalWriteable(order); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public SortBuilder<?> rewrite(final QueryRewriteContext ctx) throws IOException { + return SortBuilders.fieldSort(field).order(order); + } + + @Override + protected SortFieldAndFormat build(final QueryShardContext context) throws IOException { + throw new IllegalStateException("rewrite"); + } + + @Override + public BucketedSort buildBucketedSort(final QueryShardContext context, final int bucketSize, final BucketedSort.ExtraData extra) + throws IOException { + throw new IllegalStateException("rewrite"); + } + + @Override + public boolean equals(Object object) { + if (this == object) { + return true; + } + if (object == null || getClass() != object.getClass()) { + return false; + } + CustomSortBuilder other = (CustomSortBuilder) object; + return Objects.equals(field, other.field) && Objects.equals(order, other.order); + } + + @Override + public int hashCode() { + return Objects.hash(field, order); + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.startObject(); + builder.startObject(NAME); + builder.field(SORT_FIELD.getPreferredName(), field); + builder.field(ORDER_FIELD.getPreferredName(), order); + builder.endObject(); + builder.endObject(); + return builder; + } + + public static CustomSortBuilder fromXContent(XContentParser parser, String elementName) { + return PARSER.apply(parser, null); + } + + private static final ConstructingObjectParser<CustomSortBuilder, Void> PARSER = new ConstructingObjectParser<>( + NAME, + a -> new CustomSortBuilder((String) a[0], (SortOrder) a[1]) + ); + + static { + PARSER.declareField(constructorArg(), XContentParser::text, SORT_FIELD, ObjectParser.ValueType.STRING); + PARSER.declareField(constructorArg(), p -> SortOrder.fromString(p.text()), ORDER_FIELD, ObjectParser.ValueType.STRING); + } +} diff --git a/server/src/test/java/org/opensearch/search/sort/plugin/CustomSortPlugin.java b/server/src/test/java/org/opensearch/search/sort/plugin/CustomSortPlugin.java new file mode 100644 index 0000000000000..ed4df9126b6e0 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/sort/plugin/CustomSortPlugin.java @@ -0,0 +1,22 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.sort.plugin; + +import org.opensearch.plugins.Plugin; +import org.opensearch.plugins.SearchPlugin; + +import java.util.Collections; +import java.util.List; + +public class CustomSortPlugin extends Plugin implements SearchPlugin { + @Override + public List<SortSpec<?>> getSorts() { + return Collections.singletonList(new SortSpec<>(CustomSortBuilder.NAME, CustomSortBuilder::new, CustomSortBuilder::fromXContent)); + } +} diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 5eb58b2a06768..1afb71f416015 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -178,7 +178,6 @@ import org.opensearch.indices.analysis.AnalysisModule; import org.opensearch.indices.breaker.NoneCircuitBreakerService; import org.opensearch.indices.cluster.IndicesClusterStateService; -import org.opensearch.indices.flush.SyncedFlushService; import org.opensearch.indices.mapper.MapperRegistry; import org.opensearch.indices.recovery.PeerRecoverySourceService; import org.opensearch.indices.recovery.PeerRecoveryTargetService; @@ -1842,7 +1841,6 @@ public void onFailure(final Exception e) { new NodeMappingRefreshAction(transportService, metadataMappingService), repositoriesService, mock(SearchService.class), - new SyncedFlushService(indicesService, clusterService, transportService, indexNameExpressionResolver), new PeerRecoverySourceService(transportService, indicesService, recoverySettings), snapshotShardsService, new PrimaryReplicaSyncer( diff --git a/server/src/test/java/org/opensearch/test/search/aggregations/bucket/SharedSignificantTermsTestMethods.java b/server/src/test/java/org/opensearch/test/search/aggregations/bucket/SharedSignificantTermsTestMethods.java index 45139cedeeb37..8e33f09f7c3e9 100644 --- a/server/src/test/java/org/opensearch/test/search/aggregations/bucket/SharedSignificantTermsTestMethods.java +++ b/server/src/test/java/org/opensearch/test/search/aggregations/bucket/SharedSignificantTermsTestMethods.java @@ -73,7 +73,6 @@ public static void aggregateAndCheckFromSeveralShards(OpenSearchIntegTestCase te private static void checkSignificantTermsAggregationCorrect(OpenSearchIntegTestCase testCase) { SearchResponse response = client().prepareSearch(INDEX_NAME) - .setTypes(DOC_TYPE) .addAggregation(terms("class").field(CLASS_FIELD).subAggregation(significantTerms("sig_terms").field(TEXT_FIELD))) .execute() .actionGet(); diff --git a/server/src/test/java/org/opensearch/transport/InboundHandlerTests.java b/server/src/test/java/org/opensearch/transport/InboundHandlerTests.java index 882a783b66792..4076e7229ebf7 100644 --- a/server/src/test/java/org/opensearch/transport/InboundHandlerTests.java +++ b/server/src/test/java/org/opensearch/transport/InboundHandlerTests.java @@ -34,6 +34,7
@@ import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; +import org.apache.lucene.util.BytesRef; import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.action.ActionListener; @@ -57,13 +58,17 @@ import org.junit.After; import org.junit.Before; +import java.io.EOFException; import java.io.IOException; import java.io.InputStream; +import java.nio.ByteBuffer; import java.util.Collections; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.startsWith; import static org.hamcrest.Matchers.instanceOf; public class InboundHandlerTests extends OpenSearchTestCase { @@ -75,16 +80,24 @@ public class InboundHandlerTests extends OpenSearchTestCase { private Transport.ResponseHandlers responseHandlers; private Transport.RequestHandlers requestHandlers; private InboundHandler handler; + private OutboundHandler outboundHandler; private FakeTcpChannel channel; @Before public void setUp() throws Exception { super.setUp(); taskManager = new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet()); - channel = new FakeTcpChannel(randomBoolean(), buildNewFakeTransportAddress().address(), buildNewFakeTransportAddress().address()); + channel = new FakeTcpChannel(randomBoolean(), buildNewFakeTransportAddress().address(), buildNewFakeTransportAddress().address()) { + public void sendMessage(BytesReference reference, org.opensearch.action.ActionListener listener) { + super.sendMessage(reference, listener); + if (listener != null) { + listener.onResponse(null); + } + } + }; NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); TransportHandshaker handshaker = new TransportHandshaker(version, threadPool, (n, c, r, v) -> {}); - OutboundHandler outboundHandler = new OutboundHandler( + outboundHandler = new OutboundHandler( "node", version, new String[0], @@ -211,7 +224,7 @@ public TestResponse read(StreamInput in) throws IOException { BytesReference fullResponseBytes = channel.getMessageCaptor().get(); BytesReference responseContent = fullResponseBytes.slice(headerSize, fullResponseBytes.length() - headerSize); - Header responseHeader = new Header(fullRequestBytes.length() - 6, requestId, responseStatus, version); + Header responseHeader = new Header(fullResponseBytes.length() - 6, requestId, responseStatus, version); InboundMessage responseMessage = new InboundMessage(responseHeader, ReleasableBytesReference.wrap(responseContent), () -> {}); responseHeader.finishParsingHeader(responseMessage.openOrGetStreamInput()); handler.inboundMessage(channel, responseMessage); @@ -326,6 +339,317 @@ public void testLogsSlowInboundProcessing() throws Exception { } } + public void testRequestNotFullyRead() throws Exception { + String action = "test-request"; + int headerSize = TcpHeader.headerSize(version); + AtomicReference exceptionCaptor = new AtomicReference<>(); + + long requestId = responseHandlers.add(new Transport.ResponseContext<>(new TransportResponseHandler() { + @Override + public void handleResponse(TestResponse response) {} + + @Override + public void handleException(TransportException exp) { + exceptionCaptor.set(exp); + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } + + @Override + public TestResponse read(StreamInput in) throws IOException { + return new TestResponse(in); + } + 
}, null, action)); + + RequestHandlerRegistry registry = new RequestHandlerRegistry<>( + action, + TestRequest::new, + taskManager, + (request, channel, task) -> {}, + ThreadPool.Names.SAME, + false, + true + ); + + requestHandlers.registerHandler(registry); + String requestValue = randomAlphaOfLength(10); + OutboundMessage.Request request = new OutboundMessage.Request( + threadPool.getThreadContext(), + new String[0], + new TestRequest(requestValue), + version, + action, + requestId, + false, + false + ); + + outboundHandler.setMessageListener(new TransportMessageListener() { + @Override + public void onResponseSent(long requestId, String action, Exception error) { + exceptionCaptor.set(error); + } + }); + + // Create the request payload with 1 byte overflow + final BytesRef bytes = request.serialize(new BytesStreamOutput()).toBytesRef(); + final ByteBuffer buffer = ByteBuffer.allocate(bytes.length + 1); + buffer.put(bytes.bytes, 0, bytes.length); + buffer.put((byte) 1); + + BytesReference fullRequestBytes = BytesReference.fromByteBuffer((ByteBuffer) buffer.flip()); + BytesReference requestContent = fullRequestBytes.slice(headerSize, fullRequestBytes.length() - headerSize); + Header requestHeader = new Header(fullRequestBytes.length() - 6, requestId, TransportStatus.setRequest((byte) 0), version); + InboundMessage requestMessage = new InboundMessage(requestHeader, ReleasableBytesReference.wrap(requestContent), () -> {}); + requestHeader.finishParsingHeader(requestMessage.openOrGetStreamInput()); + handler.inboundMessage(channel, requestMessage); + + assertThat(exceptionCaptor.get(), instanceOf(IllegalStateException.class)); + assertThat(exceptionCaptor.get().getMessage(), startsWith("Message not fully read (request) for requestId")); + } + + public void testRequestFullyReadButMoreDataIsAvailable() throws Exception { + String action = "test-request"; + int headerSize = TcpHeader.headerSize(version); + AtomicReference exceptionCaptor = new AtomicReference<>(); + + long requestId = responseHandlers.add(new Transport.ResponseContext<>(new TransportResponseHandler() { + @Override + public void handleResponse(TestResponse response) {} + + @Override + public void handleException(TransportException exp) { + exceptionCaptor.set(exp); + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } + + @Override + public TestResponse read(StreamInput in) throws IOException { + return new TestResponse(in); + } + }, null, action)); + + RequestHandlerRegistry registry = new RequestHandlerRegistry<>( + action, + TestRequest::new, + taskManager, + (request, channel, task) -> {}, + ThreadPool.Names.SAME, + false, + true + ); + + requestHandlers.registerHandler(registry); + String requestValue = randomAlphaOfLength(10); + OutboundMessage.Request request = new OutboundMessage.Request( + threadPool.getThreadContext(), + new String[0], + new TestRequest(requestValue), + version, + action, + requestId, + false, + false + ); + + outboundHandler.setMessageListener(new TransportMessageListener() { + @Override + public void onResponseSent(long requestId, String action, Exception error) { + exceptionCaptor.set(error); + } + }); + + final BytesReference fullRequestBytes = request.serialize(new BytesStreamOutput()); + // Create the request payload by intentionally stripping 1 byte away + BytesReference requestContent = fullRequestBytes.slice(headerSize, fullRequestBytes.length() - headerSize - 1); + Header requestHeader = new Header(fullRequestBytes.length() - 6, requestId, 
TransportStatus.setRequest((byte) 0), version); + InboundMessage requestMessage = new InboundMessage(requestHeader, ReleasableBytesReference.wrap(requestContent), () -> {}); + requestHeader.finishParsingHeader(requestMessage.openOrGetStreamInput()); + handler.inboundMessage(channel, requestMessage); + + assertThat(exceptionCaptor.get(), instanceOf(IllegalStateException.class)); + assertThat(exceptionCaptor.get().getCause(), instanceOf(EOFException.class)); + assertThat(exceptionCaptor.get().getMessage(), startsWith("Message fully read (request) but more data is expected for requestId")); + } + + public void testResponseNotFullyRead() throws Exception { + String action = "test-request"; + int headerSize = TcpHeader.headerSize(version); + AtomicReference requestCaptor = new AtomicReference<>(); + AtomicReference exceptionCaptor = new AtomicReference<>(); + AtomicReference responseCaptor = new AtomicReference<>(); + AtomicReference channelCaptor = new AtomicReference<>(); + + long requestId = responseHandlers.add(new Transport.ResponseContext<>(new TransportResponseHandler() { + @Override + public void handleResponse(TestResponse response) { + responseCaptor.set(response); + } + + @Override + public void handleException(TransportException exp) { + exceptionCaptor.set(exp); + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } + + @Override + public TestResponse read(StreamInput in) throws IOException { + return new TestResponse(in); + } + }, null, action)); + RequestHandlerRegistry registry = new RequestHandlerRegistry<>( + action, + TestRequest::new, + taskManager, + (request, channel, task) -> { + channelCaptor.set(channel); + requestCaptor.set(request); + }, + ThreadPool.Names.SAME, + false, + true + ); + requestHandlers.registerHandler(registry); + String requestValue = randomAlphaOfLength(10); + OutboundMessage.Request request = new OutboundMessage.Request( + threadPool.getThreadContext(), + new String[0], + new TestRequest(requestValue), + version, + action, + requestId, + false, + false + ); + + BytesReference fullRequestBytes = request.serialize(new BytesStreamOutput()); + BytesReference requestContent = fullRequestBytes.slice(headerSize, fullRequestBytes.length() - headerSize); + Header requestHeader = new Header(fullRequestBytes.length() - 6, requestId, TransportStatus.setRequest((byte) 0), version); + InboundMessage requestMessage = new InboundMessage(requestHeader, ReleasableBytesReference.wrap(requestContent), () -> {}); + requestHeader.finishParsingHeader(requestMessage.openOrGetStreamInput()); + handler.inboundMessage(channel, requestMessage); + + TransportChannel transportChannel = channelCaptor.get(); + assertEquals(Version.CURRENT, transportChannel.getVersion()); + assertEquals("transport", transportChannel.getChannelType()); + assertEquals(requestValue, requestCaptor.get().value); + + String responseValue = randomAlphaOfLength(10); + byte responseStatus = TransportStatus.setResponse((byte) 0); + transportChannel.sendResponse(new TestResponse(responseValue)); + + // Create the response payload with 1 byte overflow + final BytesRef bytes = channel.getMessageCaptor().get().toBytesRef(); + final ByteBuffer buffer = ByteBuffer.allocate(bytes.length + 1); + buffer.put(bytes.bytes, 0, bytes.length); + buffer.put((byte) 1); + + BytesReference fullResponseBytes = BytesReference.fromByteBuffer((ByteBuffer) buffer.flip()); + BytesReference responseContent = fullResponseBytes.slice(headerSize, fullResponseBytes.length() - headerSize); + Header 
responseHeader = new Header(fullResponseBytes.length() - 6, requestId, responseStatus, version); + InboundMessage responseMessage = new InboundMessage(responseHeader, ReleasableBytesReference.wrap(responseContent), () -> {}); + responseHeader.finishParsingHeader(responseMessage.openOrGetStreamInput()); + handler.inboundMessage(channel, responseMessage); + + assertThat(exceptionCaptor.get(), instanceOf(RemoteTransportException.class)); + assertThat(exceptionCaptor.get().getCause(), instanceOf(TransportSerializationException.class)); + assertThat(exceptionCaptor.get().getMessage(), containsString("Failed to deserialize response from handler")); + } + + public void testResponseFullyReadButMoreDataIsAvailable() throws Exception { + String action = "test-request"; + int headerSize = TcpHeader.headerSize(version); + AtomicReference requestCaptor = new AtomicReference<>(); + AtomicReference exceptionCaptor = new AtomicReference<>(); + AtomicReference responseCaptor = new AtomicReference<>(); + AtomicReference channelCaptor = new AtomicReference<>(); + + long requestId = responseHandlers.add(new Transport.ResponseContext<>(new TransportResponseHandler() { + @Override + public void handleResponse(TestResponse response) { + responseCaptor.set(response); + } + + @Override + public void handleException(TransportException exp) { + exceptionCaptor.set(exp); + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } + + @Override + public TestResponse read(StreamInput in) throws IOException { + return new TestResponse(in); + } + }, null, action)); + RequestHandlerRegistry registry = new RequestHandlerRegistry<>( + action, + TestRequest::new, + taskManager, + (request, channel, task) -> { + channelCaptor.set(channel); + requestCaptor.set(request); + }, + ThreadPool.Names.SAME, + false, + true + ); + requestHandlers.registerHandler(registry); + String requestValue = randomAlphaOfLength(10); + OutboundMessage.Request request = new OutboundMessage.Request( + threadPool.getThreadContext(), + new String[0], + new TestRequest(requestValue), + version, + action, + requestId, + false, + false + ); + + BytesReference fullRequestBytes = request.serialize(new BytesStreamOutput()); + BytesReference requestContent = fullRequestBytes.slice(headerSize, fullRequestBytes.length() - headerSize); + Header requestHeader = new Header(fullRequestBytes.length() - 6, requestId, TransportStatus.setRequest((byte) 0), version); + InboundMessage requestMessage = new InboundMessage(requestHeader, ReleasableBytesReference.wrap(requestContent), () -> {}); + requestHeader.finishParsingHeader(requestMessage.openOrGetStreamInput()); + handler.inboundMessage(channel, requestMessage); + + TransportChannel transportChannel = channelCaptor.get(); + assertEquals(Version.CURRENT, transportChannel.getVersion()); + assertEquals("transport", transportChannel.getChannelType()); + assertEquals(requestValue, requestCaptor.get().value); + + String responseValue = randomAlphaOfLength(10); + byte responseStatus = TransportStatus.setResponse((byte) 0); + transportChannel.sendResponse(new TestResponse(responseValue)); + + BytesReference fullResponseBytes = channel.getMessageCaptor().get(); + // Create the response payload by intentionally stripping 1 byte away + BytesReference responseContent = fullResponseBytes.slice(headerSize, fullResponseBytes.length() - headerSize - 1); + Header responseHeader = new Header(fullResponseBytes.length() - 6, requestId, responseStatus, version); + InboundMessage responseMessage = new 
InboundMessage(responseHeader, ReleasableBytesReference.wrap(responseContent), () -> {}); + responseHeader.finishParsingHeader(responseMessage.openOrGetStreamInput()); + handler.inboundMessage(channel, responseMessage); + + assertThat(exceptionCaptor.get(), instanceOf(RemoteTransportException.class)); + assertThat(exceptionCaptor.get().getCause(), instanceOf(TransportSerializationException.class)); + assertThat(exceptionCaptor.get().getMessage(), containsString("Failed to deserialize response from handler")); + } + private static InboundMessage unreadableInboundHandshake(Version remoteVersion, Header requestHeader) { return new InboundMessage(requestHeader, ReleasableBytesReference.wrap(BytesArray.EMPTY), () -> {}) { @Override diff --git a/server/src/test/resources/org/opensearch/action/bulk/bulk-log.json b/server/src/test/resources/org/opensearch/action/bulk/bulk-log.json index 9c3663c3f63bc..05fccca8ca91d 100644 --- a/server/src/test/resources/org/opensearch/action/bulk/bulk-log.json +++ b/server/src/test/resources/org/opensearch/action/bulk/bulk-log.json @@ -1,24 +1,24 @@ -{"index":{"_index":"logstash-2014.03.30","_type":"logs"}} +{"index":{"_index":"logstash-2014.03.30"}} {"message":"in24.inetnebr.com--[01/Aug/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"} -{"index":{"_index":"logstash-2014.03.30","_type":"logs"}} +{"index":{"_index":"logstash-2014.03.30"}} {"message":"in24.inetnebr.com--[01/Aug/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"} -{"index":{"_index":"logstash-2014.03.30","_type":"logs"}} +{"index":{"_index":"logstash-2014.03.30"}} {"message":"in24.inetnebr.com--[01/Aug2/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"} -{"index":{"_index":"logstash-2014.03.30","_type":"logs"}} +{"index":{"_index":"logstash-2014.03.30"}} {"message":"in24.inetnebr.com--[01/Aug/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"} -{"index":{"_index":"logstash-2014.03.30","_type":"logs"}} +{"index":{"_index":"logstash-2014.03.30"}} 
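A note on the recurring `fullRequestBytes.length() - 6` in the transport tests above: every frame starts with a fixed prefix, the two-byte 'E','S' marker plus a four-byte length int, and the Header object carries only the size of what follows that prefix. A sketch of the arithmetic (class and constant names invented here; the real code reads them from TcpHeader):

```java
public class TcpFrameSketch {
    // Fixed frame prefix preceding every transport message.
    static final int MARKER_BYTES = 2;                             // 'E', 'S'
    static final int MESSAGE_LENGTH_BYTES = Integer.BYTES;         // length int after the marker
    static final int PREFIX = MARKER_BYTES + MESSAGE_LENGTH_BYTES; // = 6

    // The Header carries the size of everything after the prefix, hence "length() - 6".
    static int networkMessageSize(int frameLength) {
        return frameLength - PREFIX;
    }
}
```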
{"message":"in24.inetnebr.com--[01/Aug/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"} -{"index":{"_index":"logstash-2014.03.30","_type":"logs"}} +{"index":{"_index":"logstash-2014.03.30"}} {"message":"in24.inetnebr.com--[01/Aug2/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"} -{"index":{"_index":"logstash-2014.03.30","_type":"logs"}} +{"index":{"_index":"logstash-2014.03.30"}} {"message":"in24.inetnebr.com--[01/Aug/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"} -{"index":{"_index":"logstash-2014.03.30","_type":"logs"}} +{"index":{"_index":"logstash-2014.03.30"}} {"message":"in24.inetnebr.com--[01/Aug/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"} -{"index":{"_index":"logstash-2014.03.30","_type":"logs"}} +{"index":{"_index":"logstash-2014.03.30"}} {"message":"in24.inetnebr.com--[01/Aug2/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"} -{"index":{"_index":"logstash-2014.03.30","_type":"logs"}} +{"index":{"_index":"logstash-2014.03.30"}} {"message":"in24.inetnebr.com--[01/Aug/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"} -{"index":{"_index":"logstash-2014.03.30","_type":"logs"}} +{"index":{"_index":"logstash-2014.03.30"}} 
{"message":"in24.inetnebr.com--[01/Aug/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"} -{"index":{"_index":"logstash-2014.03.30","_type":"logs"}} +{"index":{"_index":"logstash-2014.03.30"}} {"message":"in24.inetnebr.com--[01/Aug2/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"} diff --git a/server/src/test/resources/org/opensearch/action/bulk/simple-bulk.json b/server/src/test/resources/org/opensearch/action/bulk/simple-bulk.json index cf76477187524..e36d1b7fc00b8 100644 --- a/server/src/test/resources/org/opensearch/action/bulk/simple-bulk.json +++ b/server/src/test/resources/org/opensearch/action/bulk/simple-bulk.json @@ -1,5 +1,5 @@ -{ "index":{"_index":"test","_type":"type1","_id":"1"} } +{ "index":{"_index":"test","_id":"1"} } { "field1" : "value1" } -{ "delete" : { "_index" : "test", "_type" : "type1", "_id" : "2" } } -{ "create" : { "_index" : "test", "_type" : "type1", "_id" : "3" } } +{ "delete" : { "_index" : "test", "_id" : "2" } } +{ "create" : { "_index" : "test", "_id" : "3" } } { "field1" : "value3" } diff --git a/server/src/test/resources/org/opensearch/action/bulk/simple-bulk10.json b/server/src/test/resources/org/opensearch/action/bulk/simple-bulk10.json index 3556dc261b037..7721d6f073fbd 100644 --- a/server/src/test/resources/org/opensearch/action/bulk/simple-bulk10.json +++ b/server/src/test/resources/org/opensearch/action/bulk/simple-bulk10.json @@ -1,15 +1,15 @@ -{ "index" : {"_index":null, "_type":"type1", "_id":"0"} } +{ "index" : {"_index":null, "_id":"0"} } { "field1" : "value1" } -{ "index" : {"_index":"test", "_type":null, "_id":"0"} } +{ "index" : {"_index":"test", "_id":"0"} } { "field1" : "value1" } -{ "index" : {"_index":"test", "_type":"type1", "_id":null} } +{ "index" : {"_index":"test", "_id":null} } { "field1" : "value1" } -{ "delete" : {"_index":null, "_type":"type1", "_id":"0"} } -{ "delete" : {"_index":"test", "_type":null, "_id":"0"} } -{ "delete" : {"_index":"test", "_type":"type1", "_id":null} } -{ "create" : {"_index":null, "_type":"type1", "_id":"0"} } +{ "delete" : {"_index":null, "_id":"0"} } +{ "delete" : {"_index":"test", "_id":"0"} } +{ "delete" : {"_index":"test", "_id":null} } +{ "create" : {"_index":null, "_id":"0"} } { "field1" : "value1" } -{ "create" : {"_index":"test", "_type":null, "_id":"0"} } +{ "create" : {"_index":"test", "_id":"0"} } { "field1" : "value1" } -{ "create" : {"_index":"test", "_type":"type1", "_id":null} } +{ "create" : {"_index":"test", "_id":null} } { "field1" : "value1" } diff --git a/server/src/test/resources/org/opensearch/action/bulk/simple-bulk11.json b/server/src/test/resources/org/opensearch/action/bulk/simple-bulk11.json index 9be3c13061234..2242dd01c8145 100644 --- a/server/src/test/resources/org/opensearch/action/bulk/simple-bulk11.json +++ b/server/src/test/resources/org/opensearch/action/bulk/simple-bulk11.json @@ -1,5 +1,5 @@ 
-{ "index":{"_index":"test","_type":"type1","_id":"1"} } +{ "index":{"_index":"test","_id":"1"} } { "field1" : "value1" } -{ "delete" : { "_index" : "test", "_type" : "type1", "_id" : "2" } } -{ "create" : { "_index" : "test", "_type" : "type1", "_id" : "3" } } +{ "delete" : { "_index" : "test", "_id" : "2" } } +{ "create" : { "_index" : "test", "_id" : "3" } } { "field1" : "value3" } \ No newline at end of file diff --git a/server/src/test/resources/org/opensearch/action/bulk/simple-bulk4.json b/server/src/test/resources/org/opensearch/action/bulk/simple-bulk4.json index 94d95614568ca..e1911094e7d88 100644 --- a/server/src/test/resources/org/opensearch/action/bulk/simple-bulk4.json +++ b/server/src/test/resources/org/opensearch/action/bulk/simple-bulk4.json @@ -1,6 +1,6 @@ { "update" : {"_id" : "1", "retry_on_conflict" : 2} } { "doc" : {"field" : "value"} } -{ "update" : { "_id" : "0", "_type" : "type1", "_index" : "index1" } } +{ "update" : { "_id" : "0", "_index" : "index1" } } { "script" : { "source" : "counter += param1", "lang" : "javascript", "params" : {"param1" : 1}}, "upsert" : {"counter" : 1}} { "delete" : { "_id" : "2" } } { "create" : { "_id" : "3" } } diff --git a/server/src/test/resources/org/opensearch/action/bulk/simple-bulk5.json b/server/src/test/resources/org/opensearch/action/bulk/simple-bulk5.json index 6ad5ff3052f25..9e04df1af69ee 100644 --- a/server/src/test/resources/org/opensearch/action/bulk/simple-bulk5.json +++ b/server/src/test/resources/org/opensearch/action/bulk/simple-bulk5.json @@ -1,5 +1,5 @@ -{ "index": {"_type": "type1","_id": "1"} } +{ "index": { "_id": "1"} } { "field1" : "value1" } -{ "delete" : { "_type" : "type1", "_id" : "2" } } -{ "create" : { "_type" : "type1", "_id" : "3" } } +{ "delete" : { "_id" : "2" } } +{ "create" : { "_id" : "3" } } { "field1" : "value3" } diff --git a/server/src/test/resources/org/opensearch/action/bulk/simple-bulk6.json b/server/src/test/resources/org/opensearch/action/bulk/simple-bulk6.json index e9c97965595eb..86e8757af832d 100644 --- a/server/src/test/resources/org/opensearch/action/bulk/simple-bulk6.json +++ b/server/src/test/resources/org/opensearch/action/bulk/simple-bulk6.json @@ -1,6 +1,6 @@ -{"index": {"_index": "test", "_type": "doc", "_source": {"hello": "world"}, "_id": 0}} +{"index": {"_index": "test", "_source": {"hello": "world"}, "_id": 0}} {"field1": "value0"} -{"index": {"_index": "test", "_type": "doc", "_id": 1}} +{"index": {"_index": "test", "_id": 1}} {"field1": "value1"} -{"index": {"_index": "test", "_type": "doc", "_id": 2}} +{"index": {"_index": "test", "_id": 2}} {"field1": "value2"} diff --git a/server/src/test/resources/org/opensearch/action/bulk/simple-bulk7.json b/server/src/test/resources/org/opensearch/action/bulk/simple-bulk7.json index 669bfd10798e9..cd742def27e9f 100644 --- a/server/src/test/resources/org/opensearch/action/bulk/simple-bulk7.json +++ b/server/src/test/resources/org/opensearch/action/bulk/simple-bulk7.json @@ -1,6 +1,6 @@ -{"index": {"_index": "test", "_type": "doc", "_id": 0}} +{"index": {"_index": "test", "_id": 0}} {"field1": "value0"} -{"index": {"_index": "test", "_type": "doc", "_id": 1}} +{"index": {"_index": "test", "_id": 1}} {"field1": "value1"} -{"index": {"_index": "test", "_type": "doc", "_id": 2, "_unknown": ["foo", "bar"]}} +{"index": {"_index": "test", "_id": 2, "_unknown": ["foo", "bar"]}} {"field1": "value2"} diff --git a/server/src/test/resources/org/opensearch/action/bulk/simple-bulk8.json 
b/server/src/test/resources/org/opensearch/action/bulk/simple-bulk8.json index c1a94b1d159d0..27d855258ed72 100644 --- a/server/src/test/resources/org/opensearch/action/bulk/simple-bulk8.json +++ b/server/src/test/resources/org/opensearch/action/bulk/simple-bulk8.json @@ -1,6 +1,6 @@ -{"index": {"_index": "test", "_type": "doc", "_id": 0}} +{"index": {"_index": "test", "_id": 0}} {"field1": "value0"} -{"index": {"_index": "test", "_type": "doc", "_id": 1, "_foo": "bar"}} +{"index": {"_index": "test", "_id": 1, "_foo": "bar"}} {"field1": "value1"} -{"index": {"_index": "test", "_type": "doc", "_id": 2}} +{"index": {"_index": "test", "_id": 2}} {"field1": "value2"} diff --git a/server/src/test/resources/org/opensearch/action/search/simple-msearch1.json b/server/src/test/resources/org/opensearch/action/search/simple-msearch1.json index 4749745cf0cb3..a40bc64471f34 100644 --- a/server/src/test/resources/org/opensearch/action/search/simple-msearch1.json +++ b/server/src/test/resources/org/opensearch/action/search/simple-msearch1.json @@ -1,6 +1,6 @@ {"index":"test", "ignore_unavailable" : true, "expand_wildcards" : "open,closed"} {"query" : {"match_all" :{}}} -{"index" : "test", "type" : "type1", "expand_wildcards" : ["open", "closed"]} +{"index" : "test", "expand_wildcards" : ["open", "closed"]} {"query" : {"match_all" :{}}} {"index":"test", "ignore_unavailable" : false, "expand_wildcards" : ["open"]} {"query" : {"match_all" :{}}} diff --git a/server/src/test/resources/org/opensearch/action/search/simple-msearch2.json b/server/src/test/resources/org/opensearch/action/search/simple-msearch2.json index 79330d80f7267..ef82fee039638 100644 --- a/server/src/test/resources/org/opensearch/action/search/simple-msearch2.json +++ b/server/src/test/resources/org/opensearch/action/search/simple-msearch2.json @@ -1,6 +1,6 @@ {"index":"test"} {"query" : {"match_all" : {}}} -{"index" : "test", "type" : "type1"} +{"index" : "test"} {"query" : {"match_all" : {}}} {} {"query" : {"match_all" : {}}} diff --git a/server/src/test/resources/org/opensearch/action/search/simple-msearch3.json b/server/src/test/resources/org/opensearch/action/search/simple-msearch3.json index a6b52fd3bf93e..f7ff9a2b3f991 100644 --- a/server/src/test/resources/org/opensearch/action/search/simple-msearch3.json +++ b/server/src/test/resources/org/opensearch/action/search/simple-msearch3.json @@ -1,8 +1,8 @@ {"index":["test0", "test1"]} {"query" : {"match_all" : {}}} -{"index" : "test2,test3", "type" : "type1"} +{"index" : "test2,test3"} {"query" : {"match_all" : {}}} -{"index" : ["test4", "test1"], "type" : [ "type2", "type1" ]} +{"index" : ["test4", "test1"]} {"query" : {"match_all" : {}}} {"search_type" : "dfs_query_then_fetch"} {"query" : {"match_all" : {}}} diff --git a/server/src/test/resources/org/opensearch/action/search/simple-msearch4.json b/server/src/test/resources/org/opensearch/action/search/simple-msearch4.json index 844d8bea1f8ee..4dd2cfde569dd 100644 --- a/server/src/test/resources/org/opensearch/action/search/simple-msearch4.json +++ b/server/src/test/resources/org/opensearch/action/search/simple-msearch4.json @@ -1,6 +1,6 @@ {"index":["test0", "test1"], "request_cache": true} {"query" : {"match_all" : {}}} -{"index" : "test2,test3", "type" : "type1", "preference": "_local"} +{"index" : "test2,test3", "preference": "_local"} {"query" : {"match_all" : {}}} -{"index" : ["test4", "test1"], "type" : [ "type2", "type1" ], "routing": "123"} +{"index" : ["test4", "test1"], "routing": "123"} {"query" : {"match_all" : {}}} diff 
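The msearch fixtures above lose their per-line "type" key the same way. A hypothetical Java equivalent of what remains on each line (index names, preference, and routing copied from the fixtures purely as examples):

```java
import org.opensearch.action.search.MultiSearchRequest;
import org.opensearch.action.search.SearchRequest;

public class TypelessMsearchSketch {
    static MultiSearchRequest build() {
        // Each entry now names only indices and request options, no types.
        return new MultiSearchRequest().add(new SearchRequest("test2", "test3").preference("_local"))
            .add(new SearchRequest("test4", "test1").routing("123"));
    }
}
```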
--git a/server/src/test/resources/org/opensearch/action/termvectors/multiRequest1.json b/server/src/test/resources/org/opensearch/action/termvectors/multiRequest1.json index fcb5e3a927ad9..1cfc14ce50a95 100644 --- a/server/src/test/resources/org/opensearch/action/termvectors/multiRequest1.json +++ b/server/src/test/resources/org/opensearch/action/termvectors/multiRequest1.json @@ -7,7 +7,6 @@ "offsets":false, "positions":false, "fields":["a","b","c"], - "_index": "testidx", - "_type":"test" + "_index": "testidx" } -} \ No newline at end of file +} diff --git a/server/src/test/resources/org/opensearch/action/termvectors/multiRequest2.json b/server/src/test/resources/org/opensearch/action/termvectors/multiRequest2.json index a0709effe709b..bb5dcc1b6abe7 100644 --- a/server/src/test/resources/org/opensearch/action/termvectors/multiRequest2.json +++ b/server/src/test/resources/org/opensearch/action/termvectors/multiRequest2.json @@ -8,8 +8,7 @@ "offsets": false, "positions": false, "fields":["a","b","c"], - "_index": "testidx", - "_type": "test" + "_index": "testidx" }, { "_id": "2", @@ -19,8 +18,7 @@ "offsets": false, "positions": false, "fields":["a","b","c"], - "_index": "testidx", - "_type": "test" + "_index": "testidx" } ] -} \ No newline at end of file +} diff --git a/server/src/test/resources/org/opensearch/action/termvectors/multiRequest3.json b/server/src/test/resources/org/opensearch/action/termvectors/multiRequest3.json index 457f43cdc9aca..3157650aba974 100644 --- a/server/src/test/resources/org/opensearch/action/termvectors/multiRequest3.json +++ b/server/src/test/resources/org/opensearch/action/termvectors/multiRequest3.json @@ -2,7 +2,6 @@ "ids": ["1","2"], "parameters": { "_index": "testidx", - "_type": "test", "filter": { "max_num_terms": 20, "min_term_freq": 1, @@ -13,4 +12,4 @@ "max_word_length": 20 } } -} \ No newline at end of file +} diff --git a/server/src/test/resources/org/opensearch/index/analysis/test1.yml b/server/src/test/resources/org/opensearch/index/analysis/test1.yml index 6b5d47128eab4..b4828003223b0 100644 --- a/server/src/test/resources/org/opensearch/index/analysis/test1.yml +++ b/server/src/test/resources/org/opensearch/index/analysis/test1.yml @@ -25,6 +25,3 @@ index : custom6 : tokenizer : standard position_increment_gap: 256 - custom7 : - type : standard - version: 3.6 diff --git a/server/src/test/resources/org/opensearch/index/mapper/dynamictemplate/simple/test-mapping.json b/server/src/test/resources/org/opensearch/index/mapper/dynamictemplate/simple/test-mapping.json index 4b91bcfb36b5f..457fbdc668241 100644 --- a/server/src/test/resources/org/opensearch/index/mapper/dynamictemplate/simple/test-mapping.json +++ b/server/src/test/resources/org/opensearch/index/mapper/dynamictemplate/simple/test-mapping.json @@ -1,5 +1,5 @@ { - "person":{ + "_doc":{ "dynamic_templates":[ { "template_1":{ diff --git a/server/src/test/resources/org/opensearch/index/mapper/simple/test-mapping.json b/server/src/test/resources/org/opensearch/index/mapper/simple/test-mapping.json index 0f99af91ecb3a..55e462029ee6b 100644 --- a/server/src/test/resources/org/opensearch/index/mapper/simple/test-mapping.json +++ b/server/src/test/resources/org/opensearch/index/mapper/simple/test-mapping.json @@ -1,5 +1,5 @@ { - "person":{ + "_doc":{ "_meta":{ "param1":"value1" }, diff --git a/server/src/test/resources/org/opensearch/search/geo/gzippedmap.gz b/server/src/test/resources/org/opensearch/search/geo/gzippedmap.gz index f8894811a6c08..77dd01fbf5eed 100644 Binary files 
a/server/src/test/resources/org/opensearch/search/geo/gzippedmap.gz and b/server/src/test/resources/org/opensearch/search/geo/gzippedmap.gz differ diff --git a/settings.gradle b/settings.gradle index 32f56334e454d..14b850b5340b9 100644 --- a/settings.gradle +++ b/settings.gradle @@ -10,7 +10,7 @@ */ plugins { - id "com.gradle.enterprise" version "3.8" + id "com.gradle.enterprise" version "3.8.1" } rootProject.name = "OpenSearch" diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 2dfbb3e147205..d1040acd03aa7 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -38,9 +38,9 @@ dependencies { api "commons-codec:commons-codec:${versions.commonscodec}" api "org.apache.logging.log4j:log4j-core:${versions.log4j}" api "io.netty:netty-all:${versions.netty}" - api 'com.google.code.gson:gson:2.8.9' + api 'com.google.code.gson:gson:2.9.0' api "org.bouncycastle:bcpkix-jdk15on:${versions.bouncycastle}" api "com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:${versions.jackson}" api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" - api 'net.minidev:json-smart:2.4.7' + api 'net.minidev:json-smart:2.4.8' } diff --git a/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationTestCase.java b/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationTestCase.java index 26a96971ef9b2..e806548cee088 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationTestCase.java @@ -80,10 +80,8 @@ public abstract class OpenSearchAllocationTestCase extends OpenSearchTestCase { ) { @Override public Long getShardSize(ShardRouting shardRouting) { - assert shardRouting.recoverySource() - .getType() == RecoverySource.Type.SNAPSHOT : "Expecting a recovery source of type [SNAPSHOT] but got [" - + shardRouting.recoverySource().getType() - + ']'; + assert shardRouting.recoverySource().getType() == RecoverySource.Type.SNAPSHOT + : "Expecting a recovery source of type [SNAPSHOT] but got [" + shardRouting.recoverySource().getType() + ']'; throw new UnsupportedOperationException(); } }; diff --git a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java index 5e9c5396455e4..3bea08edb226b 100644 --- a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java @@ -71,7 +71,6 @@ import org.opensearch.common.Nullable; import org.opensearch.common.Randomness; import org.opensearch.common.Strings; -import org.opensearch.common.breaker.CircuitBreaker; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.compress.CompressedXContent; @@ -200,11 +199,7 @@ protected Settings indexSettings() { IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD.getKey(), between(10, 10 * IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD.get(Settings.EMPTY)) ) - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()) - .put( - IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), - randomBoolean() ? 
IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.get(Settings.EMPTY) : between(0, 1000) - ) + .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), between(0, 1000)) .build(); } @@ -352,8 +347,6 @@ public void tearDown() throws Exception { assertMaxSeqNoInCommitUserData(replicaEngine); assertAtMostOneLuceneDocumentPerSequenceNumber(replicaEngine); } - assertThat(engine.config().getCircuitBreakerService().getBreaker(CircuitBreaker.ACCOUNTING).getUsed(), equalTo(0L)); - assertThat(replicaEngine.config().getCircuitBreakerService().getBreaker(CircuitBreaker.ACCOUNTING).getUsed(), equalTo(0L)); } finally { IOUtils.close(replicaEngine, storeReplica, engine, store, () -> terminate(threadPool)); } @@ -977,7 +970,7 @@ public static Term newUid(ParsedDocument doc) { } protected Engine.Get newGet(boolean realtime, ParsedDocument doc) { - return new Engine.Get(realtime, realtime, doc.type(), doc.id(), newUid(doc)); + return new Engine.Get(realtime, realtime, doc.id(), newUid(doc)); } protected Engine.Index indexForDoc(ParsedDocument doc) { @@ -1362,13 +1355,9 @@ public static List readAllOperationsInLucene(Engine engine, /** * Reads all engine operations that have been processed by the engine from Lucene index/Translog based on source. */ - public static List readAllOperationsBasedOnSource( - Engine engine, - Engine.HistorySource historySource, - MapperService mapper - ) throws IOException { + public static List readAllOperationsBasedOnSource(Engine engine, MapperService mapper) throws IOException { final List operations = new ArrayList<>(); - try (Translog.Snapshot snapshot = engine.newChangesSnapshot("test", historySource, mapper, 0, Long.MAX_VALUE, false)) { + try (Translog.Snapshot snapshot = engine.newChangesSnapshot("test", mapper, 0, Long.MAX_VALUE, false)) { Translog.Operation op; while ((op = snapshot.next()) != null) { operations.add(op); @@ -1381,10 +1370,7 @@ public static List readAllOperationsBasedOnSource( * Asserts the provided engine has a consistent document history between translog and Lucene index. 
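The settings changes above stop randomizing whether soft deletes are enabled (they are now always on) and keep only the retention count variable. A minimal sketch of the resulting test settings, assuming the standard IndexSettings constant; the retention value is arbitrary:

```java
import org.opensearch.common.settings.Settings;
import org.opensearch.index.IndexSettings;

public class SoftDeleteSettingsSketch {
    static Settings retention(int ops) {
        // Soft deletes are implicitly enabled; only the retained-operations count varies.
        return Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), ops).build();
    }
}
```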
*/ public static void assertConsistentHistoryBetweenTranslogAndLuceneIndex(Engine engine, MapperService mapper) throws IOException { - if (mapper == null - || mapper.documentMapper() == null - || engine.config().getIndexSettings().isSoftDeleteEnabled() == false - || (engine instanceof InternalEngine) == false) { + if (mapper == null || mapper.documentMapper() == null || (engine instanceof InternalEngine) == false) { return; } final List translogOps = new ArrayList<>(); @@ -1406,8 +1392,12 @@ public static void assertConsistentHistoryBetweenTranslogAndLuceneIndex(Engine e final long globalCheckpoint = EngineTestCase.getTranslog(engine).getLastSyncedGlobalCheckpoint(); final long retainedOps = engine.config().getIndexSettings().getSoftDeleteRetentionOperations(); final long seqNoForRecovery; - try (Engine.IndexCommitRef safeCommit = engine.acquireSafeIndexCommit()) { - seqNoForRecovery = Long.parseLong(safeCommit.getIndexCommit().getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)) + 1; + if (engine.config().getIndexSettings().isSoftDeleteEnabled()) { + try (Engine.IndexCommitRef safeCommit = engine.acquireSafeIndexCommit()) { + seqNoForRecovery = Long.parseLong(safeCommit.getIndexCommit().getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)) + 1; + } + } else { + seqNoForRecovery = engine.getMinRetainedSeqNo(); } final long minSeqNoToRetain = Math.min(seqNoForRecovery, globalCheckpoint + 1 - retainedOps); for (Translog.Operation translogOp : translogOps) { diff --git a/test/framework/src/main/java/org/opensearch/index/mapper/FieldTypeTestCase.java b/test/framework/src/main/java/org/opensearch/index/mapper/FieldTypeTestCase.java index ebc17df9fa567..571f1b21dd7e3 100644 --- a/test/framework/src/main/java/org/opensearch/index/mapper/FieldTypeTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/mapper/FieldTypeTestCase.java @@ -64,10 +64,10 @@ public static List fetchSourceValue(MappedFieldType fieldType, Object sourceV public static List fetchSourceValue(MappedFieldType fieldType, Object sourceValue, String format) throws IOException { String field = fieldType.name(); - MapperService mapperService = mock(MapperService.class); - when(mapperService.sourcePath(field)).thenReturn(org.opensearch.common.collect.Set.of(field)); + QueryShardContext context = mock(QueryShardContext.class); + when(context.sourcePath(field)).thenReturn(org.opensearch.common.collect.Set.of(field)); - ValueFetcher fetcher = fieldType.valueFetcher(mapperService, null, format); + ValueFetcher fetcher = fieldType.valueFetcher(context, null, format); SourceLookup lookup = new SourceLookup(); lookup.setSource(Collections.singletonMap(field, sourceValue)); return fetcher.fetchValues(lookup); diff --git a/test/framework/src/main/java/org/opensearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/opensearch/index/mapper/MapperServiceTestCase.java index a3ee32b7fab57..fa0309ef165d4 100644 --- a/test/framework/src/main/java/org/opensearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/mapper/MapperServiceTestCase.java @@ -250,7 +250,7 @@ QueryShardContext createQueryShardContext(MapperService mapperService) { ); when(queryShardContext.allowExpensiveQueries()).thenReturn(true); when(queryShardContext.lookup()).thenReturn( - new SearchLookup(mapperService, (ft, s) -> { throw new UnsupportedOperationException("search lookup not available"); }, null) + new SearchLookup(mapperService, (ft, s) -> { throw new 
UnsupportedOperationException("search lookup not available"); }) ); return queryShardContext; } diff --git a/test/framework/src/main/java/org/opensearch/index/mapper/MapperTestCase.java b/test/framework/src/main/java/org/opensearch/index/mapper/MapperTestCase.java index fc5c7283ed8b3..a64193e55836d 100644 --- a/test/framework/src/main/java/org/opensearch/index/mapper/MapperTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/mapper/MapperTestCase.java @@ -293,7 +293,7 @@ protected final List<?> fetchFromDocValues(MapperService mapperService, MappedFi mapperService, iw -> { iw.addDocument(mapperService.documentMapper().parse(source(b -> b.field(ft.name(), sourceValue))).rootDoc()); }, iw -> { - SearchLookup lookup = new SearchLookup(mapperService, fieldDataLookup, null); + SearchLookup lookup = new SearchLookup(mapperService, fieldDataLookup); ValueFetcher valueFetcher = new DocValueFetcher(format, lookup.doc().getForField(ft)); IndexSearcher searcher = newSearcher(iw); LeafReaderContext context = searcher.getIndexReader().leaves().get(0); diff --git a/test/framework/src/main/java/org/opensearch/index/mapper/MockFieldMapper.java b/test/framework/src/main/java/org/opensearch/index/mapper/MockFieldMapper.java index 071329616fa05..6e504e0f8d20a 100644 --- a/test/framework/src/main/java/org/opensearch/index/mapper/MockFieldMapper.java +++ b/test/framework/src/main/java/org/opensearch/index/mapper/MockFieldMapper.java @@ -35,6 +35,7 @@ import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; +import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.lookup.SearchLookup; import java.util.Collections; @@ -77,7 +78,7 @@ public String typeName() { } @Override - public ValueFetcher valueFetcher(MapperService mapperService, SearchLookup searchLookup, String format) { + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { throw new UnsupportedOperationException(); } } diff --git a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java index 97443f6cba811..5bb4ee5f29f16 100644 --- a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java @@ -152,11 +152,7 @@ protected IndexMetadata buildIndexMetadata(int replicas, Settings indexSettings, .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, replicas) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()) - .put( - IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), - randomBoolean() ? 
IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.get(Settings.EMPTY) : between(0, 1000) - ) + .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), between(0, 1000)) .put(indexSettings) .build(); IndexMetadata.Builder metadata = IndexMetadata.builder(index.getName()).settings(settings).primaryTerm(0, randomIntBetween(1, 100)); @@ -250,7 +246,7 @@ protected EngineConfigFactory getEngineConfigFactory(IndexSettings indexSettings public int indexDocs(final int numOfDoc) throws Exception { for (int doc = 0; doc < numOfDoc; doc++) { - final IndexRequest indexRequest = new IndexRequest(index.getName(), "type", Integer.toString(docId.incrementAndGet())) + final IndexRequest indexRequest = new IndexRequest(index.getName()).id(Integer.toString(docId.incrementAndGet())) .source("{}", XContentType.JSON); final BulkItemResponse response = index(indexRequest); if (response.isFailed()) { @@ -264,7 +260,7 @@ public int indexDocs(final int numOfDoc) throws Exception { public int appendDocs(final int numOfDoc) throws Exception { for (int doc = 0; doc < numOfDoc; doc++) { - final IndexRequest indexRequest = new IndexRequest(index.getName(), "type").source("{}", XContentType.JSON); + final IndexRequest indexRequest = new IndexRequest(index.getName()).source("{}", XContentType.JSON); final BulkItemResponse response = index(indexRequest); if (response.isFailed()) { throw response.getFailure().getCause(); @@ -355,10 +351,8 @@ public IndexShard addReplica() throws IOException { } public synchronized void addReplica(IndexShard replica) throws IOException { - assert shardRoutings().stream() - .anyMatch(shardRouting -> shardRouting.isSameAllocation(replica.routingEntry())) == false : "replica with aId [" - + replica.routingEntry().allocationId() - + "] already exists"; + assert shardRoutings().stream().anyMatch(shardRouting -> shardRouting.isSameAllocation(replica.routingEntry())) == false + : "replica with aId [" + replica.routingEntry().allocationId() + "] already exists"; replicas.add(replica); if (replicationTargets != null) { replicationTargets.addReplica(replica); diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 3c5a047974253..cc0b2219377d5 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -273,11 +273,7 @@ protected IndexShard newShard( .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()) - .put( - IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), - randomBoolean() ? 
IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.get(Settings.EMPTY) : between(0, 1000) - ) + .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), between(0, 1000)) .put(settings) .build(); IndexMetadata.Builder metadata = IndexMetadata.builder(shardRouting.getIndexName()) diff --git a/test/framework/src/main/java/org/opensearch/ingest/RandomDocumentPicks.java index 70b290c38ceba..5d55f098a1f82 100644 --- a/test/framework/src/main/java/org/opensearch/ingest/RandomDocumentPicks.java +++ b/test/framework/src/main/java/org/opensearch/ingest/RandomDocumentPicks.java @@ -149,7 +149,6 @@ public static IngestDocument randomIngestDocument(Random random) { */ public static IngestDocument randomIngestDocument(Random random, Map<String, Object> source) { String index = randomString(random); - String type = randomString(random); String id = randomString(random); String routing = null; Long version = randomNonNegtiveLong(random); @@ -160,7 +159,7 @@ public static IngestDocument randomIngestDocument(Random random, Map - return new IngestDocument(index, type, id, routing, version, versionType, source); + return new IngestDocument(index, id, routing, version, versionType, source); } public static Map<String, Object> randomSource(Random random) { diff --git a/test/framework/src/main/java/org/opensearch/search/RandomSearchRequestGenerator.java index 1034fb4802c9a..ec8545e583d33 100644 --- a/test/framework/src/main/java/org/opensearch/search/RandomSearchRequestGenerator.java +++ b/test/framework/src/main/java/org/opensearch/search/RandomSearchRequestGenerator.java @@ -109,9 +109,6 @@ public static SearchRequest randomSearchRequest(Supplier<SearchSourceBuilder> ra if (randomBoolean()) { searchRequest.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); } - if (randomBoolean()) { - searchRequest.types(generateRandomStringArray(10, 10, false, false)); - } if (randomBoolean()) { searchRequest.preference(randomAlphaOfLengthBetween(3, 10)); } diff --git a/test/framework/src/main/java/org/opensearch/test/ExternalTestCluster.java index a96a37037d016..805e578a8e8db 100644 --- a/test/framework/src/main/java/org/opensearch/test/ExternalTestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/ExternalTestCluster.java @@ -222,14 +222,6 @@ public void ensureEstimatedStats() { stats.getBreaker().getStats(CircuitBreaker.FIELDDATA).getEstimated(), equalTo(0L) ); - assertThat( - "Accounting breaker not reset to " - + stats.getIndices().getSegments().getMemoryInBytes() - + " on node: " - + stats.getNode(), - stats.getBreaker().getStats(CircuitBreaker.ACCOUNTING).getEstimated(), - equalTo(stats.getIndices().getSegments().getMemoryInBytes()) - ); // ExternalTestCluster does not check the request breaker, // because checking it requires a network request, which in // turn increments the breaker, making it non-0 diff --git a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java index 6e1972f193948..5ae441ed651b1 100644 --- a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java @@ -95,7 +95,6 @@ import org.opensearch.index.Index; import org.opensearch.index.IndexService; import org.opensearch.index.IndexingPressure; -import org.opensearch.index.engine.CommitStats; import org.opensearch.index.engine.DocIdSeqNoAndSource; import 
org.opensearch.index.engine.Engine; import org.opensearch.index.engine.EngineTestCase; @@ -135,7 +134,6 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; -import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; @@ -456,9 +454,8 @@ public InternalTestCluster( * It's only possible to change {@link #bootstrapMasterNodeIndex} value if autoManageMasterNodes is false. */ public void setBootstrapMasterNodeIndex(int bootstrapMasterNodeIndex) { - assert autoManageMasterNodes == false - || bootstrapMasterNodeIndex == -1 : "bootstrapMasterNodeIndex should be -1 if autoManageMasterNodes is true, but was " - + bootstrapMasterNodeIndex; + assert autoManageMasterNodes == false || bootstrapMasterNodeIndex == -1 + : "bootstrapMasterNodeIndex should be -1 if autoManageMasterNodes is true, but was " + bootstrapMasterNodeIndex; this.bootstrapMasterNodeIndex = bootstrapMasterNodeIndex; } @@ -1252,48 +1249,11 @@ public void beforeIndexDeletion() throws Exception { // and not all docs have been purged after the test) and inherit from // ElasticsearchIntegrationTest must override beforeIndexDeletion() to avoid failures. assertNoPendingIndexOperations(); - // check that shards that have same sync id also contain same number of documents - assertSameSyncIdSameDocs(); assertAllPendingWriteLimitsReleased(); assertOpenTranslogReferences(); assertNoSnapshottedIndexCommit(); } - private void assertSameSyncIdSameDocs() { - Map<String, Long> docsOnShards = new HashMap<>(); - final Collection<NodeAndClient> nodesAndClients = nodes.values(); - for (NodeAndClient nodeAndClient : nodesAndClients) { - IndicesService indexServices = getInstance(IndicesService.class, nodeAndClient.name); - for (IndexService indexService : indexServices) { - for (IndexShard indexShard : indexService) { - try { - CommitStats commitStats = indexShard.commitStats(); - String syncId = commitStats.getUserData().get(Engine.SYNC_COMMIT_ID); - if (syncId != null) { - long liveDocsOnShard = commitStats.getNumDocs(); - if (docsOnShards.get(syncId) != null) { - assertThat( - "sync id is equal but number of docs does not match on node " - + nodeAndClient.name - + ". 
expected " - + docsOnShards.get(syncId) - + " but got " - + liveDocsOnShard, - docsOnShards.get(syncId), - equalTo(liveDocsOnShard) - ); - } else { - docsOnShards.put(syncId, liveDocsOnShard); - } - } - } catch (AlreadyClosedException e) { - // the engine is closed or if the shard is recovering - } - } - } - } - } - private void assertAllPendingWriteLimitsReleased() throws Exception { assertBusy(() -> { for (NodeAndClient nodeAndClient : nodes.values()) { @@ -2440,19 +2400,6 @@ public void ensureEstimatedStats() { CircuitBreaker fdBreaker = breakerService.getBreaker(CircuitBreaker.FIELDDATA); assertThat("Fielddata breaker not reset to 0 on node: " + name, fdBreaker.getUsed(), equalTo(0L)); - try { - assertBusy(() -> { - CircuitBreaker acctBreaker = breakerService.getBreaker(CircuitBreaker.ACCOUNTING); - assertThat( - "Accounting breaker not reset to 0 on node: " + name + ", are there still Lucene indices around?", - acctBreaker.getUsed(), - equalTo(0L) - ); - }); - } catch (Exception e) { - throw new AssertionError("Exception during check for accounting breaker reset to 0", e); - } - // Anything that uses transport or HTTP can increase the // request breaker (because they use bigarrays), because of // that the breaker can sometimes be incremented from ping diff --git a/test/framework/src/main/java/org/opensearch/test/MockIndexEventListener.java b/test/framework/src/main/java/org/opensearch/test/MockIndexEventListener.java index a6aba5269ec91..ecdae6a0e8b64 100644 --- a/test/framework/src/main/java/org/opensearch/test/MockIndexEventListener.java +++ b/test/framework/src/main/java/org/opensearch/test/MockIndexEventListener.java @@ -135,11 +135,6 @@ public void indexShardStateChanged( delegate.indexShardStateChanged(indexShard, previousState, currentState, reason); } - @Override - public void onShardInactive(IndexShard indexShard) { - delegate.onShardInactive(indexShard); - } - @Override public void beforeIndexCreated(Index index, Settings indexSettings) { delegate.beforeIndexCreated(index, indexSettings); diff --git a/test/framework/src/main/java/org/opensearch/test/MockLogAppender.java b/test/framework/src/main/java/org/opensearch/test/MockLogAppender.java index 2dfbb54bb1861..328aaf8a65b1f 100644 --- a/test/framework/src/main/java/org/opensearch/test/MockLogAppender.java +++ b/test/framework/src/main/java/org/opensearch/test/MockLogAppender.java @@ -68,8 +68,12 @@ public class MockLogAppender extends AbstractAppender implements AutoCloseable { * write to a closed MockLogAppender instance. */ public static MockLogAppender createForLoggers(Logger... loggers) throws IllegalAccessException { + return createForLoggers(".*(\n.*)*", loggers); + } + + public static MockLogAppender createForLoggers(String filter, Logger... 
loggers) throws IllegalAccessException { final MockLogAppender appender = new MockLogAppender( - RegexFilter.createFilter(".*(\n.*)*", new String[0], false, null, null), + RegexFilter.createFilter(filter, new String[0], false, null, null), Collections.unmodifiableList(Arrays.asList(loggers)) ); appender.start(); diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index 7cae8e30d0d74..dbc6dd012daee 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -197,7 +197,6 @@ import java.util.function.Function; import java.util.stream.Collectors; -import static org.opensearch.client.Requests.syncedFlushRequest; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.common.unit.TimeValue.timeValueMillis; @@ -748,7 +747,6 @@ public Settings indexSettings() { } // always default delayed allocation to 0 to make sure we have tests are not delayed builder.put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), 0); - builder.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); if (randomBoolean()) { builder.put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), between(0, 1000)); } @@ -874,13 +872,7 @@ public void assertResultsAndLogOnFailure(long expectedResults, SearchResponse se sb.append(value).append("] results. expected [").append(expectedResults).append("]"); String failMsg = sb.toString(); for (SearchHit hit : searchResponse.getHits().getHits()) { - sb.append("\n-> _index: [") .append(hit.getIndex()) .append("] type [") .append(hit.getType()) .append("] id [") .append(hit.getId()) .append("]"); + sb.append("\n-> _index: [").append(hit.getIndex()).append("] id [").append(hit.getId()).append("]"); } logger.warn("{}", sb); fail(failMsg); @@ -1539,10 +1531,9 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, List<IndexRequestBuilder> builders) throws InterruptedException { Random random = random(); - Map<String, Set<String>> indicesAndTypes = new HashMap<>(); + Set<String> indices = new HashSet<>(); for (IndexRequestBuilder builder : builders) { - final Set<String> types = indicesAndTypes.computeIfAbsent(builder.request().index(), index -> new HashSet<>()); - types.add(builder.request().type()); + indices.add(builder.request().index()); } Set<List<String>> bogusIds = new HashSet<>(); // (index, type, id) if (random.nextBoolean() && !builders.isEmpty() && dummyDocuments) { @@ -1551,22 +1542,18 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean ma final int numBogusDocs = scaledRandomIntBetween(1, builders.size() * 2); final int unicodeLen = between(1, 10); for (int i = 0; i < numBogusDocs; i++) { - String id = "bogus_doc_" - + randomRealisticUnicodeOfLength(unicodeLen) - + Integer.toString(dummmyDocIdGenerator.incrementAndGet()); - Map.Entry<String, Set<String>> indexAndTypes = RandomPicks.randomFrom(random, indicesAndTypes.entrySet()); - String index = indexAndTypes.getKey(); - String type = RandomPicks.randomFrom(random, indexAndTypes.getValue()); - bogusIds.add(Arrays.asList(index, type, id)); + String id = "bogus_doc_" + randomRealisticUnicodeOfLength(unicodeLen) + dummmyDocIdGenerator.incrementAndGet(); + String index = RandomPicks.randomFrom(random, indices); + 
bogusIds.add(Arrays.asList(index, id)); // We configure a routing key in case the mapping requires it - builders.add(client().prepareIndex(index, type, id).setSource("{}", XContentType.JSON).setRouting(id)); + builders.add(client().prepareIndex().setIndex(index).setId(id).setSource("{}", XContentType.JSON).setRouting(id)); } } Collections.shuffle(builders, random()); final CopyOnWriteArrayList<Tuple<IndexRequestBuilder, Exception>> errors = new CopyOnWriteArrayList<>(); List<CountDownLatch> inFlightAsyncOperations = new ArrayList<>(); // If you are indexing just a few documents then frequently do it one at a time. If many then frequently in bulk. - final String[] indices = indicesAndTypes.keySet().toArray(new String[0]); + final String[] indicesArray = indices.toArray(new String[] {}); if (builders.size() < FREQUENT_BULK_THRESHOLD ? frequently() : builders.size() < ALWAYS_BULK_THRESHOLD ? rarely() : false) { if (frequently()) { logger.info("Index [{}] docs async: [{}] bulk: [{}]", builders.size(), true, false); @@ -1574,13 +1561,13 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean ma indexRequestBuilder.execute( new PayloadLatchedActionListener<>(indexRequestBuilder, newLatch(inFlightAsyncOperations), errors) ); - postIndexAsyncActions(indices, inFlightAsyncOperations, maybeFlush); + postIndexAsyncActions(indicesArray, inFlightAsyncOperations, maybeFlush); } } else { logger.info("Index [{}] docs async: [{}] bulk: [{}]", builders.size(), false, false); for (IndexRequestBuilder indexRequestBuilder : builders) { indexRequestBuilder.execute().actionGet(); - postIndexAsyncActions(indices, inFlightAsyncOperations, maybeFlush); + postIndexAsyncActions(indicesArray, inFlightAsyncOperations, maybeFlush); } } } else { @@ -1616,15 +1603,15 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean ma // delete the bogus types again - it might trigger merges or at least holes in the segments and enforce deleted docs!
for (List<String> doc : bogusIds) { assertEquals( - "failed to delete a dummy doc [" + doc.get(0) + "][" + doc.get(2) + "]", + "failed to delete a dummy doc [" + doc.get(0) + "][" + doc.get(1) + "]", DocWriteResponse.Result.DELETED, - client().prepareDelete(doc.get(0), doc.get(1), doc.get(2)).setRouting(doc.get(2)).get().getResult() + client().prepareDelete(doc.get(0), null, doc.get(1)).setRouting(doc.get(1)).get().getResult() ); } } if (forceRefresh) { assertNoFailures( - client().admin().indices().prepareRefresh(indices).setIndicesOptions(IndicesOptions.lenientExpandOpen()).get() + client().admin().indices().prepareRefresh(indicesArray).setIndicesOptions(IndicesOptions.lenientExpandOpen()).get() ); } } @@ -1675,20 +1662,11 @@ private void postIndexAsyncActions(String[] indices, List<CountDownLatch> inFlig .setIndicesOptions(IndicesOptions.lenientExpandOpen()) .execute(new LatchedActionListener<>(newLatch(inFlightAsyncOperations))); } else if (maybeFlush && rarely()) { - if (randomBoolean()) { - client().admin() - .indices() - .prepareFlush(indices) - .setIndicesOptions(IndicesOptions.lenientExpandOpen()) - .execute(new LatchedActionListener<>(newLatch(inFlightAsyncOperations))); - } else { - client().admin() - .indices() - .syncedFlush( - syncedFlushRequest(indices).indicesOptions(IndicesOptions.lenientExpandOpen()), - new LatchedActionListener<>(newLatch(inFlightAsyncOperations)) - ); - } + client().admin() + .indices() + .prepareFlush(indices) + .setIndicesOptions(IndicesOptions.lenientExpandOpen()) + .execute(new LatchedActionListener<>(newLatch(inFlightAsyncOperations))); } else if (rarely()) { client().admin() .indices() diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java index 0b5474b21cba6..960400019f7ea 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java @@ -113,16 +113,7 @@ protected void startNode(long seed) throws Exception { .preparePutTemplate("random-soft-deletes-template") .setPatterns(Collections.singletonList("*")) .setOrder(0) - .setSettings( - Settings.builder() - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()) - .put( - IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), - randomBoolean() - ? 
IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.get(Settings.EMPTY) - : between(0, 1000) - ) - ) + .setSettings(Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), between(0, 1000))) .get(); } diff --git a/test/framework/src/main/java/org/opensearch/test/TestSearchContext.java b/test/framework/src/main/java/org/opensearch/test/TestSearchContext.java index a5c6eff15060d..0e91332892a55 100644 --- a/test/framework/src/main/java/org/opensearch/test/TestSearchContext.java +++ b/test/framework/src/main/java/org/opensearch/test/TestSearchContext.java @@ -109,7 +109,7 @@ public class TestSearchContext extends SearchContext { private int terminateAfter = DEFAULT_TERMINATE_AFTER; private SearchContextAggregations aggregations; private ScrollContext scrollContext; - + private FieldDoc searchAfter; private final long originNanoTime = System.nanoTime(); private final Map<String, SearchExtBuilder> searchExtBuilders = new HashMap<>(); @@ -393,13 +393,14 @@ public int trackTotalHitsUpTo() { } @Override - public SearchContext searchAfter(FieldDoc searchAfter) { - return null; + public SearchContext searchAfter(FieldDoc searchAfterDoc) { + this.searchAfter = searchAfterDoc; + return this; } @Override public FieldDoc searchAfter() { - return null; + return searchAfter; } @Override diff --git a/test/framework/src/main/java/org/opensearch/test/XContentTestUtils.java b/test/framework/src/main/java/org/opensearch/test/XContentTestUtils.java index 1a83a1d615de9..5945ac01b4547 100644 --- a/test/framework/src/main/java/org/opensearch/test/XContentTestUtils.java +++ b/test/framework/src/main/java/org/opensearch/test/XContentTestUtils.java @@ -284,8 +284,8 @@ public static BytesReference insertRandomFields( * */ static List<String> getInsertPaths(XContentParser parser, Stack<String> currentPath) throws IOException { - assert parser.currentToken() == XContentParser.Token.START_OBJECT - || parser.currentToken() == XContentParser.Token.START_ARRAY : "should only be called when new objects or arrays start"; + assert parser.currentToken() == XContentParser.Token.START_OBJECT || parser.currentToken() == XContentParser.Token.START_ARRAY + : "should only be called when new objects or arrays start"; List<String> validPaths = new ArrayList<>(); // parser.currentName() can be null for root object and unnamed objects in arrays if (parser.currentName() != null) { diff --git a/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchAssertions.java b/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchAssertions.java index e0e05bf103dbe..c37eb68a42836 100644 --- a/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchAssertions.java +++ b/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchAssertions.java @@ -269,14 +269,7 @@ public static void assertSearchHits(SearchResponse searchResponse, String... 
ids Set<String> idsSet = new HashSet<>(Arrays.asList(ids)); for (SearchHit hit : searchResponse.getHits()) { assertThat( - "id [" - + hit.getId() - + "] was found in search results but wasn't expected (type [" - + hit.getType() - + "], index [" - + hit.getIndex() - + "])" - + shardStatus, + "id [" + hit.getId() + "] was found in search results but wasn't expected (index [" + hit.getIndex() + "])" + shardStatus, idsSet.remove(hit.getId()), equalTo(true) ); @@ -318,13 +311,7 @@ public static void assertHitCount(SearchResponse countResponse, long expectedHit } public static void assertExists(GetResponse response) { - String message = String.format( - Locale.ROOT, - "Expected %s/%s/%s to exist, but does not", - response.getIndex(), - response.getType(), - response.getId() - ); + String message = String.format(Locale.ROOT, "Expected %s/%s to exist, but does not", response.getIndex(), response.getId()); assertThat(message, response.isExists(), is(true)); } @@ -553,10 +540,6 @@ public static Matcher<SearchHit> hasId(final String id) { return new OpenSearchMatchers.SearchHitHasIdMatcher(id); } - public static Matcher<SearchHit> hasType(final String type) { - return new OpenSearchMatchers.SearchHitHasTypeMatcher(type); - } - public static Matcher<SearchHit> hasIndex(final String index) { return new OpenSearchMatchers.SearchHitHasIndexMatcher(index); } diff --git a/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchMatchers.java b/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchMatchers.java index 38f569f8a4526..5889b7e269ed2 100644 --- a/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchMatchers.java +++ b/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchMatchers.java @@ -65,29 +65,6 @@ public void describeTo(final Description description) { } } - public static class SearchHitHasTypeMatcher extends TypeSafeMatcher<SearchHit> { - private String type; - - public SearchHitHasTypeMatcher(String type) { - this.type = type; - } - - @Override - public boolean matchesSafely(final SearchHit searchHit) { - return searchHit.getType().equals(type); - } - - @Override - public void describeMismatchSafely(final SearchHit searchHit, final Description mismatchDescription) { - mismatchDescription.appendText(" was ").appendValue(searchHit.getType()); - } - - @Override - public void describeTo(final Description description) { - description.appendText("searchHit type should be ").appendValue(type); - } - } - public static class SearchHitHasIndexMatcher extends TypeSafeMatcher<SearchHit> { private String index; diff --git a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java index 6405d5177f389..9603b63337842 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java @@ -71,7 +71,6 @@ import org.opensearch.core.internal.io.IOUtils; import org.opensearch.index.IndexSettings; import org.opensearch.index.seqno.ReplicationTracker; -import org.opensearch.indices.flush.SyncedFlushService; import org.opensearch.rest.RestStatus; import org.opensearch.snapshots.SnapshotState; import org.opensearch.test.OpenSearchTestCase; @@ -101,6 +100,7 @@ import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.TreeSet; import java.util.concurrent.ConcurrentHashMap; @@ -727,7 +727,8 @@ private static void 
deleteAllSLMPolicies() throws IOException { Response response = adminClient().performRequest(new Request("GET", "/_slm/policy")); policies = entityAsMap(response); } catch (ResponseException e) { - if (RestStatus.METHOD_NOT_ALLOWED.getStatus() == e.getResponse().getStatusLine().getStatusCode()) { + if (RestStatus.METHOD_NOT_ALLOWED.getStatus() == e.getResponse().getStatusLine().getStatusCode() + || RestStatus.BAD_REQUEST.getStatus() == e.getResponse().getStatusLine().getStatusCode()) { // If bad request returned, SLM is not enabled. return; } @@ -1269,13 +1270,27 @@ protected static Version minimumNodeVersion() throws IOException { return minVersion; } - protected static void performSyncedFlush(String indexName, boolean retryOnConflict) throws Exception { + protected void syncedFlush(String indexName, boolean retryOnConflict) throws Exception { final Request request = new Request("POST", indexName + "/_flush/synced"); - final List<String> expectedWarnings = Collections.singletonList(SyncedFlushService.SYNCED_FLUSH_DEPRECATION_MESSAGE); final Builder options = RequestOptions.DEFAULT.toBuilder(); - options.setWarningsHandler(warnings -> warnings.isEmpty() == false && warnings.equals(expectedWarnings) == false); + // 8.0 kept in warning message for legacy purposes TODO: change to 3.0 + final List<String> warningMessage = Arrays.asList( + "Synced flush is deprecated and will be removed in 8.0. Use flush at _/flush or /{index}/_flush instead." + ); + final List<String> expectedWarnings = Arrays.asList( + "Synced flush was removed and a normal flush was performed instead. This transition will be removed in a future version." + ); + if (nodeVersions.stream().allMatch(version -> version.onOrAfter(Version.V_2_0_0))) { + options.setWarningsHandler(warnings -> warnings.isEmpty() == false && warnings.equals(expectedWarnings) == false); + } else if (nodeVersions.stream().anyMatch(version -> version.onOrAfter(LegacyESVersion.V_7_6_0))) { + options.setWarningsHandler( + warnings -> warnings.isEmpty() == false + && warnings.equals(expectedWarnings) == false + && warnings.equals(warningMessage) == false + ); + } request.setOptions(options); - // We have to spin a synced-flush request because we fire the global checkpoint sync for the last write operation. + // We have to spin synced-flush requests here because we fire the global checkpoint sync for the last write operation. // A synced-flush request considers the global checkpoint sync as an ongoing operation because it acquires a shard permit. assertBusy(() -> { try { @@ -1291,6 +1306,26 @@ protected static void performSyncedFlush(String indexName, boolean retryOnConfli } } }); + // ensure the global checkpoint is synced; otherwise we might trim the commit with syncId + ensureGlobalCheckpointSynced(indexName); + } + + @SuppressWarnings("unchecked") + private void ensureGlobalCheckpointSynced(String index) throws Exception { + assertBusy(() -> { + Map<?, ?> stats = entityAsMap(client().performRequest(new Request("GET", index + "/_stats?level=shards"))); + List<Map<?, ?>> shardStats = (List<Map<?, ?>>) XContentMapValues.extractValue("indices."
+ index + ".shards.0", stats); + shardStats.stream() + .map(shard -> (Map<?, ?>) XContentMapValues.extractValue("seq_no", shard)) + .filter(Objects::nonNull) + .forEach(seqNoStat -> { + long globalCheckpoint = ((Number) XContentMapValues.extractValue("global_checkpoint", seqNoStat)).longValue(); + long localCheckpoint = ((Number) XContentMapValues.extractValue("local_checkpoint", seqNoStat)).longValue(); + long maxSeqNo = ((Number) XContentMapValues.extractValue("max_seq_no", seqNoStat)).longValue(); + assertThat(shardStats.toString(), localCheckpoint, equalTo(maxSeqNo)); + assertThat(shardStats.toString(), globalCheckpoint, equalTo(maxSeqNo)); + }); + }, 60, TimeUnit.SECONDS); + } static final Pattern CREATE_INDEX_MULTIPLE_MATCHING_TEMPLATES = Pattern.compile( diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java index db8deb55ae5bc..375103d2c1d0f 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java @@ -75,7 +75,7 @@ /** * Runs a suite of yaml tests shared with all the official OpenSearch - * clients against against an opensearch cluster. + * clients against an opensearch cluster. * * The suite timeout is extended to account for projects with a large number of tests. */ diff --git a/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java index 10fa5517367b0..ec88cd0201db5 100644 --- a/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java @@ -107,6 +107,7 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; +import static org.opensearch.transport.TransportHandshaker.V_3_0_0; import static org.opensearch.transport.TransportService.NOOP_TRANSPORT_INTERCEPTOR; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; @@ -2226,10 +2227,10 @@ public void testHandshakeUpdatesVersion() throws IOException { TransportRequestOptions.Type.STATE ); try (Transport.Connection connection = serviceA.openConnection(node, builder.build())) { - // OpenSearch [1.0:2.0) in bwc mode should only "upgrade" to Legacy v7.10.2 + // OpenSearch [1.0:3.0) in bwc mode should only "upgrade" to Legacy v7.10.2 assertEquals( connection.getVersion(), - version.onOrAfter(Version.V_1_0_0) && version.before(Version.V_2_0_0) ? LegacyESVersion.V_7_10_2 : version + version.onOrAfter(Version.V_1_0_0) && version.before(V_3_0_0) ? 
LegacyESVersion.V_7_10_2 : version ); } } @@ -2275,7 +2276,9 @@ public void testTcpHandshake() { PlainActionFuture<Transport.Connection> future = PlainActionFuture.newFuture(); serviceA.getOriginalTransport().openConnection(node, connectionProfile, future); try (Transport.Connection connection = future.actionGet()) { - assertEquals(Version.V_2_0_0, connection.getVersion()); + // OpenSearch sends a handshake version spoofed as Legacy version 7_10_2 + // todo change for OpenSearch 3.0.0 when Legacy compatibility is removed + assertEquals(LegacyESVersion.V_7_10_2, connection.getVersion()); } } } @@ -2482,31 +2485,26 @@ public void testConcurrentDisconnectOnNonPublishedConnection() throws IOExceptio MockTransportService serviceC = buildService("TS_C", version0, Settings.EMPTY); CountDownLatch receivedLatch = new CountDownLatch(1); CountDownLatch sendResponseLatch = new CountDownLatch(1); - serviceC.registerRequestHandler( - "internal:action", - ThreadPool.Names.SAME, - TestRequest::new, - (request, channel, task) -> { - // don't block on a network thread here - threadPool.generic().execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - try { - channel.sendResponse(e); - } catch (IOException e1) { - throw new UncheckedIOException(e1); - } + serviceC.registerRequestHandler("internal:action", ThreadPool.Names.SAME, TestRequest::new, (request, channel, task) -> { + // don't block on a network thread here + threadPool.generic().execute(new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + try { + channel.sendResponse(e); + } catch (IOException e1) { + throw new UncheckedIOException(e1); + } } - @Override - protected void doRun() throws Exception { - receivedLatch.countDown(); - sendResponseLatch.await(); - channel.sendResponse(TransportResponse.Empty.INSTANCE); - } - }); - } - ); + @Override + protected void doRun() throws Exception { + receivedLatch.countDown(); + sendResponseLatch.await(); + channel.sendResponse(TransportResponse.Empty.INSTANCE); + } + }); + }); serviceC.start(); serviceC.acceptIncomingRequests(); CountDownLatch responseLatch = new CountDownLatch(1); @@ -2561,31 +2559,26 @@ public void testTransportStats() throws Exception { MockTransportService serviceC = buildService("TS_C", version0, Settings.EMPTY); CountDownLatch receivedLatch = new CountDownLatch(1); CountDownLatch sendResponseLatch = new CountDownLatch(1); - serviceB.registerRequestHandler( - "internal:action", - ThreadPool.Names.SAME, - TestRequest::new, - (request, channel, task) -> { - // don't block on a network thread here - threadPool.generic().execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - try { - channel.sendResponse(e); - } catch (IOException e1) { - throw new UncheckedIOException(e1); - } + serviceB.registerRequestHandler("internal:action", ThreadPool.Names.SAME, TestRequest::new, (request, channel, task) -> { + // don't block on a network thread here + threadPool.generic().execute(new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + try { + channel.sendResponse(e); + } catch (IOException e1) { + throw new UncheckedIOException(e1); + } } - @Override - protected void doRun() throws Exception { - receivedLatch.countDown(); - sendResponseLatch.await(); - channel.sendResponse(TransportResponse.Empty.INSTANCE); - } - }); - } - ); + @Override + protected void doRun() throws Exception { + receivedLatch.countDown(); + sendResponseLatch.await(); + channel.sendResponse(TransportResponse.Empty.INSTANCE); + } + }); + }); 
serviceC.start(); serviceC.acceptIncomingRequests(); CountDownLatch responseLatch = new CountDownLatch(1); @@ -2685,31 +2678,26 @@ public void testTransportStatsWithException() throws Exception { CountDownLatch sendResponseLatch = new CountDownLatch(1); Exception ex = new RuntimeException("boom"); ex.setStackTrace(new StackTraceElement[0]); - serviceB.registerRequestHandler( - "internal:action", - ThreadPool.Names.SAME, - TestRequest::new, - (request, channel, task) -> { - // don't block on a network thread here - threadPool.generic().execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - try { - channel.sendResponse(e); - } catch (IOException e1) { - throw new UncheckedIOException(e1); - } + serviceB.registerRequestHandler("internal:action", ThreadPool.Names.SAME, TestRequest::new, (request, channel, task) -> { + // don't block on a network thread here + threadPool.generic().execute(new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + try { + channel.sendResponse(e); + } catch (IOException e1) { + throw new UncheckedIOException(e1); + } } - @Override - protected void doRun() throws Exception { - receivedLatch.countDown(); - sendResponseLatch.await(); - onFailure(ex); - } - }); - } - ); + @Override + protected void doRun() throws Exception { + receivedLatch.countDown(); + sendResponseLatch.await(); + onFailure(ex); + } + }); + }); serviceC.start(); serviceC.acceptIncomingRequests(); CountDownLatch responseLatch = new CountDownLatch(1); diff --git a/test/framework/src/main/java/org/opensearch/upgrades/AbstractFullClusterRestartTestCase.java b/test/framework/src/main/java/org/opensearch/upgrades/AbstractFullClusterRestartTestCase.java index ff4fa54769287..d79e1730e16f6 100644 --- a/test/framework/src/main/java/org/opensearch/upgrades/AbstractFullClusterRestartTestCase.java +++ b/test/framework/src/main/java/org/opensearch/upgrades/AbstractFullClusterRestartTestCase.java @@ -108,10 +104,6 @@ protected void assertTotalHits(int expectedTotalHits, Map<?, ?> response) { } protected static int extractTotalHits(Map<?, ?> response) { - if (isRunningAgainstOldCluster() && getOldClusterVersion().before(LegacyESVersion.V_7_0_0)) { - return (Integer) XContentMapValues.extractValue("hits.total", response); - } else { - return (Integer) XContentMapValues.extractValue("hits.total.value", response); - } + return (Integer) XContentMapValues.extractValue("hits.total.value", response); } } diff --git a/test/logger-usage/build.gradle b/test/logger-usage/build.gradle index a80d8a115bca5..2cf271a0f601b 100644 --- a/test/logger-usage/build.gradle +++ b/test/logger-usage/build.gradle @@ -31,8 +31,8 @@ apply plugin: 'opensearch.java' dependencies { - api 'org.ow2.asm:asm:7.1' - api 'org.ow2.asm:asm-tree:7.1' + api 'org.ow2.asm:asm:9.2' + api 'org.ow2.asm:asm-tree:9.2' api 'org.ow2.asm:asm-analysis:7.1' api "org.apache.logging.log4j:log4j-api:${versions.log4j}" testImplementation project(":test:framework")